diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000..afc8312d97 --- /dev/null +++ b/.clang-format @@ -0,0 +1,72 @@ +--- +Language: Cpp + +BasedOnStyle: LLVM +AccessModifierOffset: -4 +AlignAfterOpenBracket: AlwaysBreak +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +# AlignEscapedNewlinesLeft: false +AlignEscapedNewlines: Right +AlignOperands: false +AlignTrailingComments: false +AllowAllArgumentsOnNextLine: true +AllowAllConstructorInitializersOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: None +AllowShortIfStatementsOnASingleLine: false +AllowShortLambdasOnASingleLine: true +AllowShortLoopsOnASingleLine: false +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: Yes +BinPackArguments: false +BinPackParameters: false + +BreakBeforeBraces: Custom +BraceWrapping: + AfterClass: true + AfterControlStatement: true + AfterEnum: true + AfterExternBlock: true + AfterFunction: true + AfterNamespace: true + AfterStruct: true + AfterUnion: true + BeforeCatch: true + BeforeElse: true + # BeforeLambdaBody: true + SplitEmptyFunction: false + SplitEmptyNamespace: false + SplitEmptyRecord: false + +BreakConstructorInitializers: BeforeComma +BreakInheritanceList: BeforeComma +ColumnLimit: 80 +CompactNamespaces: false +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +FixNamespaceComments: true +IndentWidth: 4 +NamespaceIndentation: Inner +PointerAlignment: Right +SortUsingDeclarations: false +SpaceAfterCStyleCast: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesInAngles: false +SpacesInContainerLiterals: false +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Auto +StatementMacros: ["OPENPMD_private", "OPENPMD_protected"] + +--- +Language: Json +BasedOnStyle: llvm +... diff --git a/.github/ISSUE_TEMPLATE/install_problem.md b/.github/ISSUE_TEMPLATE/install_problem.md index 7a0cfa2991..f327add378 100644 --- a/.github/ISSUE_TEMPLATE/install_problem.md +++ b/.github/ISSUE_TEMPLATE/install_problem.md @@ -5,7 +5,7 @@ labels: install --- **Performed steps** -The following steps +The following steps ```commandline ... 
diff --git a/.github/ci/sanitizer/clang/Leak.supp b/.github/ci/sanitizer/clang/Leak.supp index 3c482e7069..81e8ee7f75 100644 --- a/.github/ci/sanitizer/clang/Leak.supp +++ b/.github/ci/sanitizer/clang/Leak.supp @@ -15,3 +15,4 @@ leak:adios_inq_var # ADIOS2 leak:adios2::core::engine::SstReader::* leak:adios2::core::engine::SstWriter::* +leak:ps_make_timer_name_ diff --git a/.github/ci/spack-envs/clang10_nopy_ompi_h5_ad1_ad2/spack.yaml b/.github/ci/spack-envs/clang10_nopy_ompi_h5_ad1_ad2/spack.yaml index 70a3799130..3de011a96f 100644 --- a/.github/ci/spack-envs/clang10_nopy_ompi_h5_ad1_ad2/spack.yaml +++ b/.github/ci/spack-envs/clang10_nopy_ompi_h5_ad1_ad2/spack.yaml @@ -20,26 +20,26 @@ spack: variants: ~zfp ~sz ~png ~dataman ~python ~fortran ~ssc ~shared ~bzip2 cmake: externals: - - spec: "cmake" + - spec: cmake@3.23.0 prefix: /usr buildable: False openmpi: externals: - - spec: "openmpi" + - spec: openmpi@2.1.1 prefix: /usr buildable: False perl: externals: - - spec: "perl" + - spec: perl@5.26.1 prefix: /usr buildable: False python: externals: - - spec: "python" + - spec: python@3.8.0 prefix: /usr buildable: False all: - target: ['x86_64'] + target: [x86_64] variants: ~fortran providers: mpi: [openmpi] @@ -63,3 +63,5 @@ spack: config: build_jobs: 2 + mirrors: + E4S: https://cache.e4s.io diff --git a/.github/ci/spack-envs/clang5_nopy_nompi_h5/spack.yaml b/.github/ci/spack-envs/clang5_nopy_nompi_h5/spack.yaml index 42992dbea4..b0fedbe97b 100644 --- a/.github/ci/spack-envs/clang5_nopy_nompi_h5/spack.yaml +++ b/.github/ci/spack-envs/clang5_nopy_nompi_h5/spack.yaml @@ -10,7 +10,7 @@ spack: packages: all: - target: ['x86_64'] + target: [x86_64] variants: ~mpi ~fortran compiler: [clang@5.0.0] @@ -32,3 +32,5 @@ spack: config: build_jobs: 2 + mirrors: + E4S: https://cache.e4s.io diff --git a/.github/ci/spack-envs/clang5_nopy_ompi_h5_ad1_ad2_bp3/spack.yaml b/.github/ci/spack-envs/clang5_nopy_ompi_h5_ad1_ad2_bp3/spack.yaml index c9f09cf1d6..7323de87c7 100644 --- a/.github/ci/spack-envs/clang5_nopy_ompi_h5_ad1_ad2_bp3/spack.yaml +++ b/.github/ci/spack-envs/clang5_nopy_ompi_h5_ad1_ad2_bp3/spack.yaml @@ -18,26 +18,26 @@ spack: variants: ~zfp ~sz ~png ~dataman ~python ~fortran ~ssc ~shared ~bzip2 cmake: externals: - - spec: "cmake" + - spec: cmake@3.23.0 prefix: /usr buildable: False openmpi: externals: - - spec: "openmpi" + - spec: openmpi@2.1.1 prefix: /usr buildable: False perl: externals: - - spec: "perl" + - spec: perl@5.26.1 prefix: /usr buildable: False python: externals: - - spec: "python" + - spec: python@3.8.0 prefix: /usr buildable: False all: - target: ['x86_64'] + target: [x86_64] variants: ~fortran providers: mpi: [openmpi] @@ -61,3 +61,5 @@ spack: config: build_jobs: 2 + mirrors: + E4S: https://cache.e4s.io diff --git a/.github/ci/spack-envs/clang8_py38_mpich_h5_ad1_ad2/spack.yaml b/.github/ci/spack-envs/clang8_py38_mpich_h5_ad1_ad2/spack.yaml index f6db0b1d35..4fccfe9d0b 100644 --- a/.github/ci/spack-envs/clang8_py38_mpich_h5_ad1_ad2/spack.yaml +++ b/.github/ci/spack-envs/clang8_py38_mpich_h5_ad1_ad2/spack.yaml @@ -18,26 +18,26 @@ spack: variants: ~zfp ~sz ~png ~dataman ~python ~fortran ~ssc ~shared ~bzip2 cmake: externals: - - spec: "cmake" + - spec: cmake@3.23.0 prefix: /usr buildable: False mpich: externals: - - spec: "mpich" + - spec: mpich@3.3 prefix: /usr buildable: False perl: externals: - - spec: "perl" + - spec: perl@5.26.1 prefix: /usr buildable: False python: externals: - - spec: "python" + - spec: python@3.8.0 prefix: /usr buildable: False all: - target: ['x86_64'] + target: 
[x86_64] variants: ~fortran providers: mpi: [mpich] @@ -61,3 +61,5 @@ spack: config: build_jobs: 2 + mirrors: + E4S: https://cache.e4s.io diff --git a/.github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad1_ad2/spack.yaml b/.github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad1_ad2/spack.yaml index 685b4f432e..98acde6e62 100644 --- a/.github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad1_ad2/spack.yaml +++ b/.github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad1_ad2/spack.yaml @@ -18,26 +18,26 @@ spack: variants: ~zfp ~sz ~png ~dataman ~python ~fortran ~ssc ~shared ~bzip2 cmake: externals: - - spec: "cmake" + - spec: cmake@3.23.0 prefix: /usr buildable: False openmpi: externals: - - spec: "openmpi" + - spec: openmpi@2.1.1 prefix: /usr buildable: False perl: externals: - - spec: "perl" + - spec: perl@5.26.1 prefix: /usr buildable: False python: externals: - - spec: "python" + - spec: python@3.8.0 prefix: /usr buildable: False all: - target: ['x86_64'] + target: [x86_64] variants: ~fortran providers: mpi: [openmpi] @@ -61,3 +61,5 @@ spack: config: build_jobs: 2 + mirrors: + E4S: https://cache.e4s.io diff --git a/.github/ci/spack-envs/gcc5_py36_ompi_h5_ad1_ad2/spack.yaml b/.github/ci/spack-envs/gcc5_py36_ompi_h5_ad1_ad2/spack.yaml index 7279dee691..d9090d834a 100644 --- a/.github/ci/spack-envs/gcc5_py36_ompi_h5_ad1_ad2/spack.yaml +++ b/.github/ci/spack-envs/gcc5_py36_ompi_h5_ad1_ad2/spack.yaml @@ -18,26 +18,26 @@ spack: variants: ~zfp ~sz ~png ~dataman ~python ~fortran ~ssc ~shared ~bzip2 cmake: externals: - - spec: "cmake" + - spec: cmake@3.23.0 prefix: /usr buildable: False openmpi: externals: - - spec: "openmpi" + - spec: openmpi@2.1.1 prefix: /usr buildable: False perl: externals: - - spec: "perl" + - spec: perl@5.26.1 prefix: /usr buildable: False python: externals: - - spec: "python" + - spec: python@3.6.3 prefix: /usr buildable: False all: - target: ['x86_64'] + target: [x86_64] variants: ~fortran compiler: [gcc@5.0.0] @@ -59,3 +59,5 @@ spack: config: build_jobs: 2 + mirrors: + E4S: https://cache.e4s.io diff --git a/.github/ci/spack/compilers.yaml b/.github/ci/spack/compilers.yaml deleted file mode 100644 index ab971b0fec..0000000000 --- a/.github/ci/spack/compilers.yaml +++ /dev/null @@ -1,200 +0,0 @@ -compilers: -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: highsierra - paths: - cc: /usr/bin/clang - cxx: /usr/bin/clang++ - f77: null - fc: null - spec: apple-clang@9.1.0 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: { - cxxflags: "-stdlib=libc++" - } - modules: [] - operating_system: highsierra - paths: - cc: /usr/bin/clang - cxx: /usr/bin/clang++ - f77: null - fc: null - spec: apple-clang@10.0.0 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: mojave - paths: - cc: /usr/bin/clang - cxx: /usr/bin/clang++ - f77: null - fc: null - spec: apple-clang@11.0.0 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: ubuntu16.04 - paths: - cc: /usr/lib/llvm-5.0/bin/clang - cxx: /usr/lib/llvm-5.0/bin/clang++ - f77: null - fc: null - spec: clang@5.0.0 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: { - cxxflags: "-stdlib=libc++" - } - modules: [] - operating_system: ubuntu16.04 - paths: - cc: /usr/bin/clang-6.0 - cxx: /usr/bin/clang++-6.0 - f77: null - fc: null - spec: clang@6.0.0 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: 
ubuntu16.04 - paths: - cc: /usr/local/clang-7.0.0/bin/clang - cxx: /usr/local/clang-7.0.0/bin/clang++ - f77: /usr/bin/gfortran-4.9 - fc: /usr/bin/gfortran-4.9 - spec: clang@7.0.0 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: ubuntu14.04 - paths: - cc: /usr/bin/gcc-4.8 - cxx: /usr/bin/g++-4.8 - f77: /usr/bin/gfortran-4.8 - fc: /usr/bin/gfortran-4.8 - spec: gcc@4.8.5 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: ubuntu14.04 - paths: - cc: /usr/bin/gcc-4.9 - cxx: /usr/bin/g++-4.9 - f77: /usr/bin/gfortran-4.9 - fc: /usr/bin/gfortran-4.9 - spec: gcc@4.9.4 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: ubuntu14.04 - paths: - cc: /usr/bin/gcc-6 - cxx: /usr/bin/g++-6 - f77: /usr/bin/gfortran-6 - fc: /usr/bin/gfortran-6 - spec: gcc@6.5.0 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: ubuntu14.04 - paths: - cc: /usr/bin/gcc-7 - cxx: /usr/bin/g++-7 - f77: /usr/bin/gfortran-7 - fc: /usr/bin/gfortran-7 - spec: gcc@7.4.0 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: ubuntu16.04 - paths: - cc: /usr/bin/gcc-8 - cxx: /usr/bin/g++-8 - f77: /usr/bin/gfortran-8 - fc: /usr/bin/gfortran-8 - spec: gcc@8.1.0 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: ubuntu18.04 - paths: - cc: /usr/bin/gcc-7 - cxx: /usr/bin/g++-7 - f77: /usr/bin/gfortran-7 - fc: /usr/bin/gfortran-7 - spec: gcc@7.4.0 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: ubuntu18.04 - paths: - cc: /usr/bin/clang-8 - cxx: /usr/bin/clang++-8 - f77: null - fc: null - spec: clang@8.0.0 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: ubuntu18.04 - paths: - cc: /usr/bin/clang-10 - cxx: /usr/bin/clang++-10 - f77: null - fc: null - spec: clang@10.0.0 - target: x86_64 -- compiler: - environment: {} - extra_rpaths: [] - flags: {} - modules: [] - operating_system: ubuntu18.04 - paths: - cc: /usr/bin/gcc-9 - cxx: /usr/bin/g++-9 - f77: /usr/bin/gfortran-9 - fc: /usr/bin/gfortran-9 - spec: gcc@9.3.0 - target: x86_64 diff --git a/.github/ci/spack/config.yaml b/.github/ci/spack/config.yaml deleted file mode 100644 index 77a2e80adb..0000000000 --- a/.github/ci/spack/config.yaml +++ /dev/null @@ -1,2 +0,0 @@ -config: - build_jobs: 2 diff --git a/.github/ci/spack/packages.yaml b/.github/ci/spack/packages.yaml deleted file mode 100644 index e449c1d0ca..0000000000 --- a/.github/ci/spack/packages.yaml +++ /dev/null @@ -1,155 +0,0 @@ -packages: - perl: - version: [5.14.0, 5.18.2, 5.22.4, 5.26.2] - externals: - - spec: "perl@5.14.0%gcc@4.8.5 arch=linux-ubuntu14-x86_64" - prefix: /usr - - spec: "perl@5.14.0%gcc@4.9.4 arch=linux-ubuntu14-x86_64" - prefix: /usr - - spec: "perl@5.14.0%gcc@6.5.0 arch=linux-ubuntu14-x86_64" - prefix: /usr - - spec: "perl@5.14.0%gcc@7.4.0 arch=linux-ubuntu14-x86_64" - prefix: /usr - - spec: "perl@5.22.4%gcc@8.1.0 arch=linux-ubuntu16-x86_64" - prefix: /usr - - spec: "perl@5.26.2%gcc@9.3.0 arch=linux-ubuntu18-x86_64" - prefix: /usr - - spec: "perl@5.22.4%clang@5.0.0 arch=linux-ubuntu16-x86_64" - prefix: /usr - - spec: "perl@5.22.4%clang@6.0.0 arch=linux-ubuntu16-x86_64" - prefix: /usr - - spec: "perl@5.22.4%clang@7.0.0 
arch=linux-ubuntu16-x86_64" - prefix: /usr - - spec: "perl@5.26.1%clang@8.0.0 arch=linux-ubuntu18-x86_64" - prefix: /usr - - spec: "perl@5.26.1%clang@10.0.0 arch=linux-ubuntu18-x86_64" - prefix: /usr - - spec: "perl@5.18.2%apple-clang@9.1.0 arch=darwin-highsierra-x86_64" - prefix: /usr - - spec: "perl@5.18.2%apple-clang@10.0.0 arch=darwin-highsierra-x86_64" - prefix: /usr - - spec: "perl@5.18.2%apple-clang@11.0.0 arch=darwin-mojave-x86_64" - prefix: /usr - buildable: False - cmake: - version: [3.12.0] - externals: - - spec: "cmake@3.12.0%gcc@4.8.5 arch=linux-ubuntu14-x86_64" - prefix: /home/travis/.cache/cmake-3.12.0 - - spec: "cmake@3.12.0%gcc@4.9.4 arch=linux-ubuntu14-x86_64" - prefix: /home/travis/.cache/cmake-3.12.0 - - spec: "cmake@3.12.0%gcc@6.5.0 arch=linux-ubuntu14-x86_64" - prefix: /home/travis/.cache/cmake-3.12.0 - - spec: "cmake@3.12.0%gcc@7.4.0 arch=linux-ubuntu14-x86_64" - prefix: /home/travis/.cache/cmake-3.12.0 - - spec: "cmake@3.12.0%gcc@8.1.0 arch=linux-ubuntu16-x86_64" - prefix: /home/travis/.cache/cmake-3.12.0 - - spec: "cmake@3.12.0%gcc@9.3.0 arch=linux-ubuntu18-x86_64" - prefix: /home/travis/.cache/cmake-3.12.0 - - spec: "cmake@3.12.0%clang@5.0.0 arch=linux-ubuntu16-x86_64" - prefix: /home/travis/.cache/cmake-3.12.0 - - spec: "cmake@3.12.0%clang@6.0.0 arch=linux-ubuntu16-x86_64" - prefix: /home/travis/.cache/cmake-3.12.0 - - spec: "cmake@3.12.0%clang@7.0.0 arch=linux-ubuntu16-x86_64" - prefix: /home/travis/.cache/cmake-3.12.0 - - spec: "cmake@3.12.0%clang@8.0.0 arch=linux-ubuntu18-x86_64" - prefix: /home/travis/.cache/cmake-3.12.0 - - spec: "cmake@3.12.0%clang@10.0.0 arch=linux-ubuntu18-x86_64" - prefix: /home/travis/.cache/cmake-3.12.0 - - spec: "cmake@3.12.0%apple-clang@9.1.0 arch=darwin-highsierra-x86_64" - prefix: /Applications/CMake.app/Contents/ - - spec: "cmake@3.12.0%apple-clang@10.0.0 arch=darwin-highsierra-x86_64" - prefix: /Applications/CMake.app/Contents/ - - spec: "cmake@3.12.0%apple-clang@11.0.0 arch=darwin-mojave-x86_64" - prefix: /Applications/CMake.app/Contents/ - buildable: False - openmpi: - version: [1.6.5, 1.10.2, 2.1.1] - externals: - - spec: "openmpi@1.6.5%gcc@4.8.5 ~wrapper-rpath arch=linux-ubuntu14-x86_64" - prefix: /usr - - spec: "openmpi@1.6.5%gcc@4.9.4 ~wrapper-rpath arch=linux-ubuntu14-x86_64" - prefix: /usr - - spec: "openmpi@1.6.5%gcc@6.5.0 ~wrapper-rpath arch=linux-ubuntu14-x86_64" - prefix: /usr - - spec: "openmpi@1.6.5%gcc@7.4.0 ~wrapper-rpath arch=linux-ubuntu14-x86_64" - prefix: /usr - - spec: "openmpi@1.10.2%gcc@8.1.0 arch=linux-ubuntu16-x86_64" - prefix: /usr - - spec: "openmpi@2.1.1%gcc@9.3.0 arch=linux-ubuntu18-x86_64" - prefix: /usr - - spec: "openmpi@1.10.2%clang@5.0.0 arch=linux-ubuntu16-x86_64" - prefix: /usr - - spec: "openmpi@1.10.2%clang@7.0.0 arch=linux-ubuntu16-x86_64" - prefix: /usr - - spec: "openmpi@2.1.1%clang@10.0.0 arch=linux-ubuntu18-x86_64" - prefix: /usr - buildable: False - mpich: - version: [3.3] - externals: - - spec: "mpich@3.3%clang@8.0.0 arch=linux-ubuntu18-x86_64" - prefix:: /usr - buildable: False - hdf5: - version: [1.10.1, 1.8.13] - adios: - variants: ~zfp ~sz ~lz4 ~blosc - adios2: - variants: ~zfp ~sz ~png ~dataman ~python ~fortran ~ssc ~shared ~bzip2 - # ~shared is a work-around macOS dylib rpath issue for ADIOS2 - # https://github.com/ornladios/ADIOS2/issues/2316 - # https://spack.readthedocs.io/en/latest/config_yaml.html - # ~bzip2 - # Library not loaded: @rpath/libbz2.1.dylib - python: - version: [3.5.5, 3.6.3, 3.7.1, 3.7.2, 3.8.0] - externals: - - spec: "python@3.8.0%clang@10.0.0 
arch=linux-ubuntu18-x86_64" - prefix: /home/travis/virtualenv/python3.8 - - spec: "python@3.8.0%clang@8.0.0 arch=linux-ubuntu18-x86_64" - prefix: /home/travis/virtualenv/python3.8 - - spec: "python@3.8.0%clang@7.0.0 arch=linux-ubuntu16-x86_64" - prefix: /home/travis/virtualenv/python3.8 - - spec: "python@3.7.1%clang@7.0.0 arch=linux-ubuntu16-x86_64" - prefix: /home/travis/virtualenv/python3.7 - - spec: "python@3.6.3%clang@6.0.0 arch=linux-ubuntu16-x86_64" - prefix: /home/travis/virtualenv/python3.6 - - spec: "python@3.6.3%clang@5.0.0 arch=linux-ubuntu16-x86_64" - prefix: /home/travis/virtualenv/python3.6 - - spec: "python@3.8.0%gcc@9.3.0 arch=linux-ubuntu18-x86_64" - prefix: /home/travis/virtualenv/python3.8 - - spec: "python@3.6.3%gcc@7.4.0 arch=linux-ubuntu14-x86_64" - prefix: /home/travis/virtualenv/python3.6 - - spec: "python@3.5.5%gcc@6.5.0 arch=linux-ubuntu14-x86_64" - prefix: /home/travis/virtualenv/python3.5 - - spec: "python@3.6.3%gcc@4.9.4 arch=linux-ubuntu14-x86_64" - prefix: /home/travis/virtualenv/python3.6 - - spec: "python@3.5.5%gcc@4.8.5 arch=linux-ubuntu14-x86_64" - prefix: /home/travis/virtualenv/python3.5 - - spec: "python@3.6.3%gcc@6.5.0 arch=linux-ubuntu14-x86_64" - prefix: /home/travis/virtualenv/python3.6 - - spec: "python@3.7.1%gcc@8.1.0 arch=linux-ubuntu16-x86_64" - prefix: /home/travis/virtualenv/python3.7 - - spec: "python@3.7.2%apple-clang@9.1.0 arch=darwin-highsierra-x86_64" - prefix: /usr/local/opt/python - - spec: "python@3.7.2%apple-clang@10.0.0 arch=darwin-highsierra-x86_64" - prefix: /usr/local/opt/python - - spec: "python@3.7.2%apple-clang@11.0.0 arch=darwin-mojave-x86_64" - prefix: /usr/local/opt/python - buildable: False - - # speed up builds of dependencies - ncurses: - variants: ~termlib - gettext: - variants: ~curses ~libxml2 ~git ~tar ~bzip2 ~xz - py-numpy: - variants: ~blas ~lapack - - # set generic binary generation, mpi providers and compiler versions - all: - target: ['x86_64'] - providers: - mpi: [openmpi, mpich] - compiler: [clang@5.0.0, clang@6.0.0, clang@7.0.0, clang@8.0.0, clang@10.0.0, apple-clang@9.1.0, apple-clang@10.0.0, apple-clang@11.0.0, gcc@4.8.5, gcc@4.9.4, gcc@6.5.0, gcc@7.4.0, gcc@8.1.0, gcc@9.3.0] diff --git a/.github/workflows/dependencies/install_nvcc11.sh b/.github/workflows/dependencies/install_nvcc11.sh index bd12a1cd4d..9b0948926b 100755 --- a/.github/workflows/dependencies/install_nvcc11.sh +++ b/.github/workflows/dependencies/install_nvcc11.sh @@ -15,8 +15,7 @@ sudo apt-get install -y \ pkg-config \ wget -sudo wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub -sudo apt-key add 7fa2af80.pub +sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" \ | sudo tee /etc/apt/sources.list.d/cuda.list sudo apt-get update @@ -27,4 +26,3 @@ sudo apt-get install -y \ cuda-minimal-build-11-2 sudo ln -s cuda-11.2 /usr/local/cuda - diff --git a/.github/workflows/dependencies/install_nvhpc21-5.sh b/.github/workflows/dependencies/install_nvhpc21-11.sh similarity index 50% rename from .github/workflows/dependencies/install_nvhpc21-5.sh rename to .github/workflows/dependencies/install_nvhpc21-11.sh index ee966fef1b..f333d7121b 100755 --- a/.github/workflows/dependencies/install_nvhpc21-5.sh +++ b/.github/workflows/dependencies/install_nvhpc21-11.sh @@ -14,14 +14,13 @@ sudo apt-get install -y \ pkg-config \ wget -wget -q 
https://developer.download.nvidia.com/hpc-sdk/21.5/nvhpc-21-5_21.5_amd64.deb \ - https://developer.download.nvidia.com/hpc-sdk/21.5/nvhpc-2021_21.5_amd64.deb -sudo apt-get update -sudo apt-get install -y ./nvhpc-21-5_21.5_amd64.deb ./nvhpc-2021_21.5_amd64.deb -rm -rf ./nvhpc-21-5_21.5_amd64.deb ./nvhpc-2021_21.5_amd64.deb +echo 'deb [trusted=yes] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | \ + sudo tee /etc/apt/sources.list.d/nvhpc.list +sudo apt-get update -y +sudo apt-get install -y --no-install-recommends nvhpc-21-11 # things should reside in /opt/nvidia/hpc_sdk now # activation via: # source /etc/profile.d/modules.sh -# module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/20.7 +# module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/21.11 diff --git a/.github/workflows/dependencies/install_spack b/.github/workflows/dependencies/install_spack index 9c54127010..30d7d06a92 100755 --- a/.github/workflows/dependencies/install_spack +++ b/.github/workflows/dependencies/install_spack @@ -3,7 +3,7 @@ set -eu -o pipefail -spack_ver="2b6f896ca744081a38579573a52824bf334fb54b" +spack_ver="0.17.1" cd /opt if [[ -d spack && ! -f spack_${spack_ver} ]] @@ -13,8 +13,8 @@ fi if [ ! -d spack ] then # download - curl -sOL https://github.com/spack/spack/archive/${spack_ver}.tar.gz - tar -xf ${spack_ver}.tar.gz && rm ${spack_ver}.tar.gz + curl -sOL https://github.com/spack/spack/archive/refs/tags/v${spack_ver}.tar.gz + tar -xf v${spack_ver}.tar.gz && rm v${spack_ver}.tar.gz mv spack-${spack_ver} spack touch spack_${spack_ver} fi @@ -23,14 +23,13 @@ fi ln -s /opt/spack/bin/spack /usr/bin/spack # add binary mirror -spack mirror add E4S https://cache.e4s.io -curl -sOL https://oaciss.uoregon.edu/e4s/e4s.pub -spack gpg trust e4s.pub +#spack mirror add E4S https://cache.e4s.io +#spack buildcache keys -it # find compilers & external packages -spack compiler find +#spack compiler find #spack external find # accessible by regular CI user chmod a+rwx -R /opt/spack -chmod a+rwx $HOME/.spack/ +#chmod a+rwx -R $HOME/.spack/ diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index e1e1b6edec..f655dccee8 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -46,6 +46,10 @@ jobs: - name: Build env: {CC: clang-5.0, CXX: clang++-5.0, CXXFLAGS: -Werror -Wno-deprecated-declarations} run: | + cmake --version + mpiexec --version + perl --version + python --version eval $(spack env activate --sh .github/ci/spack-envs/clang5_nopy_ompi_h5_ad1_ad2_bp3/) spack install @@ -141,6 +145,10 @@ jobs: - name: Build env: {CC: clang-8, CXX: clang++-8, CXXFLAGS: -Werror -Wno-deprecated-declarations, OPENPMD2_ADIOS2_SCHEMA: 20210209} run: | + cmake --version + mpiexec --version + perl --version + python --version eval $(spack env activate --sh .github/ci/spack-envs/clang8_py38_mpich_h5_ad1_ad2/) spack install @@ -270,12 +278,13 @@ jobs: - name: Install shell: bash -eo pipefail -l {0} run: | - conda env create --file conda.yml + conda install -c conda-forge -y mamba + mamba env create --file conda.yml - name: Build shell: bash -eo pipefail -l {0} env: {CXXFLAGS: -Werror -Wno-deprecated-declarations} run: | - conda activate openpmd-api-dev + source activate openpmd-api-dev share/openPMD/download_samples.sh build chmod u-w build/samples/git-sample/*.h5 diff --git a/.github/workflows/nvidia.yml b/.github/workflows/nvidia.yml index f32e343e62..5c008fdfe1 100644 --- a/.github/workflows/nvidia.yml +++ b/.github/workflows/nvidia.yml @@ -27,8 +27,8 @@ jobs: cmake --build build --parallel 2 ctest 
--test-dir build --output-on-failure - tests-nvhpc21-5-nvcc: - name: NVHPC@21.5 + tests-nvhpc21-11-nvcc: + name: NVHPC@21.11 runs-on: ubuntu-20.04 # Catch warnings: # line 4314: error: variable "::autoRegistrar73" was declared but never referenced @@ -36,11 +36,11 @@ jobs: steps: - uses: actions/checkout@v2 - name: Dependencies - run: .github/workflows/dependencies/install_nvhpc21-5.sh + run: .github/workflows/dependencies/install_nvhpc21-11.sh - name: Build & Install run: | source /etc/profile.d/modules.sh - module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/21.5 + module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/21.11 which nvcc || echo "nvcc not in PATH!" which nvc++ || echo "nvc++ not in PATH!" @@ -63,4 +63,3 @@ jobs: -DopenPMD_USE_INVASIVE_TESTS=ON cmake --build build --parallel 2 ctest --test-dir build --output-on-failure - diff --git a/.github/workflows/source/hasNonASCII b/.github/workflows/source/hasNonASCII index e017725a06..205c69cb1a 100755 --- a/.github/workflows/source/hasNonASCII +++ b/.github/workflows/source/hasNonASCII @@ -38,4 +38,3 @@ do done exit $ok - diff --git a/.lgtm.yml b/.lgtm.yml index 0e3c5b0b07..8f1ac78e25 100644 --- a/.lgtm.yml +++ b/.lgtm.yml @@ -21,4 +21,3 @@ extraction: index: build_command: - $CMAKE --build build -j 2 - diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7dc5e9e03f..eff6613ea5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,10 +18,100 @@ exclude: '^share/openPMD/thirdParty' # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.1.0 + rev: v4.2.0 hooks: + - id: trailing-whitespace + args: [--markdown-linebreak-ext=md] + - id: end-of-file-fixer - id: mixed-line-ending - id: check-json - id: check-toml - id: check-yaml + args: [--allow-multiple-documents] - id: check-added-large-files + - id: requirements-txt-fixer +# - id: fix-encoding-pragma +# exclude: ^noxfile.py$ + +# documentation files: .rst +- repo: https://github.com/pre-commit/pygrep-hooks + rev: v1.9.0 + hooks: + - id: rst-backticks + - id: rst-directive-colons + - id: rst-inline-touching-normal + +#- repo: https://github.com/asottile/pyupgrade +# rev: v2.29.0 +# hooks: +# - id: pyupgrade + +# Changes tabs to spaces +- repo: https://github.com/Lucas-C/pre-commit-hooks + rev: v1.2.0 + hooks: + - id: remove-tabs + +# CMake formatting +#- repo: https://github.com/cheshirekow/cmake-format-precommit +# rev: v0.6.13 +# hooks: +# - id: cmake-format +# additional_dependencies: [pyyaml] +# types: [file] +# files: (\.cmake|CMakeLists.txt)(.in)?$ + +# C++ formatting +# clang-format v13 +# to run manually, use .github/workflows/clang-format/clang-format.sh +- repo: https://github.com/pre-commit/mirrors-clang-format + rev: v14.0.3 + hooks: + - id: clang-format + +# Autoremoves unused Python imports +- repo: https://github.com/hadialqattan/pycln + rev: v1.3.3 + hooks: + - id: pycln + name: pycln (python) + +# Sorts Python imports according to PEP8 +# https://www.python.org/dev/peps/pep-0008/#imports +- repo: https://github.com/pycqa/isort + rev: 5.10.1 + hooks: + - id: isort + name: isort (python) + +# Python: Flake8 (checks only, does this support auto-fixes?) +#- repo: https://github.com/PyCQA/flake8 +# rev: 4.0.1 +# hooks: +# - id: flake8 +# additional_dependencies: &flake8_dependencies +# - flake8-bugbear +# - pep8-naming +# exclude: ^(docs/.*|tools/.*)$ +# Alternatively: use autopep8? 
+ +# Python Formatting +#- repo: https://github.com/psf/black +# rev: 21.10b0 # Keep in sync with blacken-docs +# hooks: +# - id: black +#- repo: https://github.com/asottile/blacken-docs +# rev: v1.11.0 +# hooks: +# - id: blacken-docs +# additional_dependencies: +# - black==21.10b0 # keep in sync with black hook + +# Checks the manifest for missing files (native support) +- repo: https://github.com/mgedmin/check-manifest + rev: "0.48" + hooks: + - id: check-manifest + # This is a slow hook, so only run this if --hook-stage manual is passed + stages: [manual] + additional_dependencies: [cmake, ninja] diff --git a/.rodare.json b/.rodare.json index cd2777030f..f2324fc670 100644 --- a/.rodare.json +++ b/.rodare.json @@ -1,158 +1,158 @@ { - "creators": [ - { - "name": "Huebl, Axel", - "affiliation": "Lawrence Berkeley National Laboratory", - "orcid": "0000-0003-1943-7141" - }, - { - "name": "Poeschel, Franz", - "affiliation": "Center for Advanced Systems Understanding (CASUS)", - "orcid": "0000-0001-7042-5088" - }, - { - "name": "Koller, Fabian", - "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", - "orcid": "0000-0001-8704-1769" - }, - { - "name": "Gu, Junmin", - "affiliation": "Lawrence Berkeley National Laboratory", - "orcid": "0000-0002-1521-8534" - } + "creators": [ + { + "name": "Huebl, Axel", + "affiliation": "Lawrence Berkeley National Laboratory", + "orcid": "0000-0003-1943-7141" + }, + { + "name": "Poeschel, Franz", + "affiliation": "Center for Advanced Systems Understanding (CASUS)", + "orcid": "0000-0001-7042-5088" + }, + { + "name": "Koller, Fabian", + "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", + "orcid": "0000-0001-8704-1769" + }, + { + "name": "Gu, Junmin", + "affiliation": "Lawrence Berkeley National Laboratory", + "orcid": "0000-0002-1521-8534" + } - ], - "contributors": [ - { - "affiliation": "EU XFEL GmbH", - "name": "Fortmann-Grote, Carsten", - "orcid": "0000-0002-2579-5546", - "type": "Other" - }, - { - "affiliation": "Warsaw University of Technology", - "name": "Stańczak, Dominik", - "orcid": "0000-0001-6291-8843", - "type": "Other" - }, - { - "affiliation": "Fermi National Accelerator Laboratory", - "name": "Amundson, James", - "orcid": "0000-0001-6918-2728", - "type": "Other" - }, - { - "affiliation": "Anaconda, Inc.", - "name": "Donnelly, Ray", - "type": "Other" - }, - { - "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", - "name": "Widera, René", - "orcid": "0000-0003-1642-0459", - "type": "Other" - }, - { - "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", - "name": "Zenker, Erik", - "orcid": "0000-0001-9417-8712", - "type": "Other" - }, - { - "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", - "name": "Bastrakov, Sergei", - "orcid": "0000-0003-3396-6154", - "type": "Other" - }, - { - "affiliation": "Lawrence Berkeley National Laboratory", - "name": "Lehe, Rémi", - "orcid": "0000-0002-3656-9659", - "type": "Other" - }, - { - "affiliation": "Lawrence Berkeley National Laboratory", - "name": "Amorim, Lígia Diana", - "orcid": "0000-0002-1445-0032", - "type": "Other" - }, - { - "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", - "name": "Bastrakova, Kseniia", - "type": "Other" - }, - { - "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", - "name": "Pausch, Richard", - "orcid": "0000-0001-7990-9564", - "type": "Other" - }, - { - "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", - "name": "Ordyna, Paweł", - "type": "Other" - }, - { - "affiliation": "Oak Ridge National Laboratory", - "name": "Ganyushin, Dmitry", - "orcid": 
"0000-0001-7337-2161", - "type": "Other" - }, - { - "affiliation": "NVIDIA", - "name": "Kirkham, John", - "type": "Other" - }, - { - "affiliation": "Perimeter Institute for Theoretical Physics", - "name": "Schnetter, Erik", - "orcid": "0000-0002-4518-9017", - "type": "Other" - }, - { - "affiliation": "Lawrence Berkeley National Laboratory", - "name": "Bez, Jean Luca", - "orcid": "0000-0002-3915-1135", - "type": "Other" - } - ], - "title": "C++ & Python API for Scientific I/O with openPMD", - "access_right": "open", - "upload_type": "software", - "license": "LGPL-3.0", - "pub_id": "27579", - "description": "openPMD is an open metadata format for open data workflows in open science. This library provides a common high-level API for openPMD writing and reading. It provides a common interface to I/O libraries and file formats such as HDF5 and ADIOS. Where supported, openPMD-api implements both serial and MPI parallel I/O capabilities.", - "keywords": [ - "openPMD", - "Open Science", - "Open Data", - "HDF5", - "ADIOS", - "data", - "MPI", - "HPC", - "research", - "file-format", - "file-handling" - ], - "notes": "Supported by the Exascale Computing Project (17-SC-20-SC), a collaborative effort of two U.S. Department of Energy organizations (Office of Science and the National Nuclear Security Administration). Supported by the Consortium for Advanced Modeling of Particles Accelerators (CAMPA), funded by the U.S. DOE Office of Science under Contract No. DE-AC02-05CH11231. This work was partially funded by the Center of Advanced Systems Understanding (CASUS), which is financed by Germany's Federal Ministry of Education and Research (BMBF) and by the Saxon Ministry for Science, Culture and Tourism (SMWK) with tax funds on the basis of the budget approved by the Saxon State Parliament.", - "grants": [ - { - "id": "654220" - } - ], - "related_identifiers": [ - { - "identifier": "DOI:10.5281/zenodo.1167843", - "relation": "isCitedBy" - }, - { - "identifier": "DOI:10.5281/zenodo.1069534", - "relation": "isCitedBy" - }, - { - "identifier": "DOI:10.5281/zenodo.33624", - "relation": "isCitedBy" - } - ] + ], + "contributors": [ + { + "affiliation": "EU XFEL GmbH", + "name": "Fortmann-Grote, Carsten", + "orcid": "0000-0002-2579-5546", + "type": "Other" + }, + { + "affiliation": "Warsaw University of Technology", + "name": "Stańczak, Dominik", + "orcid": "0000-0001-6291-8843", + "type": "Other" + }, + { + "affiliation": "Fermi National Accelerator Laboratory", + "name": "Amundson, James", + "orcid": "0000-0001-6918-2728", + "type": "Other" + }, + { + "affiliation": "Anaconda, Inc.", + "name": "Donnelly, Ray", + "type": "Other" + }, + { + "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", + "name": "Widera, René", + "orcid": "0000-0003-1642-0459", + "type": "Other" + }, + { + "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", + "name": "Zenker, Erik", + "orcid": "0000-0001-9417-8712", + "type": "Other" + }, + { + "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", + "name": "Bastrakov, Sergei", + "orcid": "0000-0003-3396-6154", + "type": "Other" + }, + { + "affiliation": "Lawrence Berkeley National Laboratory", + "name": "Lehe, Rémi", + "orcid": "0000-0002-3656-9659", + "type": "Other" + }, + { + "affiliation": "Lawrence Berkeley National Laboratory", + "name": "Amorim, Lígia Diana", + "orcid": "0000-0002-1445-0032", + "type": "Other" + }, + { + "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", + "name": "Bastrakova, Kseniia", + "type": "Other" + }, + { + "affiliation": "Helmholtz-Zentrum 
Dresden-Rossendorf", + "name": "Pausch, Richard", + "orcid": "0000-0001-7990-9564", + "type": "Other" + }, + { + "affiliation": "Helmholtz-Zentrum Dresden-Rossendorf", + "name": "Ordyna, Paweł", + "type": "Other" + }, + { + "affiliation": "Oak Ridge National Laboratory", + "name": "Ganyushin, Dmitry", + "orcid": "0000-0001-7337-2161", + "type": "Other" + }, + { + "affiliation": "NVIDIA", + "name": "Kirkham, John", + "type": "Other" + }, + { + "affiliation": "Perimeter Institute for Theoretical Physics", + "name": "Schnetter, Erik", + "orcid": "0000-0002-4518-9017", + "type": "Other" + }, + { + "affiliation": "Lawrence Berkeley National Laboratory", + "name": "Bez, Jean Luca", + "orcid": "0000-0002-3915-1135", + "type": "Other" + } + ], + "title": "C++ & Python API for Scientific I/O with openPMD", + "access_right": "open", + "upload_type": "software", + "license": "LGPL-3.0", + "pub_id": "27579", + "description": "openPMD is an open metadata format for open data workflows in open science. This library provides a common high-level API for openPMD writing and reading. It provides a common interface to I/O libraries and file formats such as HDF5 and ADIOS. Where supported, openPMD-api implements both serial and MPI parallel I/O capabilities.", + "keywords": [ + "openPMD", + "Open Science", + "Open Data", + "HDF5", + "ADIOS", + "data", + "MPI", + "HPC", + "research", + "file-format", + "file-handling" + ], + "notes": "Supported by the Exascale Computing Project (17-SC-20-SC), a collaborative effort of two U.S. Department of Energy organizations (Office of Science and the National Nuclear Security Administration). Supported by the Consortium for Advanced Modeling of Particles Accelerators (CAMPA), funded by the U.S. DOE Office of Science under Contract No. DE-AC02-05CH11231. This work was partially funded by the Center of Advanced Systems Understanding (CASUS), which is financed by Germany's Federal Ministry of Education and Research (BMBF) and by the Saxon Ministry for Science, Culture and Tourism (SMWK) with tax funds on the basis of the budget approved by the Saxon State Parliament.", + "grants": [ + { + "id": "654220" + } + ], + "related_identifiers": [ + { + "identifier": "DOI:10.5281/zenodo.1167843", + "relation": "isCitedBy" + }, + { + "identifier": "DOI:10.5281/zenodo.1069534", + "relation": "isCitedBy" + }, + { + "identifier": "DOI:10.5281/zenodo.33624", + "relation": "isCitedBy" + } + ] } diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 105aa79808..beef923c93 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -3,6 +3,73 @@ Changelog ========= +0.14.5 +------ +**Date:** 2022-06-07 + +Improve Series Parsing, Python & Fix Backend Bugs + +This release improves reading back iterations that overflow the specified zero-pattern. +ADIOS1, ADIOS2 and HDF5 backend stability and performance were improved. +Python bindings got additional wheel platform support and various smaller issues were fixed. 
+ +Changes to "0.14.4" +^^^^^^^^^^^^^^^^^^^ + +Bug Fixes +""""""""" + +- Series and iterations: + + - fix read of overflowing zero patterns #1173 #1253 + - fix for opening an iteration #1239 +- ADIOS1: + + - fix use-after-free in ``ADIOS1IOHandler`` #1224 + - Remove task from IO queue if it fails with exception #1179 +- ADIOS2: + + - Remove deprecated debug parameter in ADIOS2 #1269 + - Add memory leak suppression: ``ps_make_timer_name_`` #1235 + - Don't safeguard empty strings while reading #1223 +- HDF5: + + - missing HDF5 include #1236 +- Python: + + - Wheels: Fix macOS arm64 (M1) builds #1233 + - Python Iteration: Fix ``__repr__`` (time) #1242 + - Increase reference count also in other ``load_chunk`` overload #1225 + - Do Not Strip Symbols In Debug #1219 + - Patch MSVC pybind11 debug bug #1209 + +Other +""""" + +- HDF5: + + - Improve write time by disabling fill #1192 + - Update documented HDF5 versions with collective metadata issues #1250 +- Print warning if mpi4py is not found in ``openpmd-pipe`` #1186 +- Pass-through flushing parameters #1226 +- Clang-Format #1032 #1222 +- Warnings: + + - Avoid copying std::string in for loop #1268 + - SerialIOTest: Fix GCC Pragma Check #1213 #1260 + - Fix ``-Wsign-compare`` #1202 +- CI: + + - Fix Conda Build - <3 Mamba #1261 + - Fix Spack #1244 + - Update CUDA repo key #1256 + - NVHPC New Apt Repo #1241 +- Python: + + - ``setup.py``: Extra CMake Arg Control #1199 + - sign compare warning #1198 + + 0.14.4 ------ **Date:** 2022-01-21 diff --git a/CITATION.cff b/CITATION.cff index 9bb5879e7b..e97ee83c20 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -25,7 +25,7 @@ contact: orcid: https://orcid.org/0000-0003-1943-7141 email: axelhuebl@lbl.gov title: "openPMD-api: C++ & Python API for Scientific I/O with openPMD" -version: 0.14.4 +version: 0.14.5 repository-code: https://github.com/openPMD/openPMD-api doi: 10.14278/rodare.27 license: LGPL-3.0-or-later @@ -42,4 +42,4 @@ keywords: - openscience - meta-data - adios -- openpmd +- openpmd diff --git a/CMakeLists.txt b/CMakeLists.txt index c8eeec006f..141ec8f65d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,7 +2,7 @@ # cmake_minimum_required(VERSION 3.15.0) -project(openPMD VERSION 0.14.4) # LANGUAGES CXX +project(openPMD VERSION 0.14.5) # LANGUAGES CXX # the openPMD "markup"/"schema" standard version set(openPMD_STANDARD_VERSION 1.1.0) @@ -681,8 +681,15 @@ if(openPMD_HAVE_PYTHON) endif() unset(_USE_PY_LTO) - pybind11_extension(openPMD.py) - pybind11_strip(openPMD.py) + if(EMSCRIPTEN) + set_target_properties(openPMD.py PROPERTIES + PREFIX "") + else() + pybind11_extension(openPMD.py) + endif() + if(NOT MSVC AND NOT ${CMAKE_BUILD_TYPE} MATCHES Debug|RelWithDebInfo) + pybind11_strip(openPMD.py) + endif() set_target_properties(openPMD.py PROPERTIES CXX_VISIBILITY_PRESET "hidden" CUDA_VISIBILITY_PRESET "hidden") @@ -890,11 +897,11 @@ if(CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR) # /usr/lib/llvm-6.0/lib/clang/6.0.0/lib/linux/libclang_rt.ubsan_minimal-x86_64.so # at runtime when used with symbol-hidden code (e.g. 
pybind11 module) - set(CMAKE_CXX_FLAGS "-Wall -Wextra -Wpedantic -Wshadow -Woverloaded-virtual -Wextra-semi -Wunreachable-code ${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "-Wall -Wextra -Wpedantic -Wshadow -Woverloaded-virtual -Wextra-semi -Wunreachable-code -Wsign-compare ${CMAKE_CXX_FLAGS}") elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel") set(CMAKE_CXX_FLAGS "-w3 -wd193,383,1572 ${CMAKE_CXX_FLAGS}") elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") - set(CMAKE_CXX_FLAGS "-Wall -Wextra -Wpedantic -Wshadow -Woverloaded-virtual -Wunreachable-code ${CMAKE_CXX_FLAGS}") + set(CMAKE_CXX_FLAGS "-Wall -Wextra -Wpedantic -Wshadow -Woverloaded-virtual -Wunreachable-code -Wsign-compare ${CMAKE_CXX_FLAGS}") elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") # Warning C4503: "decorated name length exceeded, name was truncated" # Symbols longer than 4096 chars are truncated (and hashed instead) diff --git a/COPYING b/COPYING index 10926e87f1..94a9ed024d 100644 --- a/COPYING +++ b/COPYING @@ -672,4 +672,3 @@ may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . - diff --git a/COPYING.LESSER b/COPYING.LESSER index 341c30bda4..65c5ca88a6 100644 --- a/COPYING.LESSER +++ b/COPYING.LESSER @@ -163,4 +163,3 @@ whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. - diff --git a/README.md b/README.md index 8f251467e3..f48d46a5ea 100644 --- a/README.md +++ b/README.md @@ -212,6 +212,8 @@ export CMAKE_INTERPROCEDURAL_OPTIMIZATION=OFF python3 -m pip install openpmd-api --no-binary openpmd-api ``` +Additional CMake options can be passed via individual environment variables, which need to be prefixed with `openPMD_CMAKE_`. 
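As a sketch of how that pass-through could be used together with the from-source pip build shown above — the variable name `openPMD_CMAKE_BUILD_TESTING` and its mapping to a CMake `-D` define are assumptions here, see `setup.py` (#1199) for the exact semantics of the `openPMD_CMAKE_` prefix:

```bash
# hypothetical: forward one extra CMake option through the pip build
# (variable name and -D mapping assumed; check setup.py for the real behavior)
export openPMD_CMAKE_BUILD_TESTING=OFF
export CMAKE_INTERPROCEDURAL_OPTIMIZATION=OFF
python3 -m pip install openpmd-api --no-binary openpmd-api
```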
+ ### From Source [![Source Use Case](https://img.shields.io/badge/use_case-development-brightgreen)](https://cmake.org) @@ -436,4 +438,3 @@ openPMD-api stands on the shoulders of giants and we are grateful for the follow * the [CMake build system](https://cmake.org) and [contributors](https://github.com/Kitware/CMake/blob/master/Copyright.txt) * packaging support by the [conda-forge](https://conda-forge.org), [PyPI](https://pypi.org) and [Spack](https://spack.io) communities, among others * the [openPMD-standard](https://github.com/openPMD/openPMD-standard) by [Axel Huebl (HZDR, now LBNL)](https://github.com/ax3l) and [contributors](https://github.com/openPMD/openPMD-standard/blob/latest/AUTHORS.md) - diff --git a/cmake/try_variant.cpp b/cmake/try_variant.cpp index 51b8c094f8..455c47738c 100644 --- a/cmake/try_variant.cpp +++ b/cmake/try_variant.cpp @@ -23,25 +23,24 @@ #include #include #if __cplusplus >= 201703L -# include // IWYU pragma: export +#include // IWYU pragma: export #else -# error "Not a C++17 implementation" +#error "Not a C++17 implementation" #endif - int main() { - std::variant< int, float > v; + std::variant v; v = 42; - int i = std::get< int >(v); + int i = std::get(v); assert(42 == i); - assert(42 == std::get< 0 >(v)); + assert(42 == std::get<0>(v)); try { - std::get< float >(v); + std::get(v); } - catch( std::bad_variant_access const & ex ) + catch (std::bad_variant_access const &ex) { std::cout << ex.what() << std::endl; } diff --git a/conda.yml b/conda.yml index fa71660ab8..4f450c790a 100644 --- a/conda.yml +++ b/conda.yml @@ -33,6 +33,7 @@ dependencies: - pandas - pkg-config - pip + - pre-commit - pyarrow # for dask # - pybind11 # shipped internally - python>=3.6 diff --git a/docs/requirements.txt b/docs/requirements.txt index d4d42f3f5b..8fa51acf75 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,14 +1,14 @@ -sphinx_rtd_theme>=0.3.1 -recommonmark -sphinx<4.0 +breathe>=4.5,<4.15 # docutils 0.17 breaks HTML tags & RTD theme # https://github.com/sphinx-doc/sphinx/issues/9001 docutils<=0.16 -breathe>=4.5,<4.15 -sphinxcontrib.programoutput -sphinxcontrib-svg2pdfconverter -pygments # generate plots matplotlib -scipy numpy>=1.15 +pygments +recommonmark +scipy +sphinx<4.0 +sphinx_rtd_theme>=0.3.1 +sphinxcontrib-svg2pdfconverter +sphinxcontrib.programoutput diff --git a/docs/source/backends/hdf5.rst b/docs/source/backends/hdf5.rst index 5d6c21c424..4387b5dcf9 100644 --- a/docs/source/backends/hdf5.rst +++ b/docs/source/backends/hdf5.rst @@ -97,10 +97,9 @@ Known Issues .. warning:: Jul 23th, 2021 (`HDFFV-11260 `__): - Collective HDF5 metadata reads became broken in 1.10.5. - Consider using 1.10.4 if you plan to enable the collective HDF5 metadata operations optimization in openPMD (``OPENPMD_HDF5_COLLECTIVE_METADATA=ON``). - Enabling this feature with a newer version will make HDF5 fall back to the individual metadata operations. - HDF5 plans to fix the issue in the upcoming 1.10.8+ and 1.12.2+ releases, but visit the issue tracker above to see the status of the bug fix. + Collective HDF5 metadata reads (``OPENPMD_HDF5_COLLECTIVE_METADATA=ON``) broke in 1.10.5, falling back to individual metadata operations. + HDF5 releases 1.10.4 and earlier are not affected; versions 1.10.9+, 1.12.2+ and 1.13.1+ fixed the issue. 
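For reference, a small runtime sketch of the toggle referenced in the warning above; the launcher invocation and application name are placeholders, only ``OPENPMD_HDF5_COLLECTIVE_METADATA`` and the value ``ON`` are taken from the text:

```bash
# enable the collective metadata optimization at runtime (sketch only);
# on the affected HDF5 releases listed above it silently falls back to
# individual metadata operations
export OPENPMD_HDF5_COLLECTIVE_METADATA=ON
mpiexec -n 4 ./my_parallel_openpmd_app   # placeholder MPI application
```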
+ Selected References ------------------- diff --git a/docs/source/backends/json.rst b/docs/source/backends/json.rst index a02eeee831..752497aa09 100644 --- a/docs/source/backends/json.rst +++ b/docs/source/backends/json.rst @@ -86,4 +86,3 @@ when picking the JSON backend: .. literalinclude:: json_example.json :language: json - diff --git a/docs/source/citation.rst b/docs/source/citation.rst index dca1e51a57..4a049e3c54 100644 --- a/docs/source/citation.rst +++ b/docs/source/citation.rst @@ -130,4 +130,3 @@ Python .format(io.__version__)) print("openPMD-api backend variants: {}" .format(io.variants)) - diff --git a/docs/source/conf.py b/docs/source/conf.py index 8eacfd47ea..11d5eeb701 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -15,7 +15,9 @@ # import os import subprocess + from recommonmark.parser import CommonMarkParser + # import sys # sys.path.insert(0, os.path.abspath('.')) @@ -83,9 +85,9 @@ # built documents. # # The short X.Y version. -version = u'0.14.4' +version = u'0.14.5' # The full version, including alpha/beta/rc tags. -release = u'0.14.4' +release = u'0.14.5' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/source/details/adios2.json b/docs/source/details/adios2.json index 456c83707b..d71061c0bc 100644 --- a/docs/source/details/adios2.json +++ b/docs/source/details/adios2.json @@ -12,8 +12,8 @@ { "type": "blosc", "parameters": { - "clevel": "1", - "doshuffle": "BLOSC_BITSHUFFLE" + "clevel": "1", + "doshuffle": "BLOSC_BITSHUFFLE" } } ] diff --git a/docs/source/details/config_layout.json b/docs/source/details/config_layout.json index 1e86d85876..5b0020b02d 100644 --- a/docs/source/details/config_layout.json +++ b/docs/source/details/config_layout.json @@ -1,6 +1,6 @@ { - "adios": "put ADIOS config here", - "adios2": "put ADIOS2 config here", - "hdf5": "put HDF5 config here", - "json": "put JSON config here" + "adios": "put ADIOS config here", + "adios2": "put ADIOS2 config here", + "hdf5": "put HDF5 config here", + "json": "put JSON config here" } diff --git a/docs/source/dev/sphinx.rst b/docs/source/dev/sphinx.rst index a513f1108f..8cd626c3bf 100644 --- a/docs/source/dev/sphinx.rst +++ b/docs/source/dev/sphinx.rst @@ -7,7 +7,7 @@ In the following section we explain how to contribute to this documentation. If you are reading the HTML version on http://openPMD-api.readthedocs.io and want to improve or correct existing pages, check the "Edit on GitHub" link on the right upper corner of each document. -Alternatively, go to `docs/source` in our source code and follow the directory structure of `reStructuredText`_ (``.rst``) files there. +Alternatively, go to ``docs/source`` in our source code and follow the directory structure of `reStructuredText`_ (``.rst``) files there. For intrusive changes, like structural changes to chapters, please open an issue to discuss them beforehand. .. 
_reStructuredText: http://www.sphinx-doc.org/en/stable/rest.html @@ -46,7 +46,7 @@ Please check your documentation build is successful and renders as you expected # skip this if you are still in docs/ cd docs/ - # render the `.rst` files and replace their macros within + # render the ``.rst`` files and replace their macros within # enjoy the breathe errors on things it does not understand from doxygen :) make html @@ -66,4 +66,3 @@ Useful Links * `A primer on writing restFUL files for sphinx `_ * `Why You Shouldn't Use "Markdown" for Documentation `_ * `Markdown Limitations in Sphinx `_ - diff --git a/docs/source/index.rst b/docs/source/index.rst index d68bf9607d..925aa36a27 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -42,7 +42,7 @@ openPMD-api version supported openPMD standard versions ======================== =================================== ``2.0.0+`` ``2.0.0+`` (not released yet) ``1.0.0+`` ``1.0.1-1.1.0`` (not released yet) -``0.13.1-0.14.4`` (beta) ``1.0.0-1.1.0`` +``0.13.1-0.14.5`` (beta) ``1.0.0-1.1.0`` ``0.1.0-0.12.0`` (alpha) ``1.0.0-1.1.0`` ======================== =================================== diff --git a/docs/source/install/conda.svg b/docs/source/install/conda.svg index 0755b2f460..643a653202 100644 --- a/docs/source/install/conda.svg +++ b/docs/source/install/conda.svg @@ -1 +1 @@ - \ No newline at end of file + diff --git a/docs/source/install/install.rst b/docs/source/install/install.rst index 3b5f54dcd1..6df73eaae7 100644 --- a/docs/source/install/install.rst +++ b/docs/source/install/install.rst @@ -115,6 +115,8 @@ For some exotic architectures and compilers, you might need to disable a compile # optional: --user python3 -m pip install openpmd-api --no-binary openpmd-api +Additional CMake options can be passed via individual environment variables, which need to be prefixed with ``openPMD_CMAKE_``. + .. _install-cmake: .. only:: html diff --git a/docs/source/maintenance/release.rst b/docs/source/maintenance/release.rst index 660a398283..cf16f8ffbc 100644 --- a/docs/source/maintenance/release.rst +++ b/docs/source/maintenance/release.rst @@ -80,7 +80,7 @@ In order to update the *latest* Doxygen C++ API docs, located under http://www.o # assuming a clean source tree git checkout gh-pages - # stash anything that the regular branches have in `.gitignore` + # stash anything that the regular branches have in ``.gitignore`` git stash --include-untracked # optional first argument is branch/tag on mainline repo, default: dev diff --git a/docs/source/usage/streaming.rst b/docs/source/usage/streaming.rst index adf2d4f4d7..c72c1121db 100644 --- a/docs/source/usage/streaming.rst +++ b/docs/source/usage/streaming.rst @@ -92,4 +92,4 @@ Note that a closed iteration cannot be reopened. This pays tribute to the fact that in streaming mode, an iteration is sent to the sink upon closing it and the data source can no longer modify it. .. 
literalinclude:: 10_streaming_write.py - :language: python3 \ No newline at end of file + :language: python3 diff --git a/environment.yml b/environment.yml new file mode 100644 index 0000000000..5cdc6f6f02 --- /dev/null +++ b/environment.yml @@ -0,0 +1,7 @@ +# I am a conda environment, used for our pre-commit hooks +name: openPMD-api-dev +channels: + - conda-forge +dependencies: + - clang-format-12=12.0.1 + - bash=5 diff --git a/examples/10_streaming_read.cpp b/examples/10_streaming_read.cpp index e960447ad5..e271dd4393 100644 --- a/examples/10_streaming_read.cpp +++ b/examples/10_streaming_read.cpp @@ -8,49 +8,48 @@ using std::cout; using namespace openPMD; -int -main() +int main() { #if openPMD_HAVE_ADIOS2 using position_t = double; auto backends = openPMD::getFileExtensions(); - if( std::find( backends.begin(), backends.end(), "sst" ) == backends.end() ) + if (std::find(backends.begin(), backends.end(), "sst") == backends.end()) { std::cout << "SST engine not available in ADIOS2." << std::endl; return 0; } - Series series = Series( "electrons.sst", Access::READ_ONLY ); + Series series = Series("electrons.sst", Access::READ_ONLY); - for( IndexedIteration iteration : series.readIterations() ) + for (IndexedIteration iteration : series.readIterations()) { std::cout << "Current iteration: " << iteration.iterationIndex << std::endl; - Record electronPositions = iteration.particles[ "e" ][ "position" ]; - std::array< std::shared_ptr< position_t >, 3 > loadedChunks; - std::array< Extent, 3 > extents; - std::array< std::string, 3 > const dimensions{ { "x", "y", "z" } }; + Record electronPositions = iteration.particles["e"]["position"]; + std::array, 3> loadedChunks; + std::array extents; + std::array const dimensions{{"x", "y", "z"}}; - for( size_t i = 0; i < 3; ++i ) + for (size_t i = 0; i < 3; ++i) { - std::string dim = dimensions[ i ]; - RecordComponent rc = electronPositions[ dim ]; - loadedChunks[ i ] = rc.loadChunk< position_t >( - Offset( rc.getDimensionality(), 0 ), rc.getExtent() ); - extents[ i ] = rc.getExtent(); + std::string dim = dimensions[i]; + RecordComponent rc = electronPositions[dim]; + loadedChunks[i] = rc.loadChunk( + Offset(rc.getDimensionality(), 0), rc.getExtent()); + extents[i] = rc.getExtent(); } iteration.close(); - for( size_t i = 0; i < 3; ++i ) + for (size_t i = 0; i < 3; ++i) { - std::string dim = dimensions[ i ]; - Extent const & extent = extents[ i ]; + std::string dim = dimensions[i]; + Extent const &extent = extents[i]; std::cout << "\ndim: " << dim << "\n" << std::endl; - auto chunk = loadedChunks[ i ]; - for( size_t j = 0; j < extent[ 0 ]; ++j ) + auto chunk = loadedChunks[i]; + for (size_t j = 0; j < extent[0]; ++j) { - std::cout << chunk.get()[ j ] << ", "; + std::cout << chunk.get()[j] << ", "; } std::cout << "\n----------\n" << std::endl; } diff --git a/examples/10_streaming_read.py b/examples/10_streaming_read.py index f2ff14d6db..5175a29e9f 100755 --- a/examples/10_streaming_read.py +++ b/examples/10_streaming_read.py @@ -1,7 +1,8 @@ #!/usr/bin/env python -import openpmd_api as io import sys +import openpmd_api as io + if __name__ == "__main__": if 'adios2' not in io.variants or not io.variants['adios2']: print('This example requires ADIOS2') diff --git a/examples/10_streaming_write.cpp b/examples/10_streaming_write.cpp index 66b5825b2c..1c12e034f1 100644 --- a/examples/10_streaming_write.cpp +++ b/examples/10_streaming_write.cpp @@ -8,41 +8,39 @@ using std::cout; using namespace openPMD; -int -main() +int main() { #if openPMD_HAVE_ADIOS2 using 
position_t = double; auto backends = openPMD::getFileExtensions(); - if( std::find( backends.begin(), backends.end(), "sst" ) == backends.end() ) + if (std::find(backends.begin(), backends.end(), "sst") == backends.end()) { std::cout << "SST engine not available in ADIOS2." << std::endl; return 0; } // open file for writing - Series series = Series( "electrons.sst", Access::CREATE ); + Series series = Series("electrons.sst", Access::CREATE); - Datatype datatype = determineDatatype< position_t >(); + Datatype datatype = determineDatatype(); constexpr unsigned long length = 10ul; - Extent global_extent = { length }; - Dataset dataset = Dataset( datatype, global_extent ); - std::shared_ptr< position_t > local_data( - new position_t[ length ], - []( position_t const * ptr ) { delete[] ptr; } ); + Extent global_extent = {length}; + Dataset dataset = Dataset(datatype, global_extent); + std::shared_ptr local_data( + new position_t[length], [](position_t const *ptr) { delete[] ptr; }); WriteIterations iterations = series.writeIterations(); - for( size_t i = 0; i < 100; ++i ) + for (size_t i = 0; i < 100; ++i) { - Iteration iteration = iterations[ i ]; - Record electronPositions = iteration.particles[ "e" ][ "position" ]; + Iteration iteration = iterations[i]; + Record electronPositions = iteration.particles["e"]["position"]; - std::iota( local_data.get(), local_data.get() + length, i * length ); - for( auto const & dim : { "x", "y", "z" } ) + std::iota(local_data.get(), local_data.get() + length, i * length); + for (auto const &dim : {"x", "y", "z"}) { - RecordComponent pos = electronPositions[ dim ]; - pos.resetDataset( dataset ); - pos.storeChunk( local_data, Offset{ 0 }, global_extent ); + RecordComponent pos = electronPositions[dim]; + pos.resetDataset(dataset); + pos.storeChunk(local_data, Offset{0}, global_extent); } iteration.close(); } @@ -54,4 +52,4 @@ main() << std::endl; return 0; #endif -} \ No newline at end of file +} diff --git a/examples/10_streaming_write.py b/examples/10_streaming_write.py index 9ac7e468d2..2ceea706d5 100755 --- a/examples/10_streaming_write.py +++ b/examples/10_streaming_write.py @@ -1,8 +1,9 @@ #!/usr/bin/env python -import openpmd_api as io -import numpy as np import sys +import numpy as np +import openpmd_api as io + if __name__ == "__main__": if 'adios2' not in io.variants or not io.variants['adios2']: print('This example requires ADIOS2') diff --git a/examples/11_particle_dataframe.py b/examples/11_particle_dataframe.py index d3ae34a18d..9b5e626705 100755 --- a/examples/11_particle_dataframe.py +++ b/examples/11_particle_dataframe.py @@ -6,9 +6,11 @@ Authors: Axel Huebl, Dmitry Ganyushin License: LGPLv3+ """ -import openpmd_api as io -import numpy as np import sys + +import numpy as np +import openpmd_api as io + try: import pandas as pd except ImportError: @@ -16,9 +18,9 @@ sys.exit() found_dask = False try: - from dask.delayed import delayed - import dask.array as da import dask + import dask.array as da + from dask.delayed import delayed found_dask = True except ImportError: print("dask NOT found. 
Install dask to run the 2nd example.") diff --git a/examples/12_span_write.cpp b/examples/12_span_write.cpp index a1162edbb3..6afcb18fe4 100644 --- a/examples/12_span_write.cpp +++ b/examples/12_span_write.cpp @@ -5,31 +5,31 @@ #include // std::iota #include -void span_write( std::string const & filename ) +void span_write(std::string const &filename) { using namespace openPMD; using position_t = double; // open file for writing - Series series = Series( filename, Access::CREATE ); + Series series = Series(filename, Access::CREATE); - Datatype datatype = determineDatatype< position_t >(); + Datatype datatype = determineDatatype(); constexpr unsigned long length = 10ul; - Extent extent = { length }; - Dataset dataset = Dataset( datatype, extent ); + Extent extent = {length}; + Dataset dataset = Dataset(datatype, extent); - std::vector< position_t > fallbackBuffer; + std::vector fallbackBuffer; WriteIterations iterations = series.writeIterations(); - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - Iteration iteration = iterations[ i ]; - Record electronPositions = iteration.particles[ "e" ][ "position" ]; + Iteration iteration = iterations[i]; + Record electronPositions = iteration.particles["e"]["position"]; size_t j = 0; - for( auto const & dim : { "x", "y", "z" } ) + for (auto const &dim : {"x", "y", "z"}) { - RecordComponent pos = electronPositions[ dim ]; - pos.resetDataset( dataset ); + RecordComponent pos = electronPositions[dim]; + pos.resetDataset(dataset); /* * This demonstrates the storeChunk() strategy (to be) used in * PIConGPU: @@ -45,16 +45,15 @@ void span_write( std::string const & filename ) * flushed in each iteration to make the buffer reusable. */ bool fallbackBufferIsUsed = false; - auto dynamicMemoryView = pos.storeChunk< position_t >( - Offset{ 0 }, + auto dynamicMemoryView = pos.storeChunk( + Offset{0}, extent, - [ &fallbackBuffer, &fallbackBufferIsUsed ]( size_t size ) - { + [&fallbackBuffer, &fallbackBufferIsUsed](size_t size) { fallbackBufferIsUsed = true; - fallbackBuffer.resize( size ); - return std::shared_ptr< position_t >( - fallbackBuffer.data(), []( auto const * ) {} ); - } ); + fallbackBuffer.resize(size); + return std::shared_ptr( + fallbackBuffer.data(), [](auto const *) {}); + }); /* * ADIOS2 might reallocate its internal buffers when writing @@ -63,21 +62,21 @@ void span_write( std::string const & filename ) * directly before writing. */ auto span = dynamicMemoryView.currentBuffer(); - if( ( i + j ) % 2 == 0 ) + if ((i + j) % 2 == 0) { std::iota( span.begin(), span.end(), - position_t( 3 * i * length + j * length ) ); + position_t(3 * i * length + j * length)); } else { std::iota( span.rbegin(), span.rend(), - position_t( 3 * i * length + j * length ) ); + position_t(3 * i * length + j * length)); } - if( fallbackBufferIsUsed ) + if (fallbackBufferIsUsed) { iteration.seriesFlush(); } @@ -89,12 +88,12 @@ void span_write( std::string const & filename ) int main() { - for( auto const & ext : openPMD::getFileExtensions() ) + for (auto const &ext : openPMD::getFileExtensions()) { - if( ext == "sst" || ext == "ssc" ) + if (ext == "sst" || ext == "ssc") { continue; } - span_write( "../samples/span_write." + ext ); + span_write("../samples/span_write." 
+ ext); } } diff --git a/examples/12_span_write.py b/examples/12_span_write.py index f6c40b6324..c776bd04a7 100644 --- a/examples/12_span_write.py +++ b/examples/12_span_write.py @@ -1,5 +1,5 @@ -import openpmd_api as io import numpy as np +import openpmd_api as io def span_write(filename): diff --git a/examples/1_structure.cpp b/examples/1_structure.cpp index 4cbeeb36c2..906dc53344 100644 --- a/examples/1_structure.cpp +++ b/examples/1_structure.cpp @@ -20,28 +20,33 @@ */ #include - using namespace openPMD; int main() { - /* The root of any openPMD output spans across all data for all iterations is a 'Series'. - * Data is either in a single file or spread across multiple files. */ + /* The root of any openPMD output spans across all data for all iterations + * is a 'Series'. Data is either in a single file or spread across multiple + * files. */ Series series = Series("../samples/1_structure.h5", Access::CREATE); - /* Every element that structures your file (groups and datasets for example) can be annotated with attributes. */ - series.setComment("This string will show up at the root ('/') of the output with key 'comment'."); + /* Every element that structures your file (groups and datasets for example) + * can be annotated with attributes. */ + series.setComment( + "This string will show up at the root ('/') of the output with key " + "'comment'."); - /* Access to individual positions inside happens hierarchically, according to the openPMD standard. - * Creation of new elements happens on access inside the tree-like structure. - * Required attributes are initialized to reasonable defaults for every object. */ + /* Access to individual positions inside happens hierarchically, according + * to the openPMD standard. Creation of new elements happens on access + * inside the tree-like structure. Required attributes are initialized to + * reasonable defaults for every object. */ ParticleSpecies electrons = series.iterations[1].particles["electrons"]; - /* Data to be moved from memory to persistent storage is structured into Records, - * each holding an unbounded number of RecordComponents. - * If a Record only contains a single (scalar) component, it is treated slightly differently. + /* Data to be moved from memory to persistent storage is structured into + * Records, each holding an unbounded number of RecordComponents. If a + * Record only contains a single (scalar) component, it is treated slightly + * differently. 
* https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#scalar-vector-and-tensor-records*/ - Record mass = electrons["mass"]; + Record mass = electrons["mass"]; RecordComponent mass_scalar = mass[RecordComponent::SCALAR]; Dataset dataset = Dataset(Datatype::DOUBLE, Extent{1}); diff --git a/examples/2_read_serial.cpp b/examples/2_read_serial.cpp index 1bea5629a3..5cbea17ed2 100644 --- a/examples/2_read_serial.cpp +++ b/examples/2_read_serial.cpp @@ -20,52 +20,56 @@ */ #include +#include #include #include -#include - using std::cout; using namespace openPMD; int main() { - Series series = Series( - "../samples/git-sample/data%T.h5", - Access::READ_ONLY - ); - cout << "Read a Series with openPMD standard version " - << series.openPMD() << '\n'; + Series series = + Series("../samples/git-sample/data%T.h5", Access::READ_ONLY); + cout << "Read a Series with openPMD standard version " << series.openPMD() + << '\n'; - cout << "The Series contains " << series.iterations.size() << " iterations:"; - for( auto const& i : series.iterations ) + cout << "The Series contains " << series.iterations.size() + << " iterations:"; + for (auto const &i : series.iterations) cout << "\n\t" << i.first; cout << '\n'; Iteration i = series.iterations[100]; cout << "Iteration 100 contains " << i.meshes.size() << " meshes:"; - for( auto const& m : i.meshes ) + for (auto const &m : i.meshes) cout << "\n\t" << m.first; cout << '\n'; - cout << "Iteration 100 contains " << i.particles.size() << " particle species:"; - for( auto const& ps : i.particles ) { + cout << "Iteration 100 contains " << i.particles.size() + << " particle species:"; + for (auto const &ps : i.particles) + { cout << "\n\t" << ps.first; - for( auto const& r : ps.second ) { + for (auto const &r : ps.second) + { cout << "\n\t" << r.first; cout << '\n'; } } openPMD::ParticleSpecies electrons = i.particles["electrons"]; - std::shared_ptr charge = electrons["charge"][openPMD::RecordComponent::SCALAR].loadChunk(); + std::shared_ptr charge = + electrons["charge"][openPMD::RecordComponent::SCALAR] + .loadChunk(); series.flush(); - cout << "And the first electron particle has a charge = " << charge.get()[0]; + cout << "And the first electron particle has a charge = " + << charge.get()[0]; cout << '\n'; MeshRecordComponent E_x = i.meshes["E"]["x"]; Extent extent = E_x.getExtent(); cout << "Field E/x has shape ("; - for( auto const& dim : extent ) + for (auto const &dim : extent) cout << dim << ','; cout << ") and has datatype " << E_x.getDatatype() << '\n'; @@ -77,19 +81,19 @@ int main() series.flush(); cout << "Chunk has been read from disk\n" << "Read chunk contains:\n"; - for( size_t row = 0; row < chunk_extent[0]; ++row ) + for (size_t row = 0; row < chunk_extent[0]; ++row) { - for( size_t col = 0; col < chunk_extent[1]; ++col ) - cout << "\t" - << '(' << row + chunk_offset[0] << '|' << col + chunk_offset[1] << '|' << 1 << ")\t" - << chunk_data.get()[row*chunk_extent[1]+col]; + for (size_t col = 0; col < chunk_extent[1]; ++col) + cout << "\t" << '(' << row + chunk_offset[0] << '|' + << col + chunk_offset[1] << '|' << 1 << ")\t" + << chunk_data.get()[row * chunk_extent[1] + col]; cout << '\n'; } auto all_data = E_x.loadChunk(); series.flush(); cout << "Full E/x starts with:\n\t{"; - for( size_t col = 0; col < extent[1] && col < 5; ++col ) + for (size_t col = 0; col < extent[1] && col < 5; ++col) cout << all_data.get()[col] << ", "; cout << "...}\n"; diff --git a/examples/2_read_serial.py b/examples/2_read_serial.py index 4c1bf84402..9b68db4809 
100755 --- a/examples/2_read_serial.py +++ b/examples/2_read_serial.py @@ -8,7 +8,6 @@ """ import openpmd_api as io - if __name__ == "__main__": series = io.Series("../samples/git-sample/data%T.h5", io.Access.read_only) diff --git a/examples/2a_read_thetaMode_serial.cpp b/examples/2a_read_thetaMode_serial.cpp index 3b52984b53..473a6e7d0f 100644 --- a/examples/2a_read_thetaMode_serial.cpp +++ b/examples/2a_read_thetaMode_serial.cpp @@ -20,31 +20,30 @@ */ #include +#include #include #include -#include - using std::cout; using namespace openPMD; int main() { - Series series = Series( - "../samples/git-sample/thetaMode/data%T.h5", - Access::READ_ONLY - ); + Series series = + Series("../samples/git-sample/thetaMode/data%T.h5", Access::READ_ONLY); Iteration i = series.iterations[500]; MeshRecordComponent E_z_modes = i.meshes["E"]["z"]; - Extent extent = E_z_modes.getExtent(); // (modal components, r, z) + Extent extent = E_z_modes.getExtent(); // (modal components, r, z) // read E_z in all modes auto E_z_raw = E_z_modes.loadChunk(); // read E_z in mode_0 (one scalar field) - auto E_z_m0 = E_z_modes.loadChunk(Offset{0, 0, 0}, Extent{1, extent[1], extent[2]}); + auto E_z_m0 = E_z_modes.loadChunk( + Offset{0, 0, 0}, Extent{1, extent[1], extent[2]}); // read E_z in mode_1 (two fields; skip mode_0 with one scalar field) - auto E_z_m1 = E_z_modes.loadChunk(Offset{1, 0, 0}, Extent{2, extent[1], extent[2]}); + auto E_z_m1 = E_z_modes.loadChunk( + Offset{1, 0, 0}, Extent{2, extent[1], extent[2]}); series.flush(); // all this is still mode-decomposed data, not too useful for users @@ -54,8 +53,8 @@ int main() // user change frequency: time ~= component >> theta >> selected modes // thetaMode::ToCylindrical toCylindrical("all"); // thetaMode::ToCylindricalSlice toCylindricalSlice(1.5708, "all") - // reconstruction to 2D slice in cylindrical coordinates (r, z) for a fixed theta - // E_z_90deg = toCylindricalSlice(E_z_modes).loadChunk(); + // reconstruction to 2D slice in cylindrical coordinates (r, z) for a fixed + // theta E_z_90deg = toCylindricalSlice(E_z_modes).loadChunk(); // E_r_90deg = toCylindricalSlice(i.meshes["E"]["r"]).loadChunk(); // E_t_90deg = toCylindricalSlice(i.meshes["E"]["t"]).loadChunk(); // reconstruction to 3D cylindrical coordinates (r, t, z) @@ -64,9 +63,10 @@ int main() // reconstruction to 3D and 2D cartesian: E_x, E_y, E_z // thetaMode::ToCylindrical toCartesian({'x': 1.e-6, 'y': 1.e-6}, "all"); - // ... toCartesianSliceYZ({'x': 1.e-6, 'y': 1.e-6}, 'x', 0., "all"); // and absolute slice position - // E_z_xyz = toCartesian(E_z_modes).loadChunk(); # (x, y, z) - // E_z_yz = toCartesianSliceYZ(E_z_modes).loadChunk(); # (y, z) + // ... 
toCartesianSliceYZ({'x': 1.e-6, 'y': 1.e-6}, 'x', 0., + // "all"); // and absolute slice position E_z_xyz = + // toCartesian(E_z_modes).loadChunk(); # (x, y, z) E_z_yz = + // toCartesianSliceYZ(E_z_modes).loadChunk(); # (y, z) // series.flush(); /* The files in 'series' are still open until the object is destroyed, on diff --git a/examples/2a_read_thetaMode_serial.py b/examples/2a_read_thetaMode_serial.py index 1a88ac54c0..b2ec25bc20 100755 --- a/examples/2a_read_thetaMode_serial.py +++ b/examples/2a_read_thetaMode_serial.py @@ -8,7 +8,6 @@ """ import openpmd_api as io - if __name__ == "__main__": series = io.Series("../samples/git-sample/thetaMode/data%T.h5", io.Access.read_only) diff --git a/examples/3_write_serial.cpp b/examples/3_write_serial.cpp index cdd32a9e41..71628bc671 100644 --- a/examples/3_write_serial.cpp +++ b/examples/3_write_serial.cpp @@ -20,11 +20,10 @@ */ #include +#include #include #include #include -#include - using std::cout; using namespace openPMD; @@ -35,33 +34,30 @@ int main(int argc, char *argv[]) size_t size = (argc == 2 ? atoi(argv[1]) : 3); // matrix dataset to write with values 0...size*size-1 - std::vector global_data(size*size); + std::vector global_data(size * size); std::iota(global_data.begin(), global_data.end(), 0.); cout << "Set up a 2D square array (" << size << 'x' << size << ") that will be written\n"; // open file for writing - Series series = Series( - "../samples/3_write_serial.h5", - Access::CREATE - ); + Series series = Series("../samples/3_write_serial.h5", Access::CREATE); cout << "Created an empty " << series.iterationEncoding() << " Series\n"; MeshRecordComponent rho = - series - .iterations[1] - .meshes["rho"][MeshRecordComponent::SCALAR]; - cout << "Created a scalar mesh Record with all required openPMD attributes\n"; + series.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR]; + cout << "Created a scalar mesh Record with all required openPMD " + "attributes\n"; Datatype datatype = determineDatatype(shareRaw(global_data)); Extent extent = {size, size}; Dataset dataset = Dataset(datatype, extent); - cout << "Created a Dataset of size " << dataset.extent[0] << 'x' << dataset.extent[1] - << " and Datatype " << dataset.dtype << '\n'; + cout << "Created a Dataset of size " << dataset.extent[0] << 'x' + << dataset.extent[1] << " and Datatype " << dataset.dtype << '\n'; rho.resetDataset(dataset); - cout << "Set the dataset properties for the scalar field rho in iteration 1\n"; + cout << "Set the dataset properties for the scalar field rho in iteration " + "1\n"; series.flush(); cout << "File structure and required attributes have been written\n"; diff --git a/examples/3_write_serial.py b/examples/3_write_serial.py index 46dc536fb2..5aabd2998a 100755 --- a/examples/3_write_serial.py +++ b/examples/3_write_serial.py @@ -6,9 +6,8 @@ Authors: Axel Huebl License: LGPLv3+ """ -import openpmd_api as io import numpy as np - +import openpmd_api as io if __name__ == "__main__": # user input: size of matrix to write, default 3x3 diff --git a/examples/3a_write_thetaMode_serial.cpp b/examples/3a_write_thetaMode_serial.cpp index 87c9315dfb..df7134a9f7 100644 --- a/examples/3a_write_thetaMode_serial.cpp +++ b/examples/3a_write_thetaMode_serial.cpp @@ -26,26 +26,24 @@ #include #include - using namespace openPMD; int main() { // open file for writing - Series series = Series( - "../samples/3_write_thetaMode_serial.h5", - Access::CREATE - ); + Series series = + Series("../samples/3_write_thetaMode_serial.h5", Access::CREATE); // configure and setup 
geometry unsigned int const num_modes = 5u; - unsigned int const num_fields = 1u + (num_modes-1u) * 2u; // the first mode is purely real + unsigned int const num_fields = + 1u + (num_modes - 1u) * 2u; // the first mode is purely real unsigned int const N_r = 60; unsigned int const N_z = 200; // write values 0...size-1 - std::vector< double > E_r_data(num_fields*N_r*N_z); - std::vector< float > E_t_data(num_fields*N_r*N_z); + std::vector E_r_data(num_fields * N_r * N_z); + std::vector E_t_data(num_fields * N_r * N_z); std::iota(E_r_data.begin(), E_r_data.end(), 0.0); std::iota(E_t_data.begin(), E_t_data.end(), 0.f); @@ -54,42 +52,36 @@ int main() std::string const geometryParameters = geos.str(); Mesh E = series.iterations[0].meshes["E"]; - E.setGeometry( Mesh::Geometry::thetaMode ); - E.setGeometryParameters( geometryParameters ); - E.setDataOrder( Mesh::DataOrder::C ); - E.setGridSpacing( std::vector{1.0, 1.0} ); - E.setGridGlobalOffset( std::vector{0.0, 0.0} ); - E.setGridUnitSI( 1.0 ); - E.setAxisLabels( std::vector< std::string >{"r", "z"} ); - std::map< UnitDimension, double > const unitDimensions{ - {UnitDimension::I, 1.0}, - {UnitDimension::J, 2.0} - }; - E.setUnitDimension( unitDimensions ); + E.setGeometry(Mesh::Geometry::thetaMode); + E.setGeometryParameters(geometryParameters); + E.setDataOrder(Mesh::DataOrder::C); + E.setGridSpacing(std::vector{1.0, 1.0}); + E.setGridGlobalOffset(std::vector{0.0, 0.0}); + E.setGridUnitSI(1.0); + E.setAxisLabels(std::vector{"r", "z"}); + std::map const unitDimensions{ + {UnitDimension::I, 1.0}, {UnitDimension::J, 2.0}}; + E.setUnitDimension(unitDimensions); // write components: E_z, E_r, E_t auto E_z = E["z"]; - E_z.setUnitSI( 10. ); - E_z.setPosition(std::vector< double >{0.0, 0.5}); + E_z.setUnitSI(10.); + E_z.setPosition(std::vector{0.0, 0.5}); // (modes, r, z) see setGeometryParameters - E_z.resetDataset( Dataset(Datatype::FLOAT, {num_fields, N_r, N_z}) ); - E_z.makeConstant( static_cast< float >(42.54) ); + E_z.resetDataset(Dataset(Datatype::FLOAT, {num_fields, N_r, N_z})); + E_z.makeConstant(static_cast(42.54)); // write all modes at once (otherwise iterate over modes and first index auto E_r = E["r"]; - E_r.setUnitSI( 10. ); - E_r.setPosition(std::vector< double >{0.5, 0.0}); - E_r.resetDataset( - Dataset(Datatype::DOUBLE, {num_fields, N_r, N_z}) - ); + E_r.setUnitSI(10.); + E_r.setPosition(std::vector{0.5, 0.0}); + E_r.resetDataset(Dataset(Datatype::DOUBLE, {num_fields, N_r, N_z})); E_r.storeChunk(E_r_data, Offset{0, 0, 0}, Extent{num_fields, N_r, N_z}); auto E_t = E["t"]; - E_t.setUnitSI( 10. 
); - E_t.setPosition(std::vector< double >{0.0, 0.0}); - E_t.resetDataset( - Dataset(Datatype::FLOAT, {num_fields, N_r, N_z}) - ); + E_t.setUnitSI(10.); + E_t.setPosition(std::vector{0.0, 0.0}); + E_t.resetDataset(Dataset(Datatype::FLOAT, {num_fields, N_r, N_z})); E_t.storeChunk(E_t_data, Offset{0, 0, 0}, Extent{num_fields, N_r, N_z}); series.flush(); diff --git a/examples/3a_write_thetaMode_serial.py b/examples/3a_write_thetaMode_serial.py index 7d3f581adf..8570383c42 100755 --- a/examples/3a_write_thetaMode_serial.py +++ b/examples/3a_write_thetaMode_serial.py @@ -6,9 +6,8 @@ Authors: Axel Huebl License: LGPLv3+ """ -import openpmd_api as io import numpy as np - +import openpmd_api as io if __name__ == "__main__": # open file for writing diff --git a/examples/3b_write_resizable_particles.cpp b/examples/3b_write_resizable_particles.cpp index c228bae654..d15dba92c6 100644 --- a/examples/3b_write_resizable_particles.cpp +++ b/examples/3b_write_resizable_particles.cpp @@ -20,67 +20,63 @@ */ #include +#include #include #include -#include - using namespace openPMD; int main() { // open file for writing - Series series = Series( - "../samples/3b_write_resizable_particles.h5", - Access::CREATE - ); + Series series = + Series("../samples/3b_write_resizable_particles.h5", Access::CREATE); - ParticleSpecies electrons = - series.iterations[0].particles["electrons"]; + ParticleSpecies electrons = series.iterations[0].particles["electrons"]; // our initial data to write - std::vector< double > x{ 0., 1., 2., 3., 4. }; - std::vector< double > y{ -2., -3., -4., -5., -6. }; + std::vector x{0., 1., 2., 3., 4.}; + std::vector y{-2., -3., -4., -5., -6.}; // both x and y the same type, otherwise we use two distinct datasets - Datatype dtype = determineDatatype( shareRaw( x ) ); - Extent size = { x.size() }; - auto dataset = Dataset( dtype, size, "{ \"resizable\": true }" ); + Datatype dtype = determineDatatype(shareRaw(x)); + Extent size = {x.size()}; + auto dataset = Dataset(dtype, size, "{ \"resizable\": true }"); RecordComponent rc_x = electrons["position"]["x"]; RecordComponent rc_y = electrons["position"]["y"]; - rc_x.resetDataset( dataset ); - rc_y.resetDataset( dataset ); + rc_x.resetDataset(dataset); + rc_y.resetDataset(dataset); - Offset offset = { 0 }; - rc_x.storeChunk( x, offset, { x.size() } ); - rc_y.storeChunk( y, offset, { y.size() } ); + Offset offset = {0}; + rc_x.storeChunk(x, offset, {x.size()}); + rc_y.storeChunk(y, offset, {y.size()}); // openPMD allows additional position offsets: set to zero here RecordComponent rc_xo = electrons["positionOffset"]["x"]; RecordComponent rc_yo = electrons["positionOffset"]["y"]; - rc_xo.resetDataset( dataset ); - rc_yo.resetDataset( dataset ); - rc_xo.makeConstant( 0.0 ); - rc_yo.makeConstant( 0.0 ); + rc_xo.resetDataset(dataset); + rc_yo.resetDataset(dataset); + rc_xo.makeConstant(0.0); + rc_yo.makeConstant(0.0); // after this call, the provided data buffers can be used again or deleted series.flush(); // extend and append more particles - x = { 5., 6., 7. }; - y = {-7., -8., -9. 
}; + x = {5., 6., 7.}; + y = {-7., -8., -9.}; offset.at(0) += dataset.extent.at(0); - dataset = Dataset( { dataset.extent.at(0) + x.size() } ); + dataset = Dataset({dataset.extent.at(0) + x.size()}); - rc_x.resetDataset( dataset ); - rc_y.resetDataset( dataset ); + rc_x.resetDataset(dataset); + rc_y.resetDataset(dataset); - rc_x.storeChunk( x, offset, { x.size() } ); - rc_y.storeChunk( y, offset, { x.size() } ); + rc_x.storeChunk(x, offset, {x.size()}); + rc_y.storeChunk(y, offset, {x.size()}); - rc_xo.resetDataset( dataset ); - rc_yo.resetDataset( dataset ); + rc_xo.resetDataset(dataset); + rc_yo.resetDataset(dataset); // after this call, the provided data buffers can be used again or deleted series.flush(); diff --git a/examples/3b_write_resizable_particles.py b/examples/3b_write_resizable_particles.py index 03dd780b29..eb604deb71 100755 --- a/examples/3b_write_resizable_particles.py +++ b/examples/3b_write_resizable_particles.py @@ -6,9 +6,8 @@ Authors: Axel Huebl License: LGPLv3+ """ -import openpmd_api as io import numpy as np - +import openpmd_api as io if __name__ == "__main__": # open file for writing diff --git a/examples/4_read_parallel.cpp b/examples/4_read_parallel.cpp index a88af11b0e..530b00fe5d 100644 --- a/examples/4_read_parallel.cpp +++ b/examples/4_read_parallel.cpp @@ -22,10 +22,9 @@ #include +#include #include #include -#include - using std::cout; using namespace openPMD; @@ -40,7 +39,6 @@ int main(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - /* note: this scope is intentional to destruct the openPMD::Series object * prior to MPI_Finalize(); */ @@ -48,41 +46,39 @@ int main(int argc, char *argv[]) Series series = Series( "../samples/git-sample/data%T.h5", Access::READ_ONLY, - MPI_COMM_WORLD - ); - if( 0 == mpi_rank ) - cout << "Read a series in parallel with " << mpi_size << " MPI ranks\n"; + MPI_COMM_WORLD); + if (0 == mpi_rank) + cout << "Read a series in parallel with " << mpi_size + << " MPI ranks\n"; MeshRecordComponent E_x = series.iterations[100].meshes["E"]["x"]; Offset chunk_offset = { - static_cast< long unsigned int >(mpi_rank) + 1, - 1, - 1 - }; + static_cast(mpi_rank) + 1, 1, 1}; Extent chunk_extent = {2, 2, 1}; auto chunk_data = E_x.loadChunk(chunk_offset, chunk_extent); - if( 0 == mpi_rank ) - cout << "Queued the loading of a single chunk per MPI rank from disk, " + if (0 == mpi_rank) + cout << "Queued the loading of a single chunk per MPI rank from " + "disk, " "ready to execute\n"; series.flush(); - if( 0 == mpi_rank ) + if (0 == mpi_rank) cout << "Chunks have been read from disk\n"; - for( int i = 0; i < mpi_size; ++i ) + for (int i = 0; i < mpi_size; ++i) { - if( i == mpi_rank ) + if (i == mpi_rank) { cout << "Rank " << mpi_rank << " - Read chunk contains:\n"; - for( size_t row = 0; row < chunk_extent[0]; ++row ) + for (size_t row = 0; row < chunk_extent[0]; ++row) { - for( size_t col = 0; col < chunk_extent[1]; ++col ) - cout << "\t" - << '(' << row + chunk_offset[0] << '|' << col + chunk_offset[1] << '|' << 1 << ")\t" - << chunk_data.get()[row*chunk_extent[1]+col]; + for (size_t col = 0; col < chunk_extent[1]; ++col) + cout << "\t" << '(' << row + chunk_offset[0] << '|' + << col + chunk_offset[1] << '|' << 1 << ")\t" + << chunk_data.get()[row * chunk_extent[1] + col]; cout << std::endl; } } diff --git a/examples/4_read_parallel.py b/examples/4_read_parallel.py index 4956ca78c9..6cfd3b542f 100755 --- a/examples/4_read_parallel.py +++ b/examples/4_read_parallel.py @@ -6,15 +6,13 @@ 
Authors: Axel Huebl License: LGPLv3+ """ +import openpmd_api as io # IMPORTANT: include mpi4py FIRST # https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html # on import: calls MPI_Init_thread() # exit hook: calls MPI_Finalize() from mpi4py import MPI -import openpmd_api as io - - if __name__ == "__main__": # also works with any other MPI communicator comm = MPI.COMM_WORLD diff --git a/examples/5_write_parallel.cpp b/examples/5_write_parallel.cpp index cdbbbe3ec0..b8875504a5 100644 --- a/examples/5_write_parallel.cpp +++ b/examples/5_write_parallel.cpp @@ -24,8 +24,7 @@ #include #include -#include // std::vector - +#include // std::vector using std::cout; using namespace openPMD; @@ -47,51 +46,47 @@ int main(int argc, char *argv[]) // global data set to write: [MPI_Size * 10, 300] // each rank writes a 10x300 slice with its MPI rank as values auto const value = float(mpi_size); - std::vector local_data( - 10 * 300, value); - if( 0 == mpi_rank ) - cout << "Set up a 2D array with 10x300 elements per MPI rank (" << mpi_size - << "x) that will be written to disk\n"; + std::vector local_data(10 * 300, value); + if (0 == mpi_rank) + cout << "Set up a 2D array with 10x300 elements per MPI rank (" + << mpi_size << "x) that will be written to disk\n"; // open file for writing Series series = Series( - "../samples/5_parallel_write.h5", - Access::CREATE, - MPI_COMM_WORLD - ); - if( 0 == mpi_rank ) - cout << "Created an empty series in parallel with " - << mpi_size << " MPI ranks\n"; + "../samples/5_parallel_write.h5", Access::CREATE, MPI_COMM_WORLD); + if (0 == mpi_rank) + cout << "Created an empty series in parallel with " << mpi_size + << " MPI ranks\n"; MeshRecordComponent mymesh = - series - .iterations[1] - .meshes["mymesh"][MeshRecordComponent::SCALAR]; + series.iterations[1].meshes["mymesh"][MeshRecordComponent::SCALAR]; // example 1D domain decomposition in first index Datatype datatype = determineDatatype(); Extent global_extent = {10ul * mpi_size, 300}; Dataset dataset = Dataset(datatype, global_extent); - if( 0 == mpi_rank ) - cout << "Prepared a Dataset of size " << dataset.extent[0] - << "x" << dataset.extent[1] - << " and Datatype " << dataset.dtype << '\n'; + if (0 == mpi_rank) + cout << "Prepared a Dataset of size " << dataset.extent[0] << "x" + << dataset.extent[1] << " and Datatype " << dataset.dtype + << '\n'; mymesh.resetDataset(dataset); - if( 0 == mpi_rank ) - cout << "Set the global Dataset properties for the scalar field mymesh in iteration 1\n"; + if (0 == mpi_rank) + cout << "Set the global Dataset properties for the scalar field " + "mymesh in iteration 1\n"; // example shows a 1D domain decomposition in first index Offset chunk_offset = {10ul * mpi_rank, 0}; Extent chunk_extent = {10, 300}; mymesh.storeChunk(local_data, chunk_offset, chunk_extent); - if( 0 == mpi_rank ) - cout << "Registered a single chunk per MPI rank containing its contribution, " + if (0 == mpi_rank) + cout << "Registered a single chunk per MPI rank containing its " + "contribution, " "ready to write content to disk\n"; series.flush(); - if( 0 == mpi_rank ) + if (0 == mpi_rank) cout << "Dataset content has been fully written to disk\n"; } diff --git a/examples/5_write_parallel.py b/examples/5_write_parallel.py index 662cb95353..3fada54b7a 100755 --- a/examples/5_write_parallel.py +++ b/examples/5_write_parallel.py @@ -6,16 +6,14 @@ Authors: Axel Huebl License: LGPLv3+ """ +import numpy as np +import openpmd_api as io # IMPORTANT: include mpi4py FIRST # 
https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html # on import: calls MPI_Init_thread() # exit hook: calls MPI_Finalize() from mpi4py import MPI -import openpmd_api as io -import numpy as np - - if __name__ == "__main__": # also works with any other MPI communicator comm = MPI.COMM_WORLD diff --git a/examples/6_dump_filebased_series.cpp b/examples/6_dump_filebased_series.cpp index 96fbce5b6f..99b2b0939d 100644 --- a/examples/6_dump_filebased_series.cpp +++ b/examples/6_dump_filebased_series.cpp @@ -3,7 +3,6 @@ #include #include - using namespace openPMD; int main() @@ -11,10 +10,10 @@ int main() Series o = Series("../samples/git-sample/data%T.h5", Access::READ_ONLY); std::cout << "Read iterations "; - for( auto const& val : o.iterations ) + for (auto const &val : o.iterations) std::cout << '\t' << val.first; std::cout << "Read attributes in the root:\n"; - for( auto const& val : o.attributes() ) + for (auto const &val : o.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; @@ -28,122 +27,141 @@ int main() << '\n'; std::cout << "Read attributes in basePath:\n"; - for( auto const& a : o.iterations.attributes() ) + for (auto const &a : o.iterations.attributes()) std::cout << '\t' << a << '\n'; std::cout << '\n'; std::cout << "Read iterations in basePath:\n"; - for( auto const& i : o.iterations ) + for (auto const &i : o.iterations) std::cout << '\t' << i.first << '\n'; std::cout << '\n'; - for( auto const& i : o.iterations ) + for (auto const &i : o.iterations) { std::cout << "Read attributes in iteration " << i.first << ":\n"; - for( auto const& val : i.second.attributes() ) + for (auto const &val : i.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; - std::cout << i.first << ".time - " << i.second.time< float >() << '\n' - << i.first << ".dt - " << i.second.dt< float >() << '\n' - << i.first << ".timeUnitSI - " << i.second.timeUnitSI() << '\n' + std::cout << i.first << ".time - " << i.second.time() << '\n' + << i.first << ".dt - " << i.second.dt() << '\n' + << i.first << ".timeUnitSI - " << i.second.timeUnitSI() + << '\n' << '\n'; - std::cout << "Read attributes in meshesPath in iteration " << i.first << ":\n"; - for( auto const& a : i.second.meshes.attributes() ) + std::cout << "Read attributes in meshesPath in iteration " << i.first + << ":\n"; + for (auto const &a : i.second.meshes.attributes()) std::cout << '\t' << a << '\n'; std::cout << '\n'; std::cout << "Read meshes in iteration " << i.first << ":\n"; - for( auto const& m : i.second.meshes ) + for (auto const &m : i.second.meshes) std::cout << '\t' << m.first << '\n'; std::cout << '\n'; - for( auto const& m : i.second.meshes ) + for (auto const &m : i.second.meshes) { - std::cout << "Read attributes for mesh " << m.first << " in iteration " << i.first << ":\n"; - for( auto const& val : m.second.attributes() ) + std::cout << "Read attributes for mesh " << m.first + << " in iteration " << i.first << ":\n"; + for (auto const &val : m.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; std::string meshPrefix = std::to_string(i.first) + '.' 
+ m.first; std::string axisLabels = ""; - for( auto const& val : m.second.axisLabels() ) + for (auto const &val : m.second.axisLabels()) axisLabels += val + ", "; std::string gridSpacing = ""; - for( auto const& val : m.second.gridSpacing< float >() ) + for (auto const &val : m.second.gridSpacing()) gridSpacing += std::to_string(val) + ", "; std::string gridGlobalOffset = ""; - for( auto const& val : m.second.gridGlobalOffset() ) + for (auto const &val : m.second.gridGlobalOffset()) gridGlobalOffset += std::to_string(val) + ", "; std::string unitDimension = ""; - for( auto const& val : m.second.unitDimension() ) + for (auto const &val : m.second.unitDimension()) unitDimension += std::to_string(val) + ", "; - std::cout << meshPrefix << ".geometry - " << m.second.geometry() << '\n' - << meshPrefix << ".dataOrder - " << m.second.dataOrder() << '\n' + std::cout << meshPrefix << ".geometry - " << m.second.geometry() + << '\n' + << meshPrefix << ".dataOrder - " << m.second.dataOrder() + << '\n' << meshPrefix << ".axisLabels - " << axisLabels << '\n' << meshPrefix << ".gridSpacing - " << gridSpacing << '\n' - << meshPrefix << ".gridGlobalOffset - " << gridGlobalOffset << '\n' - << meshPrefix << ".gridUnitSI - " << m.second.gridUnitSI() << '\n' - << meshPrefix << ".unitDimension - " << unitDimension << '\n' - << meshPrefix << ".timeOffset - " << m.second.timeOffset< float >() << '\n' + << meshPrefix << ".gridGlobalOffset - " + << gridGlobalOffset << '\n' + << meshPrefix << ".gridUnitSI - " << m.second.gridUnitSI() + << '\n' + << meshPrefix << ".unitDimension - " << unitDimension + << '\n' + << meshPrefix << ".timeOffset - " + << m.second.timeOffset() << '\n' << '\n'; std::cout << "Read recordComponents for mesh " << m.first << ":\n"; - for( auto const& rc : m.second ) + for (auto const &rc : m.second) std::cout << '\t' << rc.first << '\n'; std::cout << '\n'; - for( auto const& rc : m.second ) + for (auto const &rc : m.second) { - std::cout << "Read attributes for recordComponent " << rc.first << " for mesh " << m.first << '\n'; - for( auto const& val : rc.second.attributes() ) + std::cout << "Read attributes for recordComponent " << rc.first + << " for mesh " << m.first << '\n'; + for (auto const &val : rc.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; - std::string componentPrefix = std::to_string(i.first) + '.' + m.first + '.' + rc.first; + std::string componentPrefix = + std::to_string(i.first) + '.' + m.first + '.' 
+ rc.first; std::string position = ""; - for( auto const& val : rc.second.position< double >() ) + for (auto const &val : rc.second.position()) position += std::to_string(val) + ", "; - std::cout << componentPrefix << ".unitSI - " << rc.second.unitSI() << '\n' - << componentPrefix << ".position - " << position << '\n' + std::cout << componentPrefix << ".unitSI - " + << rc.second.unitSI() << '\n' + << componentPrefix << ".position - " << position + << '\n' << '\n'; } } - std::cout << "Read attributes in particlesPath in iteration " << i.first << ":\n"; - for( auto const& a : i.second.particles.attributes() ) + std::cout << "Read attributes in particlesPath in iteration " << i.first + << ":\n"; + for (auto const &a : i.second.particles.attributes()) std::cout << '\t' << a << '\n'; std::cout << '\n'; std::cout << "Read particleSpecies in iteration " << i.first << ":\n"; - for( auto const& val : i.second.particles ) + for (auto const &val : i.second.particles) std::cout << '\t' << val.first << '\n'; std::cout << '\n'; - for( auto const& p : i.second.particles ) + for (auto const &p : i.second.particles) { - std::cout << "Read attributes for particle species " << p.first << " in iteration " << i.first << ":\n"; - for( auto const& val : p.second.attributes() ) + std::cout << "Read attributes for particle species " << p.first + << " in iteration " << i.first << ":\n"; + for (auto const &val : p.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; - std::cout << "Read particle records for particle species " << p.first << " in iteration " << i.first << ":\n"; - for( auto const& r : p.second ) + std::cout << "Read particle records for particle species " + << p.first << " in iteration " << i.first << ":\n"; + for (auto const &r : p.second) std::cout << '\t' << r.first << '\n'; std::cout << '\n'; - for( auto const& r : p.second ) + for (auto const &r : p.second) { - std::cout << "Read recordComponents for particle record " << r.first << ":\n"; - for( auto const& rc : r.second ) + std::cout << "Read recordComponents for particle record " + << r.first << ":\n"; + for (auto const &rc : r.second) std::cout << '\t' << rc.first << '\n'; std::cout << '\n'; - for( auto const& rc : r.second ) + for (auto const &rc : r.second) { - std::cout << "Read attributes for recordComponent " << rc.first << " for particle record " << r.first << '\n'; - for( auto const& val : rc.second.attributes() ) + std::cout << "Read attributes for recordComponent " + << rc.first << " for particle record " << r.first + << '\n'; + for (auto const &val : rc.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; } diff --git a/examples/7_extended_write_serial.cpp b/examples/7_extended_write_serial.cpp index 5fa62add1e..8452de16b0 100644 --- a/examples/7_extended_write_serial.cpp +++ b/examples/7_extended_write_serial.cpp @@ -3,45 +3,48 @@ #include #include - -int -main() +int main() { namespace io = openPMD; { - auto f = io::Series("working/directory/2D_simData.h5", io::Access::CREATE); + auto f = + io::Series("working/directory/2D_simData.h5", io::Access::CREATE); - // all required openPMD attributes will be set to reasonable default values (all ones, all zeros, empty strings,...) - // manually setting them enforces the openPMD standard + // all required openPMD attributes will be set to reasonable default + // values (all ones, all zeros, empty strings,...) 
manually setting them + // enforces the openPMD standard f.setMeshesPath("custom_meshes_path"); f.setParticlesPath("long_and_very_custom_particles_path"); // it is possible to add and remove attributes f.setComment("This is fine and actually encouraged by the standard"); f.setAttribute( - "custom_attribute_name", - std::string("This attribute is manually added and can contain about any datatype you would want") - ); - // note that removing attributes required by the standard typically makes the file unusable for post-processing + "custom_attribute_name", + std::string("This attribute is manually added and can contain " + "about any datatype you would want")); + // note that removing attributes required by the standard typically + // makes the file unusable for post-processing f.deleteAttribute("custom_attribute_name"); - // everything that is accessed with [] should be interpreted as permanent storage - // the objects sunk into these locations are deep copies + // everything that is accessed with [] should be interpreted as + // permanent storage the objects sunk into these locations are deep + // copies { - // setting attributes can be chained in JS-like syntax for compact code - f.iterations[1] - .setTime(42.0) - .setDt(1.0) - .setTimeUnitSI(1.39e-16); - f.iterations[2].setComment("This iteration will not appear in any output"); + // setting attributes can be chained in JS-like syntax for compact + // code + f.iterations[1].setTime(42.0).setDt(1.0).setTimeUnitSI(1.39e-16); + f.iterations[2].setComment( + "This iteration will not appear in any output"); f.iterations.erase(2); } { // everything is a reference io::Iteration reference = f.iterations[1]; - reference.setComment("Modifications to a copied iteration refer to the same iteration"); + reference.setComment( + "Modifications to a copied iteration refer to the same " + "iteration"); } f.iterations[1].deleteAttribute("comment"); @@ -50,16 +53,19 @@ main() // the underlying concept for numeric data is the openPMD Record // https://github.com/openPMD/openPMD-standard/blob/1.0.1/STANDARD.md#scalar-vector-and-tensor-records // Meshes are specialized records - cur_it.meshes["generic_2D_field"].setUnitDimension({{io::UnitDimension::L, -3}, - {io::UnitDimension::M, 1}}); + cur_it.meshes["generic_2D_field"].setUnitDimension( + {{io::UnitDimension::L, -3}, {io::UnitDimension::M, 1}}); { - // copies of objects are handles/references to the same underlying object + // copies of objects are handles/references to the same underlying + // object io::Mesh lowRez = cur_it.meshes["generic_2D_field"]; - lowRez.setGridSpacing(std::vector{6, 1}).setGridGlobalOffset({0, 600}); + lowRez.setGridSpacing(std::vector{6, 1}) + .setGridGlobalOffset({0, 600}); io::Mesh highRez = cur_it.meshes["generic_2D_field"]; - highRez.setGridSpacing(std::vector{6, 0.5}).setGridGlobalOffset({0, 1200}); + highRez.setGridSpacing(std::vector{6, 0.5}) + .setGridGlobalOffset({0, 1200}); cur_it.meshes.erase("generic_2D_field"); cur_it.meshes["lowRez_2D_field"] = lowRez; @@ -70,26 +76,32 @@ main() { // particles are handled very similar io::ParticleSpecies electrons = cur_it.particles["electrons"]; - electrons.setAttribute("NoteWorthyParticleSpeciesProperty", - std::string("Observing this species was a blast.")); - electrons["displacement"].setUnitDimension({{io::UnitDimension::M, 1}}); + electrons.setAttribute( + "NoteWorthyParticleSpeciesProperty", + std::string("Observing this species was a blast.")); + electrons["displacement"].setUnitDimension( + {{io::UnitDimension::M, 1}}); 
electrons["displacement"]["x"].setUnitSI(1e-6); electrons.erase("displacement"); - electrons["weighting"][io::RecordComponent::SCALAR].makeConstant(1.e-5); + electrons["weighting"][io::RecordComponent::SCALAR].makeConstant( + 1.e-5); } io::Mesh mesh = cur_it.meshes["lowRez_2D_field"]; mesh.setAxisLabels({"x", "y"}); - // data is assumed to reside behind a pointer as a contiguous column-major array - // shared data ownership during IO is indicated with a smart pointer - std::shared_ptr partial_mesh(new double[5], [](double const *p) { - delete[] p; - p = nullptr; - }); - - // before storing record data, you must specify the dataset once per component - // this describes the datatype and shape of data as it should be written to disk + // data is assumed to reside behind a pointer as a contiguous + // column-major array shared data ownership during IO is indicated with + // a smart pointer + std::shared_ptr partial_mesh( + new double[5], [](double const *p) { + delete[] p; + p = nullptr; + }); + + // before storing record data, you must specify the dataset once per + // component this describes the datatype and shape of data as it should + // be written to disk io::Datatype dtype = io::determineDatatype(partial_mesh); auto d = io::Dataset(dtype, io::Extent{2, 5}); d.setCompression("zlib", 9); @@ -99,56 +111,68 @@ main() io::ParticleSpecies electrons = cur_it.particles["electrons"]; io::Extent mpiDims{4}; - std::shared_ptr partial_particlePos(new float[2], [](float const *p) { - delete[] p; - p = nullptr; - }); + std::shared_ptr partial_particlePos( + new float[2], [](float const *p) { + delete[] p; + p = nullptr; + }); dtype = io::determineDatatype(partial_particlePos); d = io::Dataset(dtype, mpiDims); electrons["position"]["x"].resetDataset(d); - std::shared_ptr partial_particleOff(new uint64_t[2], [](uint64_t const *p) { - delete[] p; - p = nullptr; - }); + std::shared_ptr partial_particleOff( + new uint64_t[2], [](uint64_t const *p) { + delete[] p; + p = nullptr; + }); dtype = io::determineDatatype(partial_particleOff); d = io::Dataset(dtype, mpiDims); electrons["positionOffset"]["x"].resetDataset(d); auto dset = io::Dataset(io::determineDatatype(), {2}); - electrons.particlePatches["numParticles"][io::RecordComponent::SCALAR].resetDataset(dset); - electrons.particlePatches["numParticlesOffset"][io::RecordComponent::SCALAR].resetDataset(dset); + electrons.particlePatches["numParticles"][io::RecordComponent::SCALAR] + .resetDataset(dset); + electrons + .particlePatches["numParticlesOffset"][io::RecordComponent::SCALAR] + .resetDataset(dset); dset = io::Dataset(io::Datatype::FLOAT, {2}); - electrons.particlePatches["offset"].setUnitDimension({{io::UnitDimension::L, 1}}); + electrons.particlePatches["offset"].setUnitDimension( + {{io::UnitDimension::L, 1}}); electrons.particlePatches["offset"]["x"].resetDataset(dset); - electrons.particlePatches["extent"].setUnitDimension({{io::UnitDimension::L, 1}}); + electrons.particlePatches["extent"].setUnitDimension( + {{io::UnitDimension::L, 1}}); electrons.particlePatches["extent"]["x"].resetDataset(dset); - // at any point in time you may decide to dump already created output to disk - // note that this will make some operations impossible (e.g. renaming files) + // at any point in time you may decide to dump already created output to + // disk note that this will make some operations impossible (e.g. 
+ // renaming files) f.flush(); // chunked writing of the final dataset at a time is supported // this loop writes one row at a time - double mesh_x[2][5] = {{1, 3, 5, 7, 9}, - {11, 13, 15, 17, 19}}; + double mesh_x[2][5] = {{1, 3, 5, 7, 9}, {11, 13, 15, 17, 19}}; float particle_position[4] = {0.1f, 0.2f, 0.3f, 0.4f}; uint64_t particle_positionOffset[4] = {0u, 1u, 2u, 3u}; - for (uint64_t i = 0u; i < 2u; ++i) { + for (uint64_t i = 0u; i < 2u; ++i) + { for (int col = 0; col < 5; ++col) partial_mesh.get()[col] = mesh_x[i][col]; io::Offset o = io::Offset{i, 0}; io::Extent e = io::Extent{1, 5}; mesh["x"].storeChunk(partial_mesh, o, e); - // operations between store and flush MUST NOT modify the pointed-to data + // operations between store and flush MUST NOT modify the pointed-to + // data f.flush(); - // after the flush completes successfully, access to the shared resource is returned to the caller + // after the flush completes successfully, access to the shared + // resource is returned to the caller - for (int idx = 0; idx < 2; ++idx) { + for (int idx = 0; idx < 2; ++idx) + { partial_particlePos.get()[idx] = particle_position[idx + 2 * i]; - partial_particleOff.get()[idx] = particle_positionOffset[idx + 2 * i]; + partial_particleOff.get()[idx] = + particle_positionOffset[idx + 2 * i]; } uint64_t numParticlesOffset = 2 * i; @@ -157,14 +181,23 @@ main() o = io::Offset{numParticlesOffset}; e = io::Extent{numParticles}; electrons["position"]["x"].storeChunk(partial_particlePos, o, e); - electrons["positionOffset"]["x"].storeChunk(partial_particleOff, o, e); - - electrons.particlePatches["numParticles"][io::RecordComponent::SCALAR].store(i, numParticles); - electrons.particlePatches["numParticlesOffset"][io::RecordComponent::SCALAR].store(i, numParticlesOffset); - - electrons.particlePatches["offset"]["x"].store(i, particle_position[numParticlesOffset]); - electrons.particlePatches["extent"]["x"].store(i, particle_position[numParticlesOffset + numParticles - 1] - - particle_position[numParticlesOffset]); + electrons["positionOffset"]["x"].storeChunk( + partial_particleOff, o, e); + + electrons + .particlePatches["numParticles"][io::RecordComponent::SCALAR] + .store(i, numParticles); + electrons + .particlePatches["numParticlesOffset"] + [io::RecordComponent::SCALAR] + .store(i, numParticlesOffset); + + electrons.particlePatches["offset"]["x"].store( + i, particle_position[numParticlesOffset]); + electrons.particlePatches["extent"]["x"].store( + i, + particle_position[numParticlesOffset + numParticles - 1] - + particle_position[numParticlesOffset]); } mesh["y"].resetDataset(d); @@ -176,7 +209,8 @@ main() /* The files in 'f' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. - * When running out of scope on return, the 'Series' destructor is called. + * When running out of scope on return, the 'Series' destructor is + * called. 
*/ } diff --git a/examples/7_extended_write_serial.py b/examples/7_extended_write_serial.py index dc23d8d427..df4e710c8c 100755 --- a/examples/7_extended_write_serial.py +++ b/examples/7_extended_write_serial.py @@ -6,10 +6,9 @@ Authors: Axel Huebl, Fabian Koller License: LGPLv3+ """ -from openpmd_api import Series, Access, Dataset, Mesh_Record_Component, \ - Unit_Dimension import numpy as np - +from openpmd_api import (Access, Dataset, Mesh_Record_Component, Series, + Unit_Dimension) SCALAR = Mesh_Record_Component.SCALAR diff --git a/examples/8_benchmark_parallel.cpp b/examples/8_benchmark_parallel.cpp index 235576f3f0..da5c2af42f 100644 --- a/examples/8_benchmark_parallel.cpp +++ b/examples/8_benchmark_parallel.cpp @@ -1,25 +1,24 @@ -#include #include -#include #include +#include +#include #if openPMD_HAVE_MPI -# include +#include #endif #include #include #include - #if openPMD_HAVE_MPI -inline void -print_help( std::string const program_name ) +inline void print_help(std::string const program_name) { std::cout << "Usage: " << program_name << "\n"; std::cout << "Run a simple parallel write and read benchmark.\n\n"; std::cout << "Options:\n"; - std::cout << " -w, --weak run a weak scaling (default: strong scaling)\n"; + std::cout + << " -w, --weak run a weak scaling (default: strong scaling)\n"; std::cout << " -h, --help display this help and exit\n"; std::cout << " -v, --version output version information and exit\n"; std::cout << "\n"; @@ -28,49 +27,49 @@ print_help( std::string const program_name ) std::cout << " " << program_name << " # for a strong scaling\n"; } -inline void -print_version( std::string const program_name ) +inline void print_version(std::string const program_name) { - std::cout << program_name << " (openPMD-api) " - << openPMD::getVersion() << "\n"; + std::cout << program_name << " (openPMD-api) " << openPMD::getVersion() + << "\n"; std::cout << "Copyright 2017-2021 openPMD contributors\n"; std::cout << "Authors: Franz Poeschel, Axel Huebl et al.\n"; std::cout << "License: LGPLv3+\n"; - std::cout << "This is free software: you are free to change and redistribute it.\n" + std::cout << "This is free software: you are free to change and " + "redistribute it.\n" "There is NO WARRANTY, to the extent permitted by law.\n"; } -int main( - int argc, - char *argv[] -) +int main(int argc, char *argv[]) { using namespace std; - MPI_Init( - &argc, - &argv - ); + MPI_Init(&argc, &argv); // CLI parsing - std::vector< std::string > str_argv; - for( int i = 0; i < argc; ++i ) str_argv.emplace_back( argv[i] ); + std::vector str_argv; + for (int i = 0; i < argc; ++i) + str_argv.emplace_back(argv[i]); bool weak_scaling = false; - for (int c = 1; c < int(argc); c++) { - if (std::string("--help") == argv[c] || std::string("-h") == argv[c]) { + for (int c = 1; c < int(argc); c++) + { + if (std::string("--help") == argv[c] || std::string("-h") == argv[c]) + { print_help(argv[0]); return 0; } - if (std::string("--version") == argv[c] || std::string("-v") == argv[c]) { + if (std::string("--version") == argv[c] || std::string("-v") == argv[c]) + { print_version(argv[0]); return 0; } - if (std::string("--weak") == argv[c] || std::string("-w") == argv[c]) { + if (std::string("--weak") == argv[c] || std::string("-w") == argv[c]) + { weak_scaling = true; } } - if (argc > 2) { + if (argc > 2) + { std::cerr << "Too many arguments! 
See: " << argv[0] << " --help\n"; return 1; } @@ -85,53 +84,46 @@ int main( openPMD::Datatype dt = openPMD::determineDatatype(); #endif - int rank, size; - MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - MPI_Comm_size( MPI_COMM_WORLD, &size ); - const unsigned scale_up = weak_scaling ? unsigned( size ) : 1u; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + const unsigned scale_up = weak_scaling ? unsigned(size) : 1u; // Total (in this case 3D) dataset across all MPI ranks. // Will be the same for all configured benchmarks. - openPMD::Extent total{ - 100 * scale_up, - 100, - 1000 - }; - - // The blockslicer assigns to each rank its part of the dataset. The rank will - // write to and read from that part. OneDimensionalBlockSlicer is a simple - // implementation of the BlockSlicer abstract class that will divide the - // dataset into hyperslab along one given dimension. - // If you wish to partition your dataset in a different manner, you can - // replace this with your own implementation of BlockSlicer. + openPMD::Extent total{100 * scale_up, 100, 1000}; + + // The blockslicer assigns to each rank its part of the dataset. The rank + // will write to and read from that part. OneDimensionalBlockSlicer is a + // simple implementation of the BlockSlicer abstract class that will divide + // the dataset into hyperslab along one given dimension. If you wish to + // partition your dataset in a different manner, you can replace this with + // your own implementation of BlockSlicer. auto blockSlicer = std::make_shared(0); - // Set up the DatasetFiller. The benchmarks will later inquire the DatasetFiller - // to get data for writing. - std::uniform_int_distribution distr( - 0, - 200000000 - ); + // Set up the DatasetFiller. The benchmarks will later inquire the + // DatasetFiller to get data for writing. + std::uniform_int_distribution distr(0, 200000000); openPMD::RandomDatasetFiller df{distr}; // The Benchmark class will in principle allow a user to configure // runs that write and read different datatypes. - // For this, the class is templated with a type called DatasetFillerProvider. - // This class serves as a factory for DatasetFillers for concrete types and - // should have a templated operator()() returning a value - // that can be dynamically casted to a std::shared_ptr> - // The openPMD API provides only one implementation of a DatasetFillerProvider, - // namely the SimpleDatasetFillerProvider being used in this example. - // Its purpose is to leverage a DatasetFiller for a concrete type (df in this example) - // to a DatasetFillerProvider whose operator()() will fail during runtime if T does - // not correspond with the underlying DatasetFiller. - // Use this implementation if you only wish to run the benchmark for one Datatype, + // For this, the class is templated with a type called + // DatasetFillerProvider. This class serves as a factory for DatasetFillers + // for concrete types and should have a templated operator()() returning + // a value that can be dynamically casted to a + // std::shared_ptr> The openPMD API provides only + // one implementation of a DatasetFillerProvider, namely the + // SimpleDatasetFillerProvider being used in this example. Its purpose is to + // leverage a DatasetFiller for a concrete type (df in this example) to a + // DatasetFillerProvider whose operator()() will fail during runtime if T + // does not correspond with the underlying DatasetFiller. 
Use this + // implementation if you only wish to run the benchmark for one Datatype, // otherwise provide your own implementation of DatasetFillerProvider. openPMD::SimpleDatasetFillerProvider dfp{df}; - // Create the Benchmark object. The file name (first argument) will be extended - // with the backends' file extensions. + // Create the Benchmark object. The file name (first argument) will be + // extended with the backends' file extensions. openPMD::MPIBenchmark benchmark{ "../benchmarks/benchmark", total, @@ -139,13 +131,15 @@ int main( dfp, }; - // Add benchmark runs to be executed. This will only store the configuration and not - // run the benchmark yet. Each run is configured by: - // * The compression scheme to use (first two parameters). The first parameter chooses + // Add benchmark runs to be executed. This will only store the configuration + // and not run the benchmark yet. Each run is configured by: + // * The compression scheme to use (first two parameters). The first + // parameter chooses // the compression scheme, the second parameter is the compression level. // * The backend (by file extension). // * The datatype to use for this run. - // * The number of iterations. Effectively, the benchmark will be repeated for this many + // * The number of iterations. Effectively, the benchmark will be repeated + // for this many // times. #if openPMD_HAVE_ADIOS1 || openPMD_HAVE_ADIOS2 benchmark.addConfiguration("", 0, "bp", dt, 10); @@ -154,31 +148,29 @@ int main( benchmark.addConfiguration("", 0, "h5", dt, 10); #endif - // Execute all previously configured benchmarks. Will return a MPIBenchmarkReport object - // with write and read times for each configured run. - // Take notice that results will be collected into the root rank's report object, the other - // ranks' reports will be empty. The root rank is specified by the first parameter of runBenchmark, - // the default being 0. - auto res = - benchmark.runBenchmark(); + // Execute all previously configured benchmarks. Will return a + // MPIBenchmarkReport object with write and read times for each configured + // run. Take notice that results will be collected into the root rank's + // report object, the other ranks' reports will be empty. The root rank is + // specified by the first parameter of runBenchmark, the default being 0. + auto res = benchmark.runBenchmark(); - if( rank == 0 ) + if (rank == 0) { - for( auto it = res.durations.begin(); - it != res.durations.end(); - it++ ) + for (auto it = res.durations.begin(); it != res.durations.end(); it++) { auto time = it->second; std::cout << "on rank " << std::get(it->first) - << "\t with backend " - << std::get(it->first) + << "\t with backend " << std::get(it->first) << "\twrite time: " << std::chrono::duration_cast( - time.first - ).count() << "\tread time: " + time.first) + .count() + << "\tread time: " << std::chrono::duration_cast( - time.second - ).count() << std::endl; + time.second) + .count() + << std::endl; } } diff --git a/examples/8a_benchmark_write_parallel.cpp b/examples/8a_benchmark_write_parallel.cpp index 3569381f4e..fbacd7e7ea 100644 --- a/examples/8a_benchmark_write_parallel.cpp +++ b/examples/8a_benchmark_write_parallel.cpp @@ -18,34 +18,34 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . 
*/ -#include #include +#include #include +#include #include +#include #include -#include -#include -#include #include -#include +#include #include +#include #if openPMD_HAVE_ADIOS2 -# include +#include #endif using std::cout; using namespace openPMD; - /** The Memory profiler class for profiling purpose * * Simple Memory usage report that works on linux system */ -static std::chrono::time_point m_ProgStart = std::chrono::system_clock::now(); +static std::chrono::time_point m_ProgStart = + std::chrono::system_clock::now(); class MemoryProfiler { @@ -55,56 +55,60 @@ class MemoryProfiler * @param[in] rank MPI rank * @param[in] tag item name to measure */ - MemoryProfiler(int rank, const std::string& tag) { - m_Rank = rank; + MemoryProfiler(int rank, const std::string &tag) + { + m_Rank = rank; #if defined(__linux) - //m_Name = "/proc/meminfo"; - m_Name = "/proc/self/status"; - Display(tag); + // m_Name = "/proc/meminfo"; + m_Name = "/proc/self/status"; + Display(tag); #else - (void)tag; - m_Name = ""; + (void)tag; + m_Name = ""; #endif - } + } /** * - * Read from /proc/self/status and display the Virtual Memory info at rank 0 on console + * Read from /proc/self/status and display the Virtual Memory info at rank 0 + * on console * * @param tag item name to measure * @param rank MPI rank */ - void Display(const std::string& tag){ - if (0 == m_Name.size()) - return; + void Display(const std::string &tag) + { + if (0 == m_Name.size()) + return; - if (m_Rank > 0) - return; + if (m_Rank > 0) + return; - std::cout<<" memory at: "<( m_End - m_Start ).count(); - double secs = millis/1000.0; - if( m_Rank > 0 ) - return; + double millis = std::chrono::duration_cast( + m_End - m_Start) + .count(); + double secs = millis / 1000.0; + if (m_Rank > 0) + return; std::cout << " [" << m_Tag << "] took:" << secs << " seconds\n"; - std::cout<<" " << m_Tag <<" From ProgStart in seconds "<< - std::chrono::duration_cast(m_End - m_ProgStart).count()/1000.0<( + m_End - m_ProgStart) + .count() / + 1000.0 + << std::endl; + + std::cout << std::endl; } + private: std::chrono::time_point m_Start; std::chrono::time_point m_End; @@ -150,7 +163,6 @@ class Timer int m_Rank = 0; }; - /** createData * generate a shared ptr of given size with given type & default value * @@ -161,32 +173,32 @@ class Timer * */ -template -std::shared_ptr< T > createData(const unsigned long& size, const T& val, const T& increment) - { - auto E = std::shared_ptr< T > { - new T[size], []( T * d ) {delete[] d;} - }; +template +std::shared_ptr +createData(const unsigned long &size, const T &val, const T &increment) +{ + auto E = std::shared_ptr{new T[size], [](T *d) { delete[] d; }}; - for(unsigned long i = 0ul; i < size; i++ ) - { - if (increment != 0) - //E.get()[i] = val+i; - E.get()[i] = val+i*increment; - else - E.get()[i] = val; - } + for (unsigned long i = 0ul; i < size; i++) + { + if (increment != 0) + // E.get()[i] = val+i; + E.get()[i] = val + i * increment; + else + E.get()[i] = val; + } return E; - } +} /** Find supported backends * (looking for ADIOS2 or H5) * */ -std::vector getBackends() { +std::vector getBackends() +{ std::vector res; #if openPMD_HAVE_ADIOS2 - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) != "ADIOS1" ) + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") != "ADIOS1") res.emplace_back(".bp"); #endif @@ -206,288 +218,315 @@ class TestInput; class AbstractPattern { public: - AbstractPattern(const TestInput& input); - virtual bool setLayOut(int step) = 0; - unsigned long getNthMeshExtent( unsigned 
int n, Offset& offset, Extent& count ); - virtual void getNthParticleExtent( unsigned int n, unsigned long& offset, unsigned long& count ) = 0; - unsigned int getNumBlocks(); - - unsigned long getTotalNumParticles(); - void run(); - void store(Series& series, int step); - void storeMesh(Series& series, int step, const std::string& fieldName, const std::string& compName); - void storeParticles( ParticleSpecies& currSpecies, int& step ); - - unsigned long countMe(const Extent& count); - unsigned long indexMe(const Offset& count); - - Extent m_GlobalMesh; - Extent m_MinBlock; - const TestInput& m_Input; - - Extent m_GlobalUnitMesh; - std::vector> m_InRankMeshLayout; - - void PrintMe(); -}; // class Abstractpatter - - + AbstractPattern(const TestInput &input); + virtual bool setLayOut(int step) = 0; + unsigned long + getNthMeshExtent(unsigned int n, Offset &offset, Extent &count); + virtual void getNthParticleExtent( + unsigned int n, unsigned long &offset, unsigned long &count) = 0; + unsigned int getNumBlocks(); + + unsigned long getTotalNumParticles(); + void run(); + void store(Series &series, int step); + void storeMesh( + Series &series, + int step, + const std::string &fieldName, + const std::string &compName); + void storeParticles(ParticleSpecies &currSpecies, int &step); + + unsigned long countMe(const Extent &count); + unsigned long indexMe(const Offset &count); + + Extent m_GlobalMesh; + Extent m_MinBlock; + const TestInput &m_Input; + + Extent m_GlobalUnitMesh; + std::vector> m_InRankMeshLayout; + + void PrintMe(); +}; // class Abstractpatter /* * Class defining 1D mesh layout * */ -class OneDimPattern: public AbstractPattern +class OneDimPattern : public AbstractPattern { public: - OneDimPattern(const TestInput& input); - bool setLayOut(int step) override; - unsigned long getNthMeshExtent( unsigned int n, Offset& offset, Extent& count ); - void getNthParticleExtent( unsigned int n, unsigned long& offset, unsigned long& count ) override; - unsigned int getNumBlocks(); + OneDimPattern(const TestInput &input); + bool setLayOut(int step) override; + unsigned long + getNthMeshExtent(unsigned int n, Offset &offset, Extent &count); + void getNthParticleExtent( + unsigned int n, unsigned long &offset, unsigned long &count) override; + unsigned int getNumBlocks(); }; /* * Class defining 2D mesh layout * */ -class TwoDimPattern: public AbstractPattern +class TwoDimPattern : public AbstractPattern { public: - TwoDimPattern(const TestInput& input); + TwoDimPattern(const TestInput &input); - bool setLayOut(int step) override; - void getNthParticleExtent( unsigned int n, unsigned long& offset, unsigned long& count ) override; - void coordinate(unsigned long idx, const Extent& grid, Offset& o); + bool setLayOut(int step) override; + void getNthParticleExtent( + unsigned int n, unsigned long &offset, unsigned long &count) override; + void coordinate(unsigned long idx, const Extent &grid, Offset &o); - Extent m_PatchUnitMesh; // based on m_GlobalUnitMesh + Extent m_PatchUnitMesh; // based on m_GlobalUnitMesh - - std::vector> m_InRankParticleLayout; + std::vector> m_InRankParticleLayout; }; /* * Class defining 3D mesh layout * */ -class ThreeDimPattern: public AbstractPattern +class ThreeDimPattern : public AbstractPattern { public: - ThreeDimPattern(const TestInput& input); + ThreeDimPattern(const TestInput &input); - bool setLayOut(int step) override; - void getNthParticleExtent( unsigned int n, unsigned long& offset, unsigned long& count ) override; - void coordinate(unsigned long idx, const 
Extent& grid, Offset& o); + bool setLayOut(int step) override; + void getNthParticleExtent( + unsigned int n, unsigned long &offset, unsigned long &count) override; + void coordinate(unsigned long idx, const Extent &grid, Offset &o); - Extent m_PatchUnitMesh; // based on m_GlobalUnitMesh + Extent m_PatchUnitMesh; // based on m_GlobalUnitMesh - std::vector> m_InRankParticleLayout; + std::vector> m_InRankParticleLayout; }; - /** Class TestInput * */ class TestInput { public: - TestInput() = default; - - /** GetSeg() - * return number of partitions along the long dimension - * m_Seg can be set from input - * exception is when h5 collective mode is on. m_Seg=1 - */ - unsigned int GetSeg() const - { - if (m_Backend == ".h5") - if (auxiliary::getEnvString( "OPENPMD_HDF5_INDEPENDENT", "ON" ) != "ON") - return 1; - if (m_Seg > 0) - return m_Seg; - return 1; - } - - - int m_MPISize = 1; //!< MPI size - int m_MPIRank = 0; //!< MPI rank - - unsigned long m_XBulk = 64ul; //!< min num of elements at X dimension - unsigned long m_YBulk = 32ul; //!< min num of elements at Y dimension - unsigned long m_ZBulk = 32ul; - - /** relative expansion of min grid(m_XBulk, m_YBulk, m_ZBulk) - * to form a max block. By default max:min=1, meaning suggested - * max block is the same as min block. This parameter is effective - * when the suggested max block size x m_MPISize = global_mesh. - * In other words, this option is set to let per rank workload be - * the max block (and the multiple mini blocks will be from there) - */ - Extent m_MaxOverMin = {1,1,1}; - - int m_Dim = 3; // mesh dim; - /** number of subdivisions for the elements - * - * note that with h5collect mode, m_Seg must be 1 - */ - unsigned int m_Seg = 1; - int m_Steps = 1; //!< num of iterations - std::string m_Backend = ".bp"; //!< I/O backend by file ending - bool m_Unbalance = false; //! load is different among processors - - int m_Ratio = 1; //! particle:mesh ratio - unsigned long m_XFactor = 0; // if not overwritten, use m_MPISize - unsigned long m_YFactor = 8; - unsigned long m_ZFactor = 8; -}; // class TestInput + TestInput() = default; + + /** GetSeg() + * return number of partitions along the long dimension + * m_Seg can be set from input + * exception is when h5 collective mode is on. m_Seg=1 + */ + unsigned int GetSeg() const + { + if (m_Backend == ".h5") + if (auxiliary::getEnvString("OPENPMD_HDF5_INDEPENDENT", "ON") != + "ON") + return 1; + if (m_Seg > 0) + return m_Seg; + return 1; + } + + int m_MPISize = 1; //!< MPI size + int m_MPIRank = 0; //!< MPI rank + + unsigned long m_XBulk = 64ul; //!< min num of elements at X dimension + unsigned long m_YBulk = 32ul; //!< min num of elements at Y dimension + unsigned long m_ZBulk = 32ul; + /** relative expansion of min grid(m_XBulk, m_YBulk, m_ZBulk) + * to form a max block. By default max:min=1, meaning suggested + * max block is the same as min block. This parameter is effective + * when the suggested max block size x m_MPISize = global_mesh. + * In other words, this option is set to let per rank workload be + * the max block (and the multiple mini blocks will be from there) + */ + Extent m_MaxOverMin = {1, 1, 1}; + + int m_Dim = 3; // mesh dim; + /** number of subdivisions for the elements + * + * note that with h5collect mode, m_Seg must be 1 + */ + unsigned int m_Seg = 1; + int m_Steps = 1; //!< num of iterations + std::string m_Backend = ".bp"; //!< I/O backend by file ending + bool m_Unbalance = false; //! load is different among processors + + int m_Ratio = 1; //! 
particle:mesh ratio + unsigned long m_XFactor = 0; // if not overwritten, use m_MPISize + unsigned long m_YFactor = 8; + unsigned long m_ZFactor = 8; +}; // class TestInput -void parse(TestInput& input, std::string line) +void parse(TestInput &input, std::string line) { - // no valid input a=b - if ( line.size() <= 3 ) - return; - if ( line[0] == '#' ) - return; - - std::istringstream iline(line); - - std::string s; - std::vector vec; - while ( std::getline( iline, s, '=' ) ) - vec.push_back(s); - - if ( vec.size() != 2 ) - return; - - if ( vec[0].compare("dim") == 0 ) { - input.m_Dim = atoi(vec[1].c_str()); - return; - } - - if ( vec[0].compare("balanced") == 0 ) { - if ( vec[1].compare("false") == 0 ) - input.m_Unbalance = true; - return; - } - - if ( vec[0].compare("ratio") == 0 ) { - input.m_Ratio = atoi(vec[1].c_str()); - return; - } - - if ( vec[0].compare("steps") == 0 ) { - input.m_Steps = atoi(vec[1].c_str()); - return; - } - - if ( vec[0].compare("rankBlocks") == 0 ) { - if ( vec[1].compare("false") == 0 ) - input.m_Seg = 10; - return; - } - - // now vec[1] is N-dim integers - std::vector numbers; - std::istringstream tmp(vec[1]); - while ( std::getline( tmp, s, ' ' ) ) - numbers.push_back(strtoul( s.c_str(), nullptr, 0 )); - - if ( (numbers.size() == 0) || ((numbers.size() - input.m_Dim) != 0) ) { - if ( input.m_MPIRank == 0 ) - std::cout< 0) input.m_YBulk = numbers[1]; - if (numbers.size() > 1) input.m_ZBulk = numbers[2]; - } - - if ( vec[0].compare("grid") == 0 ) { - input.m_XFactor = numbers[0]; - if (numbers.size() > 0) input.m_YFactor = numbers[1]; - if (numbers.size() > 1) input.m_ZFactor = numbers[2]; - } + // no valid input a=b + if (line.size() <= 3) + return; + if (line[0] == '#') + return; + + std::istringstream iline(line); + + std::string s; + std::vector vec; + while (std::getline(iline, s, '=')) + vec.push_back(s); + + if (vec.size() != 2) + return; + + if (vec[0].compare("dim") == 0) + { + input.m_Dim = atoi(vec[1].c_str()); + return; + } + + if (vec[0].compare("balanced") == 0) + { + if (vec[1].compare("false") == 0) + input.m_Unbalance = true; + return; + } + + if (vec[0].compare("ratio") == 0) + { + input.m_Ratio = atoi(vec[1].c_str()); + return; + } + + if (vec[0].compare("steps") == 0) + { + input.m_Steps = atoi(vec[1].c_str()); + return; + } + + if (vec[0].compare("rankBlocks") == 0) + { + if (vec[1].compare("false") == 0) + input.m_Seg = 10; + return; + } + + // now vec[1] is N-dim integers + std::vector numbers; + std::istringstream tmp(vec[1]); + while (std::getline(tmp, s, ' ')) + numbers.push_back(strtoul(s.c_str(), nullptr, 0)); + + if ((numbers.size() == 0) || ((numbers.size() - input.m_Dim) != 0)) + { + if (input.m_MPIRank == 0) + std::cout << vec[1] << " Expecting " << input.m_Dim + << " dimensions. 
But given input is" << numbers.size() + << std::endl; + return; + } + + if (vec[0].compare("minBlock") == 0) + { + input.m_XBulk = numbers[0]; + if (numbers.size() > 0) + input.m_YBulk = numbers[1]; + if (numbers.size() > 1) + input.m_ZBulk = numbers[2]; + } + + if (vec[0].compare("grid") == 0) + { + input.m_XFactor = numbers[0]; + if (numbers.size() > 0) + input.m_YFactor = numbers[1]; + if (numbers.size() > 1) + input.m_ZFactor = numbers[2]; + } } -int parseArgs( int argc, char *argv[], TestInput& input ) +int parseArgs(int argc, char *argv[], TestInput &input) { - if ( argc == 2 ) { - std::fstream infile; - infile.open(argv[1], std::ios::in); - if ( !infile.is_open() ) { - if ( input.m_MPIRank == 0 ) std::cout<< "No such file: "<= 2 ) { - // coded as: b..b/aaa/c/d=[Yfactor][Xfactor][Balance][Ratio] - // e.g. 200413 => ratio:3; Unbalance:yes; xfactor=4; yfactor=2 - int num = atoi( argv[1] ) ; - if ( num > 10 ) - input.m_Unbalance = (num/10 % 10 > 0); - - if ( num <= 0) - num = 1; - input.m_Ratio = (num-1) % 10 + 1; - - if ( num > 100 ) { - input.m_XFactor = num/100; - if ( input.m_XFactor > 1000 ) { - input.m_YFactor = input.m_XFactor/1000 % 1000; - if ( input.m_XFactor > 1000000 ) - input.m_ZFactor = input.m_XFactor/1000000 % 1000; + if (argc >= 2) + { + // coded as: b..b/aaa/c/d=[Yfactor][Xfactor][Balance][Ratio] + // e.g. 200413 => ratio:3; Unbalance:yes; xfactor=4; yfactor=2 + int num = atoi(argv[1]); + if (num > 10) + input.m_Unbalance = (num / 10 % 10 > 0); + + if (num <= 0) + num = 1; + input.m_Ratio = (num - 1) % 10 + 1; + + if (num > 100) + { + input.m_XFactor = num / 100; + if (input.m_XFactor > 1000) + { + input.m_YFactor = input.m_XFactor / 1000 % 1000; + if (input.m_XFactor > 1000000) + input.m_ZFactor = input.m_XFactor / 1000000 % 1000; else - input.m_ZFactor = input.m_YFactor; + input.m_ZFactor = input.m_YFactor; input.m_XFactor = input.m_XFactor % 1000; - } - } + } + } } - if( argc >= 3 ) - input.m_XBulk = strtoul( argv[2], nullptr, 0 ); + if (argc >= 3) + input.m_XBulk = strtoul(argv[2], nullptr, 0); // e.g. 
32064 => [64,32] - if ( input.m_XBulk > 1000 ) + if (input.m_XBulk > 1000) { - input.m_YBulk = input.m_XBulk/1000 % 1000; - if ( input.m_XBulk > 1000000 ) - input.m_ZBulk = input.m_XBulk/1000000 % 1000; - else - input.m_ZBulk = input.m_YBulk; - input.m_XBulk = input.m_XBulk % 1000; + input.m_YBulk = input.m_XBulk / 1000 % 1000; + if (input.m_XBulk > 1000000) + input.m_ZBulk = input.m_XBulk / 1000000 % 1000; + else + input.m_ZBulk = input.m_YBulk; + input.m_XBulk = input.m_XBulk % 1000; } // if m_Seg > 1; then data of var will be stored as chunks of minigrid // else store as one big block - if( argc >= 4 ) - input.m_Seg = atoi( argv[3] ); + if (argc >= 4) + input.m_Seg = atoi(argv[3]); - if( argc >= 5 ) - input.m_Steps = atoi( argv[4] ); + if (argc >= 5) + input.m_Steps = atoi(argv[4]); if (argc >= 6) - input.m_Dim = atoi( argv[5] ); + input.m_Dim = atoi(argv[5]); - if (argc >= 7) { - long val = strtoul( argv[6], nullptr, 0 ); - input.m_MaxOverMin[0] = val % 1000; + if (argc >= 7) + { + long val = strtoul(argv[6], nullptr, 0); + input.m_MaxOverMin[0] = val % 1000; - if ( val >= 1000 ) - input.m_MaxOverMin[1] = (val/1000) % 1000; - if ( val >= 1000000 ) - input.m_MaxOverMin[2] = (val/1000000) % 1000; + if (val >= 1000) + input.m_MaxOverMin[1] = (val / 1000) % 1000; + if (val >= 1000000) + input.m_MaxOverMin[2] = (val / 1000000) % 1000; } return input.m_Dim; @@ -496,48 +535,56 @@ int parseArgs( int argc, char *argv[], TestInput& input ) * * description of runtime options/flags */ -int -main( int argc, char *argv[] ) +int main(int argc, char *argv[]) { - MPI_Init( &argc, &argv ); + MPI_Init(&argc, &argv); TestInput input; - MPI_Comm_size( MPI_COMM_WORLD, &input.m_MPISize ); - MPI_Comm_rank( MPI_COMM_WORLD, &input.m_MPIRank ); + MPI_Comm_size(MPI_COMM_WORLD, &input.m_MPISize); + MPI_Comm_rank(MPI_COMM_WORLD, &input.m_MPIRank); int dataDim = parseArgs(argc, argv, input); - if ( ( dataDim <= 0 ) || ( dataDim > 3 ) ) { - if ( 0 == input.m_MPIRank) - std::cerr<<" Sorry, Only supports data 1D 2D 3D! not "< 3)) + { + if (0 == input.m_MPIRank) + std::cerr << " Sorry, Only supports data 1D 2D 3D! 
not " << dataDim + << std::endl; + return -1; } - Timer g( " Main ", input.m_MPIRank ); + Timer g(" Main ", input.m_MPIRank); - if ( 0 == input.m_XFactor ) - input.m_XFactor = input.m_MPISize; + if (0 == input.m_XFactor) + input.m_XFactor = input.m_MPISize; auto const backends = getBackends(); - try { - for( auto const & which: backends ) - { - input.m_Backend = which; - if ( 1 == dataDim ) { - OneDimPattern p1(input); - p1.run(); - } else if ( 2 == dataDim ) { - TwoDimPattern p2(input); - p2.run(); - } else { - ThreeDimPattern p3(input); - p3.run(); + try + { + for (auto const &which : backends) + { + input.m_Backend = which; + if (1 == dataDim) + { + OneDimPattern p1(input); + p1.run(); + } + else if (2 == dataDim) + { + TwoDimPattern p2(input); + p2.run(); + } + else + { + ThreeDimPattern p3(input); + p3.run(); + } } - } } - catch (std::exception const & ex ) + catch (std::exception const &ex) { - if (0 == input.m_MPIRank) std::cout<<"Error: "<(); - Dataset dataset = Dataset( datatype, m_GlobalMesh ); + Datatype datatype = determineDatatype(); + Dataset dataset = Dataset(datatype, m_GlobalMesh); - compA.resetDataset( dataset ); + compA.resetDataset(dataset); auto nBlocks = getNumBlocks(); - for ( unsigned int n=0; n 0) { - auto const value = double(1.0*n + 0.01*step); - auto A = createData( blockSize, value, 0.0001 ) ; - compA.storeChunk( A, meshOffset, meshExtent ); + if (blockSize > 0) + { + auto const value = double(1.0 * n + 0.01 * step); + auto A = createData(blockSize, value, 0.0001); + compA.storeChunk(A, meshOffset, meshExtent); } - } - } - - /* - * Write particles. (always 1D) - * - * @param ParticleSpecies Input - * @param step Iteration step - * - */ - void - AbstractPattern::storeParticles( ParticleSpecies& currSpecies, int& step ) - { - currSpecies.setAttribute( "particleSmoothing", "none" ); - currSpecies.setAttribute( "openPMD_STEP", step ); - currSpecies.setAttribute( "p2mRatio", m_Input.m_Ratio ); + } +} + +/* + * Write particles. (always 1D) + * + * @param ParticleSpecies Input + * @param step Iteration step + * + */ +void AbstractPattern::storeParticles(ParticleSpecies &currSpecies, int &step) +{ + currSpecies.setAttribute("particleSmoothing", "none"); + currSpecies.setAttribute("openPMD_STEP", step); + currSpecies.setAttribute("p2mRatio", m_Input.m_Ratio); auto np = getTotalNumParticles(); - auto const intDataSet = openPMD::Dataset(openPMD::determineDatatype< uint64_t >(), {np}); - auto const realDataSet = openPMD::Dataset(openPMD::determineDatatype< double >(), {np}); - currSpecies["id"][RecordComponent::SCALAR].resetDataset( intDataSet ); - currSpecies["charge"][RecordComponent::SCALAR].resetDataset( realDataSet ); + auto const intDataSet = + openPMD::Dataset(openPMD::determineDatatype(), {np}); + auto const realDataSet = + openPMD::Dataset(openPMD::determineDatatype(), {np}); + currSpecies["id"][RecordComponent::SCALAR].resetDataset(intDataSet); + currSpecies["charge"][RecordComponent::SCALAR].resetDataset(realDataSet); - currSpecies["position"]["x"].resetDataset( realDataSet ); + currSpecies["position"]["x"].resetDataset(realDataSet); - currSpecies["positionOffset"]["x"].resetDataset( realDataSet ); - currSpecies["positionOffset"]["x"].makeConstant( 0. 
); + currSpecies["positionOffset"]["x"].resetDataset(realDataSet); + currSpecies["positionOffset"]["x"].makeConstant(0.); auto nBlocks = getNumBlocks(); - for ( unsigned int n=0; n 0) { - auto ids = createData( count, offset, 1 ) ; - currSpecies["id"][RecordComponent::SCALAR].storeChunk(ids, {offset}, {count}); - - auto charges = createData(count, 0.1*step, 0.0001) ; - currSpecies["charge"][RecordComponent::SCALAR].storeChunk(charges, - {offset}, {count}); + // std::cout< 0) + { + auto ids = createData(count, offset, 1); + currSpecies["id"][RecordComponent::SCALAR].storeChunk( + ids, {offset}, {count}); - auto mx = createData(count, 1.0*step, 0.0002) ; - currSpecies["position"]["x"].storeChunk(mx, - {offset}, {count}); + auto charges = createData(count, 0.1 * step, 0.0001); + currSpecies["charge"][RecordComponent::SCALAR].storeChunk( + charges, {offset}, {count}); + auto mx = createData(count, 1.0 * step, 0.0002); + currSpecies["position"]["x"].storeChunk(mx, {offset}, {count}); + } } - } - } // storeParticles - +} // storeParticles - /* - * Return total number of particles - * set to be a multiple of mesh size - * - */ +/* + * Return total number of particles + * set to be a multiple of mesh size + * + */ unsigned long AbstractPattern::getTotalNumParticles() - { +{ unsigned long result = m_Input.m_Ratio; for (unsigned long i : m_GlobalMesh) - result *= i; + result *= i; return result; - } +} /* * Print pattern layout */ void AbstractPattern::PrintMe() - { +{ int ndim = m_MinBlock.size(); - if ( !m_Input.m_MPIRank ) + if (!m_Input.m_MPIRank) { - std::ostringstream g; g<<"\nGlobal: [ "; - std::ostringstream u; u<<" Unit: [ "; - std::ostringstream m; m<<" Block: [ "; - for ( auto i=0; i m_InRankMeshLayout.size()) - return; + return; offset = m_InRankParticleLayout[n].first; - count = m_InRankParticleLayout[n].second; - } - + count = m_InRankParticleLayout[n].second; +} /* * Get nth particel extent in a rank @@ -833,14 +893,15 @@ void TwoDimPattern::getNthParticleExtent( unsigned int n, unsigned long& offset, * @param offset: return * @param count: return */ -void ThreeDimPattern::getNthParticleExtent( unsigned int n, unsigned long& offset, unsigned long& count ) - { +void ThreeDimPattern::getNthParticleExtent( + unsigned int n, unsigned long &offset, unsigned long &count) +{ if (n > m_InRankMeshLayout.size()) - return; + return; offset = m_InRankParticleLayout[n].first; - count = m_InRankParticleLayout[n].second; - } + count = m_InRankParticleLayout[n].second; +} /* * Set layout @@ -855,48 +916,50 @@ bool OneDimPattern::setLayOut(int step) unsigned long unitOffset = m_Input.m_MPIRank * unitCount; - if ( m_Input.m_MPISize >= 2 ) + if (m_Input.m_MPISize >= 2) { - if ( m_Input.m_Unbalance ) - { - if (step % 3 == 1) + if (m_Input.m_Unbalance) { - if ( m_Input.m_MPIRank % 10 == 0 ) // no load - unitCount = 0; - if ( m_Input.m_MPIRank % 10 == 1 ) // double load - { - unitOffset -= unitCount; - unitCount += unitCount; + if (step % 3 == 1) + { + if (m_Input.m_MPIRank % 10 == 0) // no load + unitCount = 0; + if (m_Input.m_MPIRank % 10 == 1) // double load + { + unitOffset -= unitCount; + unitCount += unitCount; + } + } } } - } - } if (0 == unitCount) - return true; + return true; - auto numPartition = m_Input.GetSeg(); - if ( unitCount < numPartition ) - numPartition = unitCount; + auto numPartition = m_Input.GetSeg(); + if (unitCount < numPartition) + numPartition = unitCount; auto avg = unitCount / numPartition; - for ( unsigned int i=0 ; i< numPartition; i++ ) + for (unsigned int i = 0; i < 
numPartition; i++) { - Offset offset = { unitOffset * m_MinBlock[0] }; - if ( i < (numPartition - 1) ) { - Extent count = { avg * m_MinBlock[0] }; - m_InRankMeshLayout.emplace_back(offset, count); - } else { - auto res = unitCount - avg * (numPartition - 1); - Extent count = { res * m_MinBlock[0] }; - m_InRankMeshLayout.emplace_back(offset, count); - } + Offset offset = {unitOffset * m_MinBlock[0]}; + if (i < (numPartition - 1)) + { + Extent count = {avg * m_MinBlock[0]}; + m_InRankMeshLayout.emplace_back(offset, count); + } + else + { + auto res = unitCount - avg * (numPartition - 1); + Extent count = {res * m_MinBlock[0]}; + m_InRankMeshLayout.emplace_back(offset, count); + } } return true; } - /* * Retrieves ParticleExtent * @param n: nth block for this rank @@ -904,143 +967,153 @@ bool OneDimPattern::setLayOut(int step) * @param count: return * */ -void OneDimPattern::getNthParticleExtent( unsigned int n, unsigned long& offset, unsigned long& count ) - { +void OneDimPattern::getNthParticleExtent( + unsigned int n, unsigned long &offset, unsigned long &count) +{ if (n > m_InRankMeshLayout.size()) - return; + return; offset = indexMe(m_InRankMeshLayout[n].first) * m_Input.m_Ratio; - count = countMe(m_InRankMeshLayout[n].second) * m_Input.m_Ratio; - } - + count = countMe(m_InRankMeshLayout[n].second) * m_Input.m_Ratio; +} /* Constructor TwoDimPattern * Defines 2D layout * @param input: user specifications */ -TwoDimPattern::TwoDimPattern(const TestInput& input) - :AbstractPattern(input) +TwoDimPattern::TwoDimPattern(const TestInput &input) : AbstractPattern(input) { - m_GlobalMesh = { input.m_XBulk * input.m_XFactor, input.m_YBulk * input.m_YFactor }; - m_MinBlock = { input.m_XBulk, input.m_YBulk }; - - m_GlobalUnitMesh = { input.m_XFactor, input.m_YFactor }; - - auto m = (input.m_XFactor * input.m_YFactor) % input.m_MPISize; - if ( m != 0) - throw std::runtime_error( "Unable to balance load for 2D mesh among ranks "); - - m = (input.m_XFactor * input.m_YFactor) / input.m_MPISize; - - if ( input.m_XFactor % input.m_MPISize == 0 ) - m_PatchUnitMesh = { input.m_XFactor / input.m_MPISize, m_GlobalUnitMesh[1] }; - else if ( input.m_YFactor % input.m_MPISize == 0 ) - m_PatchUnitMesh = { m_GlobalUnitMesh[0], input.m_YFactor / input.m_MPISize }; - else if ( input.m_XFactor % m == 0 ) - m_PatchUnitMesh = {m, 1}; - else if ( input.m_YFactor % m == 0 ) - m_PatchUnitMesh = {1, m}; - else // e.g. unitMesh={8,9} mpisize=12, m=6, patch unit needs to be {4,3} + m_GlobalMesh = { + input.m_XBulk * input.m_XFactor, input.m_YBulk * input.m_YFactor}; + m_MinBlock = {input.m_XBulk, input.m_YBulk}; + + m_GlobalUnitMesh = {input.m_XFactor, input.m_YFactor}; + + auto m = (input.m_XFactor * input.m_YFactor) % input.m_MPISize; + if (m != 0) + throw std::runtime_error( + "Unable to balance load for 2D mesh among ranks "); + + m = (input.m_XFactor * input.m_YFactor) / input.m_MPISize; + + if (input.m_XFactor % input.m_MPISize == 0) + m_PatchUnitMesh = { + input.m_XFactor / input.m_MPISize, m_GlobalUnitMesh[1]}; + else if (input.m_YFactor % input.m_MPISize == 0) + m_PatchUnitMesh = { + m_GlobalUnitMesh[0], input.m_YFactor / input.m_MPISize}; + else if (input.m_XFactor % m == 0) + m_PatchUnitMesh = {m, 1}; + else if (input.m_YFactor % m == 0) + m_PatchUnitMesh = {1, m}; + else // e.g. 
unitMesh={8,9} mpisize=12, m=6, patch unit needs to be {4,3} { - throw std::runtime_error( "Wait for next version with other 2D patch configurations" ); + throw std::runtime_error( + "Wait for next version with other 2D patch configurations"); } - PrintMe(); + PrintMe(); } - - /* * Set layout * @param step: iteration step * */ -bool TwoDimPattern::setLayOut(int step) { +bool TwoDimPattern::setLayOut(int step) +{ m_InRankMeshLayout.clear(); m_InRankParticleLayout.clear(); unsigned long patchOffset = m_Input.m_MPIRank; - unsigned long patchCount = 1; + unsigned long patchCount = 1; - if ( m_Input.m_MPISize >= 2 ) + if (m_Input.m_MPISize >= 2) { - if ( m_Input.m_Unbalance ) - { - if (step % 3 == 1) + if (m_Input.m_Unbalance) { - if ( m_Input.m_MPIRank % 4 == 0 ) // no load - patchCount = 0; - if ( m_Input.m_MPIRank % 4 == 1 ) // double load - { - patchOffset -= patchCount; - patchCount += patchCount; + if (step % 3 == 1) + { + if (m_Input.m_MPIRank % 4 == 0) // no load + patchCount = 0; + if (m_Input.m_MPIRank % 4 == 1) // double load + { + patchOffset -= patchCount; + patchCount += patchCount; + } } - } - } + } } if (0 == patchCount) - return true; + return true; - auto numPartition = m_Input.GetSeg(); + auto numPartition = m_Input.GetSeg(); - Extent patchGrid = { m_GlobalUnitMesh[0]/m_PatchUnitMesh[0], - m_GlobalUnitMesh[1]/m_PatchUnitMesh[1] }; + Extent patchGrid = { + m_GlobalUnitMesh[0] / m_PatchUnitMesh[0], + m_GlobalUnitMesh[1] / m_PatchUnitMesh[1]}; - Offset p {0,0}; + Offset p{0, 0}; coordinate(patchOffset, patchGrid, p); - Offset c {1,1}; - if ( patchCount > 1 ) { - coordinate( patchCount -1, patchGrid, c); - c[0] += 1; - c[1] += 1; + Offset c{1, 1}; + if (patchCount > 1) + { + coordinate(patchCount - 1, patchGrid, c); + c[0] += 1; + c[1] += 1; } // particle offset at this rank - unsigned long pOff = countMe(m_PatchUnitMesh) * patchOffset * countMe(m_MinBlock) * m_Input.m_Ratio; + unsigned long pOff = countMe(m_PatchUnitMesh) * patchOffset * + countMe(m_MinBlock) * m_Input.m_Ratio; - if ( 1 == numPartition ) - { - Offset offset = { p[0] * m_PatchUnitMesh[0] * m_MinBlock[0], - p[1] * m_PatchUnitMesh[1] * m_MinBlock[1] }; + if (1 == numPartition) + { + Offset offset = { + p[0] * m_PatchUnitMesh[0] * m_MinBlock[0], + p[1] * m_PatchUnitMesh[1] * m_MinBlock[1]}; - Extent count = { c[0] * m_PatchUnitMesh[0] * m_MinBlock[0], - c[1] * m_PatchUnitMesh[1] * m_MinBlock[1] }; + Extent count = { + c[0] * m_PatchUnitMesh[0] * m_MinBlock[0], + c[1] * m_PatchUnitMesh[1] * m_MinBlock[1]}; - m_InRankMeshLayout.emplace_back(offset, count); + m_InRankMeshLayout.emplace_back(offset, count); - auto pCount = countMe(count) * m_Input.m_Ratio; - m_InRankParticleLayout.emplace_back(pOff, pCount); - } + auto pCount = countMe(count) * m_Input.m_Ratio; + m_InRankParticleLayout.emplace_back(pOff, pCount); + } else - { - Offset unitOffset = { p[0] * m_PatchUnitMesh[0], p[1] * m_PatchUnitMesh[1] }; - Extent unitExtent = { c[0] * m_PatchUnitMesh[0], c[1] * m_PatchUnitMesh[1] }; + { + Offset unitOffset = { + p[0] * m_PatchUnitMesh[0], p[1] * m_PatchUnitMesh[1]}; + Extent unitExtent = { + c[0] * m_PatchUnitMesh[0], c[1] * m_PatchUnitMesh[1]}; - auto counter = pOff; + auto counter = pOff; - for ( unsigned long i=0; i m_InRankMeshLayout.size()) - return 0; + return 0; offset = m_InRankMeshLayout[n].first; - count = m_InRankMeshLayout[n].second; + count = m_InRankMeshLayout[n].second; return countMe(count); - } +} /* * Get coordinate given c order index @@ -1072,52 +1146,53 @@ unsigned long 
AbstractPattern::getNthMeshExtent( unsigned int n, Offset& offset * @param grid: layout * @param result: return */ -inline void TwoDimPattern::coordinate(unsigned long idx, const Extent& grid, Offset& result) +inline void +TwoDimPattern::coordinate(unsigned long idx, const Extent &grid, Offset &result) { - auto yy = idx % grid[1]; - auto xx = ( idx - yy ) / grid[1]; + auto yy = idx % grid[1]; + auto xx = (idx - yy) / grid[1]; - result[0] = xx; - result[1] = yy; + result[0] = xx; + result[1] = yy; } /* Returns c order index in the global mesh * @param offset: input, offset in the global mesh */ -inline unsigned long AbstractPattern::indexMe(const Offset& offset) +inline unsigned long AbstractPattern::indexMe(const Offset &offset) { - if (offset.size() == 0) - return 0; + if (offset.size() == 0) + return 0; - if (offset.size() == 1) - return offset[0]; + if (offset.size() == 1) + return offset[0]; - if (offset.size() == 2) + if (offset.size() == 2) { - unsigned long result = offset[1]; - result += offset[0] * m_GlobalMesh[1]; - return result; + unsigned long result = offset[1]; + result += offset[0] * m_GlobalMesh[1]; + return result; } - return 0; + return 0; } /* computes size of a block * @param count: block extent */ -inline unsigned long AbstractPattern::countMe(const Extent& count) +inline unsigned long AbstractPattern::countMe(const Extent &count) { - if (count.size() == 0) - return 0; + if (count.size() == 0) + return 0; - unsigned long result = count[0]; - if ( count.size() >= 2 ) - result *= count[1]; + unsigned long result = count[0]; + if (count.size() >= 2) + result *= count[1]; - if ( count.size() >= 3 ) - result *= count[2]; + if (count.size() >= 3) + result *= count[2]; - return result; + return result; } /* @@ -1126,173 +1201,201 @@ inline unsigned long AbstractPattern::countMe(const Extent& count) * @param grid: layout * @param result: return */ -inline void ThreeDimPattern::coordinate(unsigned long idx, const Extent& grid, Offset& result) +inline void ThreeDimPattern::coordinate( + unsigned long idx, const Extent &grid, Offset &result) { - auto zz = idx % grid[2]; - auto m = (idx - zz)/grid[2]; - auto yy = m % grid[1]; - auto xx = ( m - yy ) / grid[1]; - - result[0] = xx; - result[1] = yy; - result[2] = zz; + auto zz = idx % grid[2]; + auto m = (idx - zz) / grid[2]; + auto yy = m % grid[1]; + auto xx = (m - yy) / grid[1]; + + result[0] = xx; + result[1] = yy; + result[2] = zz; } - /* * Constructor ThreeDimPattern * Defines 3D layout * @param input: user specifications * */ -ThreeDimPattern::ThreeDimPattern(const TestInput& input) - :AbstractPattern(input) +ThreeDimPattern::ThreeDimPattern(const TestInput &input) + : AbstractPattern(input) { - { - m_GlobalMesh = { input.m_XBulk * input.m_XFactor, - input.m_YBulk * input.m_YFactor, - input.m_ZBulk * input.m_ZFactor }; // Z & Y have same size + { + m_GlobalMesh = { + input.m_XBulk * input.m_XFactor, + input.m_YBulk * input.m_YFactor, + input.m_ZBulk * input.m_ZFactor}; // Z & Y have same size - m_MinBlock = { input.m_XBulk, input.m_YBulk, input.m_ZBulk }; - m_GlobalUnitMesh = { input.m_XFactor, input.m_YFactor, input.m_ZFactor }; + m_MinBlock = {input.m_XBulk, input.m_YBulk, input.m_ZBulk}; + m_GlobalUnitMesh = {input.m_XFactor, input.m_YFactor, input.m_ZFactor}; - PrintMe(); - } - - //unsigned long zFactor = input.m_YFactor; - auto m = (input.m_ZFactor * input.m_XFactor * input.m_YFactor) % input.m_MPISize; - if ( m != 0) - throw std::runtime_error( "Unable to balance load for 3D mesh among ranks "); - - m = (input.m_ZFactor 
* input.m_XFactor * input.m_YFactor) / input.m_MPISize; - auto maxRatio = input.m_MaxOverMin[0] * input.m_MaxOverMin[1] * input.m_MaxOverMin[2]; - if ( maxRatio == m ) { - m_PatchUnitMesh = { input.m_MaxOverMin[0], input.m_MaxOverMin[1], input.m_MaxOverMin[2] }; - if ( !m_Input.m_MPIRank ) - std::cout<<" Using maxOverMin="< 0) && ( (input.m_XFactor * input.m_YFactor) % input.m_MPISize == 0 )) - { - if ( input.m_XFactor % m == 0 ) - m_PatchUnitMesh = {m, 1, input.m_ZFactor}; - else if ( input.m_YFactor % m == 0 ) - m_PatchUnitMesh = {1, m, input.m_ZFactor}; - else - throw std::runtime_error( "Wait for next version with other 3D patch configurations" ); - } - } + PrintMe(); + } + + // unsigned long zFactor = input.m_YFactor; + auto m = + (input.m_ZFactor * input.m_XFactor * input.m_YFactor) % input.m_MPISize; + if (m != 0) + throw std::runtime_error( + "Unable to balance load for 3D mesh among ranks "); + + m = (input.m_ZFactor * input.m_XFactor * input.m_YFactor) / input.m_MPISize; + auto maxRatio = + input.m_MaxOverMin[0] * input.m_MaxOverMin[1] * input.m_MaxOverMin[2]; + if (maxRatio == m) + { + m_PatchUnitMesh = { + input.m_MaxOverMin[0], + input.m_MaxOverMin[1], + input.m_MaxOverMin[2]}; + if (!m_Input.m_MPIRank) + std::cout << " Using maxOverMin=" << input.m_MaxOverMin[0] << ", " + << input.m_MaxOverMin[1] << ", " << input.m_MaxOverMin[2] + << std::endl; + ; + return; + } + + if (input.m_XFactor % input.m_MPISize == 0) + m_PatchUnitMesh = { + input.m_XFactor / input.m_MPISize, + m_GlobalUnitMesh[1], + m_GlobalUnitMesh[2]}; + else if (input.m_YFactor % input.m_MPISize == 0) + m_PatchUnitMesh = { + m_GlobalUnitMesh[0], + input.m_YFactor / input.m_MPISize, + m_GlobalUnitMesh[2]}; + else if (input.m_XFactor % m == 0) + m_PatchUnitMesh = {m, 1, 1}; + else if (input.m_YFactor % m == 0) + m_PatchUnitMesh = {1, m, 1}; + else if (input.m_ZFactor % m == 0) + m_PatchUnitMesh = {1, 1, m}; + else + { + m = (input.m_XFactor * input.m_YFactor) / input.m_MPISize; + if ((m > 0) && + ((input.m_XFactor * input.m_YFactor) % input.m_MPISize == 0)) + { + if (input.m_XFactor % m == 0) + m_PatchUnitMesh = {m, 1, input.m_ZFactor}; + else if (input.m_YFactor % m == 0) + m_PatchUnitMesh = {1, m, input.m_ZFactor}; + else + throw std::runtime_error( + "Wait for next version with other 3D patch configurations"); + } + } } /* * set layout of grids * @ param step: iteration step */ -bool ThreeDimPattern::setLayOut(int step) { +bool ThreeDimPattern::setLayOut(int step) +{ m_InRankMeshLayout.clear(); m_InRankParticleLayout.clear(); unsigned long patchOffset = m_Input.m_MPIRank; - unsigned long patchCount = 1; + unsigned long patchCount = 1; - if ( m_Input.m_MPISize >= 2 ) + if (m_Input.m_MPISize >= 2) { - if ( m_Input.m_Unbalance ) - { - if (step % 3 == 1) + if (m_Input.m_Unbalance) { - if ( m_Input.m_MPIRank % 4 == 0 ) // no load - patchCount = 0; - if ( m_Input.m_MPIRank % 4 == 1 ) // double load - { - patchOffset -= patchCount; - patchCount += patchCount; - } + if (step % 3 == 1) + { + if (m_Input.m_MPIRank % 4 == 0) // no load + patchCount = 0; + if (m_Input.m_MPIRank % 4 == 1) // double load + { + patchOffset -= patchCount; + patchCount += patchCount; + } + } } - } } if (0 == patchCount) - return true; + return true; - auto numPartition = m_Input.GetSeg(); + auto numPartition = m_Input.GetSeg(); - Extent patchGrid = { m_GlobalUnitMesh[0]/m_PatchUnitMesh[0], - m_GlobalUnitMesh[1]/m_PatchUnitMesh[1], - m_GlobalUnitMesh[2]/m_PatchUnitMesh[2] }; + Extent patchGrid = { + m_GlobalUnitMesh[0] / m_PatchUnitMesh[0], + 
m_GlobalUnitMesh[1] / m_PatchUnitMesh[1], + m_GlobalUnitMesh[2] / m_PatchUnitMesh[2]}; - - Offset p {0, 0, 0}; + Offset p{0, 0, 0}; coordinate(patchOffset, patchGrid, p); - Offset c {1,1,1}; - if ( patchCount > 1 ) { - coordinate(patchCount -1, patchGrid, c); - c[0] += 1; - c[1] += 1; - c[2] += 1; + Offset c{1, 1, 1}; + if (patchCount > 1) + { + coordinate(patchCount - 1, patchGrid, c); + c[0] += 1; + c[1] += 1; + c[2] += 1; } // particle offset at this rank - unsigned long pOff = countMe(m_PatchUnitMesh) * patchOffset * countMe(m_MinBlock) * m_Input.m_Ratio; + unsigned long pOff = countMe(m_PatchUnitMesh) * patchOffset * + countMe(m_MinBlock) * m_Input.m_Ratio; if (1 == numPartition) - { - Offset offset = { p[0] * m_PatchUnitMesh[0] * m_MinBlock[0], - p[1] * m_PatchUnitMesh[1] * m_MinBlock[1], - p[2] * m_PatchUnitMesh[2] * m_MinBlock[2] }; + { + Offset offset = { + p[0] * m_PatchUnitMesh[0] * m_MinBlock[0], + p[1] * m_PatchUnitMesh[1] * m_MinBlock[1], + p[2] * m_PatchUnitMesh[2] * m_MinBlock[2]}; - Extent count = { c[0] * m_PatchUnitMesh[0] * m_MinBlock[0], - c[1] * m_PatchUnitMesh[1] * m_MinBlock[1], - c[2] * m_PatchUnitMesh[2] * m_MinBlock[2] }; + Extent count = { + c[0] * m_PatchUnitMesh[0] * m_MinBlock[0], + c[1] * m_PatchUnitMesh[1] * m_MinBlock[1], + c[2] * m_PatchUnitMesh[2] * m_MinBlock[2]}; - m_InRankMeshLayout.emplace_back(offset, count); + m_InRankMeshLayout.emplace_back(offset, count); - auto pCount = countMe(count) * m_Input.m_Ratio; - m_InRankParticleLayout.emplace_back(pOff, pCount); - } + auto pCount = countMe(count) * m_Input.m_Ratio; + m_InRankParticleLayout.emplace_back(pOff, pCount); + } else - { - Offset unitOffset = { p[0] * m_PatchUnitMesh[0], p[1] * m_PatchUnitMesh[1], p[2] * m_PatchUnitMesh[2] }; - Extent unitExtent = { c[0] * m_PatchUnitMesh[0], c[1] * m_PatchUnitMesh[1], c[2] * m_PatchUnitMesh[2] }; - - auto counter = pOff; - - for ( unsigned long i=0; i. 
*/ -#include #include +#include #include +#include +#include #include +#include #include -#include -#include -#include -#include #include -#include +#include #include +#include #if openPMD_HAVE_ADIOS2 -# include +#include #endif using std::cout; using namespace openPMD; - /** The Memory profiler class for profiling purpose * * Simple Memory usage report that works on linux system */ -static std::chrono::time_point m_ProgStart = std::chrono::system_clock::now(); +static std::chrono::time_point m_ProgStart = + std::chrono::system_clock::now(); class MemoryProfiler { @@ -56,56 +56,60 @@ class MemoryProfiler * @param[in] rank MPI rank * @param[in] tag item name to measure */ - MemoryProfiler(int rank, const std::string& tag) { - m_Rank = rank; + MemoryProfiler(int rank, const std::string &tag) + { + m_Rank = rank; #if defined(__linux) - //m_Name = "/proc/meminfo"; - m_Name = "/proc/self/status"; - Display(tag); + // m_Name = "/proc/meminfo"; + m_Name = "/proc/self/status"; + Display(tag); #else - (void)tag; - m_Name = ""; + (void)tag; + m_Name = ""; #endif - } + } /** * - * Read from /proc/self/status and display the Virtual Memory info at rank 0 on console + * Read from /proc/self/status and display the Virtual Memory info at rank 0 + * on console * * @param tag item name to measure * @param rank MPI rank */ - void Display(const std::string& tag){ - if (0 == m_Name.size()) - return; + void Display(const std::string &tag) + { + if (0 == m_Name.size()) + return; - if (m_Rank > 0) - return; + if (m_Rank > 0) + return; - std::cout<<" memory at: "<( m_End - m_Start ).count(); - double secs = millis/1000.0; - if( m_Rank > 0 ) - return; + double millis = std::chrono::duration_cast( + m_End - m_Start) + .count(); + double secs = millis / 1000.0; + if (m_Rank > 0) + return; std::cout << " [" << m_Tag << "] took:" << secs << " seconds.\n"; - std::cout <<" \t From ProgStart in seconds "<< - std::chrono::duration_cast(m_End - m_ProgStart).count()/1000.0<( + m_End - m_ProgStart) + .count() / + 1000.0 + << std::endl; + + std::cout << std::endl; } + private: std::chrono::time_point m_Start; std::chrono::time_point m_End; @@ -153,7 +165,6 @@ class Timer int m_Rank = 0; }; - /** createData * generate a shared ptr of given size with given type & default value * @@ -163,38 +174,40 @@ class Timer * */ -template -std::shared_ptr< T > createData(const unsigned long& size, const T& val, bool increment=false) - { - auto E = std::shared_ptr< T > { - new T[size], []( T * d ) {delete[] d;} - }; - - for(unsigned long i = 0ul; i < size; i++ ) - { - if (increment) - E.get()[i] = val+i; - else - E.get()[i] = val; - } +template +std::shared_ptr +createData(const unsigned long &size, const T &val, bool increment = false) +{ + auto E = std::shared_ptr{new T[size], [](T *d) { delete[] d; }}; + + for (unsigned long i = 0ul; i < size; i++) + { + if (increment) + E.get()[i] = val + i; + else + E.get()[i] = val; + } return E; - } +} /** Find supported backends * (looking for ADIOS2 or H5) * */ -std::vector getBackends() { +std::vector getBackends() +{ std::vector res; #if openPMD_HAVE_ADIOS2 - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) != "ADIOS1" ) + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") != "ADIOS1") res.emplace_back(".bp"); - if( auxiliary::getEnvString( "OPENPMD_BENCHMARK_USE_BACKEND", "NOT_SET" ) == "ADIOS" ) + if (auxiliary::getEnvString("OPENPMD_BENCHMARK_USE_BACKEND", "NOT_SET") == + "ADIOS") return res; #endif #if openPMD_HAVE_HDF5 - if( auxiliary::getEnvString( 
"OPENPMD_BENCHMARK_USE_BACKEND", "NOT_SET" ) == "HDF5" ) + if (auxiliary::getEnvString("OPENPMD_BENCHMARK_USE_BACKEND", "NOT_SET") == + "HDF5") res.clear(); res.emplace_back(".h5"); #endif @@ -210,582 +223,623 @@ std::vector getBackends() { class TestInput { public: - TestInput() = default; - - /* - * Run the read tests - * assumes both GroupBased and fileBased series of this prefix exist. - * @ param prefix file prefix - * e.g. abc.bp (for group/variable based encoding) - * abc (for file based encoding) - * - */ - void run(const std::string& prefix) - { - if (prefix.find(m_Backend) == std::string::npos) { - // file based, default to %07T - std::ostringstream s; - s << prefix << "_%07T" << m_Backend; - std::string filename = s.str(); - read(filename); - } else { - // group or variable based, or filebased with fullname - read(prefix); - } - } // run - - - /* - * read a file - * - * @param filename - * - */ - void - read(const std::string& filename) - { - try { - std::string tag = "Reading: "+filename ; - Timer kk(tag, m_MPIRank); - Series series = Series(filename, Access::READ_ONLY, MPI_COMM_WORLD); - - int numIterations = series.iterations.size(); - - if ( 0 == m_MPIRank ) - { - std::cout << " "< (unsigned long) m_MPISize) || ((unsigned long)m_MPISize % (grid[0]*grid[1]) != 0) ) + TestInput() = default; + + /* + * Run the read tests + * assumes both GroupBased and fileBased series of this prefix exist. + * @ param prefix file prefix + * e.g. abc.bp (for group/variable based encoding) + * abc (for file based encoding) + * + */ + void run(const std::string &prefix) { - if ( 0 == m_MPIRank ) - std::cerr<<" please check the grid decompisition. need to fit given mpi size:"< (unsigned long)m_MPISize) || + ((unsigned long)m_MPISize % (grid[0] * grid[1]) != 0)) + { + if (0 == m_MPIRank) + std::cerr << " please check the grid decompisition. need to " + "fit given mpi size:" + << m_MPISize << std::endl; + return; + } - unsigned long c=1; - for (unsigned long i : grid) { - c = c*i; + if ((meshExtent[0] % grid[0] != 0) || (meshExtent[1] % grid[1] != 0)) + { + if (0 == m_MPIRank) + std::cerr + << " Not able to divide rho mesh by specified grid on X-Y: " + << grid[0] << "*" << grid[1] << std::endl; + return; + } + + Extent count(meshExtent.size(), 1); + count[0] = meshExtent[0] / grid[0]; + count[1] = meshExtent[1] / grid[1]; + + if (meshExtent.size() == 3) + { + grid[2] = m_MPISize / (grid[0] * grid[1]); + count[2] = meshExtent[2] / grid[2]; + } + + unsigned long c = 1; + for (unsigned long i : grid) + { + c = c * i; + } + + if (c != (unsigned long)m_MPISize) + { + if (0 == m_MPIRank) + std::cerr + << " Not able to divide full scan according to input. " + << std::endl; + return; + } + + std::ostringstream s; + s << " Full Scan:"; + Timer fullscanTimer(s.str(), m_MPIRank); + + Offset offset(grid.size(), 0); + + int m = m_MPIRank; + for (int i = (int)grid.size() - 1; i >= 0; i--) + { + offset[i] = m % grid[i]; + m = (m - offset[i]) / grid[i]; + } + + for (unsigned int i = 0; i < grid.size(); i++) + offset[i] *= count[i]; + + auto slice_data = rho.loadChunk(offset, count); + series.flush(); } - if ( c != (unsigned long) m_MPISize ) - { - if ( 0 == m_MPIRank ) - std::cerr<<" Not able to divide full scan according to input. "<=0; i-- ) - { - offset[i] = m % grid[i]; - m = (m - offset[i])/grid[i]; - } - - for (unsigned int i=0; i(offset, count); - series.flush(); - } - - /* - * Read a block on a mesh. 
- * Chooses block according to 3 digit m_Pattern input: FDP: - * F = fraction (block will be 1/F along a dimension) - * D = blocks grows with this dimenstion among all ranks. - * Invalid D means only rank 0 will read a block - * P = when only rank 0 is active, pick where the block will locate: - * center(0), top left(1), bottom right(2) - * - * @param series input - * @param rho a mesh - * - */ - void - block(Series& series, MeshRecordComponent& rho) - { - if (m_Pattern < 100) return; // slicer - - if (m_Pattern >= 10000) return; // full scan - - unsigned int alongDim = m_Pattern/10 % 10; - - unsigned int fractionOnDim = m_Pattern/100; - - Extent meshExtent = rho.getExtent(); - for (unsigned long i : meshExtent) - { - unsigned long blob = i/fractionOnDim; - if ( 0 == blob ) { - if ( m_MPIRank == 0 ) - std::cout<<"Unable to use franction:"<= 10000) + return; // full scan + + unsigned int alongDim = m_Pattern / 10 % 10; + + unsigned int fractionOnDim = m_Pattern / 100; + + Extent meshExtent = rho.getExtent(); + for (unsigned long i : meshExtent) + { + unsigned long blob = i / fractionOnDim; + if (0 == blob) + { + if (m_MPIRank == 0) + std::cout << "Unable to use franction:" << fractionOnDim + << std::endl; + return; + } } - } - - bool atCenter = ( (m_Pattern % 10 == 0) || (fractionOnDim == 1) ); - bool atTopLeft = ( (m_Pattern % 10 == 1) && (fractionOnDim > 1) ); - bool atBottomRight = ( (m_Pattern % 10 == 2) && (fractionOnDim > 1) ); - bool overlay = ( (m_Pattern % 10 == 3) && (fractionOnDim > 1) ); - - bool rankZeroOnly = ( alongDim == 4); - bool diagnalBlocks = ( alongDim > meshExtent.size() ) && !rankZeroOnly; - - std::ostringstream s; - s <<" Block retrieval fraction=1/"< 1)); + bool atBottomRight = ((m_Pattern % 10 == 2) && (fractionOnDim > 1)); + bool overlay = ((m_Pattern % 10 == 3) && (fractionOnDim > 1)); + + bool rankZeroOnly = (alongDim == 4); + bool diagnalBlocks = (alongDim > meshExtent.size()) && !rankZeroOnly; + + std::ostringstream s; + s << " Block retrieval fraction=1/" << fractionOnDim; + + if (rankZeroOnly) { - if ( atTopLeft ) - off[i] = 0; // top corner - else if ( atBottomRight ) - off[i] = (meshExtent[i]-blob); // bottom corner - else if (atCenter) - off[i] = (fractionOnDim/2) * blob; // middle corner + s << " rank 0 only, location:"; + if (atCenter) + s << " center "; + else if (atTopLeft) + s << " topleft "; + else if (atBottomRight) + s << " bottomRight "; else if (overlay) - off[i] = (fractionOnDim/2) * blob - blob/3; // near middle corner + s << " near center "; } + else if (diagnalBlocks) + s << " blockStyle = diagnal"; else + s << " blockStyle = alongDim" << alongDim; + + if (rankZeroOnly && m_MPIRank) + return; + Timer blockTime(s.str(), m_MPIRank); + + Offset off(meshExtent.size(), 0); + Extent ext(meshExtent.size(), 1); + + for (unsigned int i = 0; i < meshExtent.size(); i++) + { + unsigned long blob = meshExtent[i] / fractionOnDim; + ext[i] = blob; + + if (rankZeroOnly) + { + if (atTopLeft) + off[i] = 0; // top corner + else if (atBottomRight) + off[i] = (meshExtent[i] - blob); // bottom corner + else if (atCenter) + off[i] = (fractionOnDim / 2) * blob; // middle corner + else if (overlay) + off[i] = (fractionOnDim / 2) * blob - + blob / 3; // near middle corner + } + else + { + off[i] = m_MPIRank * blob; + + if (!diagnalBlocks) + if (i != alongDim) + off[i] = (fractionOnDim / 2) * blob; // middle corner + } + } + + auto prettyLambda = [&](Offset oo, Extent cc) { + std::ostringstream o; + o << "[ "; + std::ostringstream c; + c << "[ "; + for (unsigned int 
k = 0; k < oo.size(); k++) + { + o << oo[k] << " "; + c << cc[k] << " "; + } + std::cout << o.str() << "] + " << c.str() << "]" << std::endl; + ; + }; + + if ((unsigned int)m_MPIRank < fractionOnDim) { - off[i] = m_MPIRank * blob; + auto slice_data = rho.loadChunk(off, ext); + series.flush(); - if ( !diagnalBlocks ) - if ( i != alongDim ) - off[i] = (fractionOnDim/2) * blob; // middle corner + std::cout << " Rank: " << m_MPIRank; + + prettyLambda(off, ext); } - } - - auto prettyLambda = [&](Offset oo, Extent cc) { - std::ostringstream o; o<<"[ "; - std::ostringstream c; c<<"[ "; - for (unsigned int k=0; k= meshExtent.size()) + return false; + + // std::ostringstream s; + if (whichDim == 0) + s << "Row slice time: "; + else if (whichDim == 1) + s << "Col slice time: "; + else + s << "Z slice time: "; + if (rankZeroOnly) + s << " rank 0 only"; + + off[whichDim] = m_MPIRank % meshExtent[whichDim]; + for (unsigned int i = 0; i < meshExtent.size(); i++) + { + if (1 == meshExtent.size()) + whichDim = 100; + if (i != whichDim) + ext[i] = meshExtent[i]; + } + + std::ostringstream so, sc; + so << " Rank: " << m_MPIRank << " offset [ "; + sc << " count[ "; + for (unsigned int i = 0; i < meshExtent.size(); i++) + { + so << off[i] << " "; + sc << ext[i] << " "; + } + so << "]"; + sc << "]"; + std::cout << so.str() << sc.str() << std::endl; + return true; + } + + /* + * read a slice on a mesh + * + * @param series input + * @param rho a mesh + * @param rankZeroOnly only read on rank 0. Other ranks idle + * + */ + void slice( + Series &series, + MeshRecordComponent &rho, + unsigned int whichDim, + bool rankZeroOnly) + { + Extent meshExtent = rho.getExtent(); + + Offset off(meshExtent.size(), 0); + Extent ext(meshExtent.size(), 1); + + std::ostringstream s; + if (!getSlice(meshExtent, whichDim, rankZeroOnly, off, ext, s)) + return; + + Timer sliceTime(s.str(), m_MPIRank); auto slice_data = rho.loadChunk(off, ext); series.flush(); + } - std::cout << " Rank: " << m_MPIRank; - - prettyLambda(off,ext); - } - } - - - /* - * read a slice on a mesh - * - * @param series input - * @param rho a mesh - * @param rankZeroOnly only read on rank 0. 
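    // (Aside: the slice selection computed in getSlice() below, by way of a
    // made-up example: for a 3D mesh extent {256, 128, 64} and whichDim = 1,
    // rank 3 ends up with off = {0, 3, 0} and ext = {256, 1, 64}, i.e. one
    // X-Z plane; the concrete sizes here are assumptions for illustration
    // only.)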
Other ranks idle - * - */ - bool - getSlice(Extent meshExtent, unsigned int whichDim, bool rankZeroOnly, - Offset& off, Extent& ext, std::ostringstream& s) - { - if ( rankZeroOnly && m_MPIRank ) - return false; - - if ( !rankZeroOnly && (m_MPISize == 1) ) // rankZero has to be on - //return false; - rankZeroOnly = true; - - //if ( whichDim < 0 ) return false; - - if ( whichDim >= meshExtent.size() ) return false; - - //std::ostringstream s; - if ( whichDim == 0 ) - s << "Row slice time: "; - else if ( whichDim == 1 ) - s << "Col slice time: "; - else - s << "Z slice time: "; - if ( rankZeroOnly ) - s <<" rank 0 only"; - - - off[whichDim] = m_MPIRank % meshExtent[whichDim]; - for ( unsigned int i=0; i(off, ext); - series.flush(); - } - - /* - * Handles 3D mesh read - * @param series openPMD series - * @param rho a mesh - */ - void - sliceMe( Series& series, MeshRecordComponent& rho ) - { - if ( m_Pattern >= 100 ) - return; - - if ( ( m_Pattern % 10 != 3 ) && ( m_Pattern % 10 != 5 ) ) - return; - - bool rankZeroOnly = true; - - if ( m_Pattern % 10 == 5 ) - rankZeroOnly = false; - - unsigned int whichDim = (m_Pattern/10 % 10); // second digit - - slice(series, rho, whichDim, rankZeroOnly); - } - - - /* - * Handles 3D mesh read of magnetic field - * @param series openPMD series - */ - void - sliceField(Series& series, IndexedIteration& iter) - { - if ( m_Pattern >= 100 ) - return; - - if ( ( m_Pattern % 10 != 3 ) && ( m_Pattern % 10 != 5 ) ) - return; - - bool rankZeroOnly = true; - - if ( m_Pattern % 10 == 5 ) - rankZeroOnly = false; - - int whichDim = (m_Pattern/10 % 10); // second digit - - if (whichDim < 5) - return; - whichDim -= 5; - - MeshRecordComponent bx = iter.meshes["B"]["x"]; - Extent meshExtent = bx.getExtent(); - - if ( bx.getExtent().size() != 3) { - if (m_MPIRank == 0) - std::cerr<<" Field needs to be on 3D mesh. 
"<(off, ext); - auto by_data = by.loadChunk(off, ext); - auto bz_data = bz.loadChunk(off, ext); - - series.flush(); - - } - - /* - * Read an iteration step, mesh & particles - * - * @param Series openPMD series - * @param iter iteration (actual iteration step may not equal to ts) - * @param ts timestep - * - */ - void - readStep( Series& series, IndexedIteration& iter, int ts ) - { - std::string comp_name = openPMD::MeshRecordComponent::SCALAR; - - MeshRecordComponent rho = iter.meshes["rho"][comp_name]; - Extent meshExtent = rho.getExtent(); - - if ( 0 == m_MPIRank ) - { - std::cout << "===> rho meshExtent : ts=" << ts << " ["; - for (unsigned long i : meshExtent) - std::cout< currPatterns; - if (m_Pattern > 0) - currPatterns.push_back(m_Pattern); - else - currPatterns.insert(currPatterns.end(), { 1, 5, 15, 25, 55, 65, 75, 440, 441, 442, 443, 7 }); + /* + * Handles 3D mesh read + * @param series openPMD series + * @param rho a mesh + */ + void sliceMe(Series &series, MeshRecordComponent &rho) + { + if (m_Pattern >= 100) + return; + + if ((m_Pattern % 10 != 3) && (m_Pattern % 10 != 5)) + return; - for(int i : currPatterns) { - m_Pattern = i; - sliceMe(series, rho); - block(series, rho); - fullscan(series, rho); + bool rankZeroOnly = true; - sliceField(series, iter); + if (m_Pattern % 10 == 5) + rankZeroOnly = false; - sliceParticles(series, iter); + unsigned int whichDim = (m_Pattern / 10 % 10); // second digit + + slice(series, rho, whichDim, rankZeroOnly); } - if (currPatterns.size() > 1) - m_Pattern = 0; - } - - /* - * Read a slice of id of the first particle - * - * @param series openPMD Series - * @param iter current iteration - * - */ - void sliceParticles(Series& series, IndexedIteration& iter) - { - // read id of the first particle found - if ( m_Pattern != 7 ) - return; - - if ( 0 == iter.particles.size() ) + + /* + * Handles 3D mesh read of magnetic field + * @param series openPMD series + */ + void sliceField(Series &series, IndexedIteration &iter) { - if ( 0 == m_MPIRank ) - std::cerr << " No Particles found. Skipping particle slicing. " << std::endl; - return; + if (m_Pattern >= 100) + return; + + if ((m_Pattern % 10 != 3) && (m_Pattern % 10 != 5)) + return; + + bool rankZeroOnly = true; + + if (m_Pattern % 10 == 5) + rankZeroOnly = false; + + int whichDim = (m_Pattern / 10 % 10); // second digit + + if (whichDim < 5) + return; + whichDim -= 5; + + MeshRecordComponent bx = iter.meshes["B"]["x"]; + Extent meshExtent = bx.getExtent(); + + if (bx.getExtent().size() != 3) + { + if (m_MPIRank == 0) + std::cerr << " Field needs to be on 3D mesh. 
" << std::endl; + return; + } + + MeshRecordComponent by = iter.meshes["B"]["y"]; + MeshRecordComponent bz = iter.meshes["B"]["z"]; + + Offset off(meshExtent.size(), 0); + Extent ext(meshExtent.size(), 1); + + std::ostringstream s; + s << " Electric Field slice: "; + if (!getSlice(meshExtent, whichDim, rankZeroOnly, off, ext, s)) + return; + + Timer sliceTime(s.str(), m_MPIRank); + auto bx_data = bx.loadChunk(off, ext); + auto by_data = by.loadChunk(off, ext); + auto bz_data = bz.loadChunk(off, ext); + + series.flush(); } - openPMD::ParticleSpecies p = iter.particles.begin()->second; - RecordComponent idVal = p["id"][RecordComponent::SCALAR]; + /* + * Read an iteration step, mesh & particles + * + * @param Series openPMD series + * @param iter iteration (actual iteration step may not equal to + * ts) + * @param ts timestep + * + */ + void readStep(Series &series, IndexedIteration &iter, int ts) + { + std::string comp_name = openPMD::MeshRecordComponent::SCALAR; - Extent pExtent = idVal.getExtent(); + MeshRecordComponent rho = iter.meshes["rho"][comp_name]; + Extent meshExtent = rho.getExtent(); - auto blob = pExtent[0]/(10*m_MPISize); - if (0 == blob) - return; + if (0 == m_MPIRank) + { + std::cout << "===> rho meshExtent : ts=" << ts << " ["; + for (unsigned long i : meshExtent) + std::cout << i << " "; + std::cout << "]" << std::endl; + } - auto start = pExtent[0]/4; + std::vector currPatterns; + if (m_Pattern > 0) + currPatterns.push_back(m_Pattern); + else + currPatterns.insert( + currPatterns.end(), + {1, 5, 15, 25, 55, 65, 75, 440, 441, 442, 443, 7}); - if (m_MPIRank > 0) - return; + for (int i : currPatterns) + { + m_Pattern = i; + sliceMe(series, rho); + block(series, rho); + fullscan(series, rho); - std::ostringstream s; - s << "particle retrievel time, ["< 1) + m_Pattern = 0; + } - Offset colOff = {m_MPIRank*blob}; - Extent colExt = {blob}; - auto col_data = idVal.loadChunk(colOff, colExt); - series.flush(); - } + /* + * Read a slice of id of the first particle + * + * @param series openPMD Series + * @param iter current iteration + * + */ + void sliceParticles(Series &series, IndexedIteration &iter) + { + // read id of the first particle found + if (m_Pattern != 7) + return; + + if (0 == iter.particles.size()) + { + if (0 == m_MPIRank) + std::cerr << " No Particles found. Skipping particle slicing. 
" + << std::endl; + return; + } + openPMD::ParticleSpecies p = iter.particles.begin()->second; + RecordComponent idVal = p["id"][RecordComponent::SCALAR]; - int m_MPISize = 1; - int m_MPIRank = 0; + Extent pExtent = idVal.getExtent(); - unsigned int m_Pattern = 30; - std::string m_Backend = ".bp"; + auto blob = pExtent[0] / (10 * m_MPISize); + if (0 == blob) + return; - //std::vector> m_InRankDistribution; -}; // class TestInput + auto start = pExtent[0] / 4; + + if (m_MPIRank > 0) + return; + + std::ostringstream s; + s << "particle retrievel time, [" << start << " + " + << (blob * m_MPISize) << "] "; + + Timer colTime(s.str(), m_MPIRank); + + Offset colOff = {m_MPIRank * blob}; + Extent colExt = {blob}; + auto col_data = idVal.loadChunk(colOff, colExt); + series.flush(); + } + int m_MPISize = 1; + int m_MPIRank = 0; + unsigned int m_Pattern = 30; + std::string m_Backend = ".bp"; + + // std::vector> + // m_InRankDistribution; +}; // class TestInput /** TEST MAIN * * description of runtime options/flags */ -int -main( int argc, char *argv[] ) +int main(int argc, char *argv[]) { - MPI_Init( &argc, &argv ); + MPI_Init(&argc, &argv); TestInput input; - MPI_Comm_size( MPI_COMM_WORLD, &input.m_MPISize ); - MPI_Comm_rank( MPI_COMM_WORLD, &input.m_MPIRank ); - - if (argc < 2) { - if (input.m_MPIRank == 0) - std::cout<<"Usage: "<= 3) { - std::string types = argv[2]; - - if ( types[0] == 'm' ) { - input.m_Pattern = 1; - } else if ( types[0] == 's' ) { - if ( types[1] == 'x') - input.m_Pattern = 5; - if ( types[1] == 'y') - input.m_Pattern = 15; - if ( types[1] == 'z') - input.m_Pattern = 25; - } else if ( types[0] == 'f' ) { - if ( types[1] == 'x') - input.m_Pattern = 55; - if ( types[1] == 'y') - input.m_Pattern = 65; - if ( types[1] == 'z') - input.m_Pattern = 75; - } else { - input.m_Pattern = atoi(argv[2]); - } + if (input.m_MPIRank == 0) + std::cout << "Usage: " << argv[0] << " input_file_prefix" + << std::endl; + MPI_Finalize(); + return 0; } - auto backends = getBackends(); - for ( auto which: backends ) - { - input.m_Backend = which; - input.run(prefix); - } + { + Timer g(" Main ", input.m_MPIRank); + + std::string prefix = argv[1]; + + if (argc >= 3) + { + std::string types = argv[2]; + + if (types[0] == 'm') + { + input.m_Pattern = 1; + } + else if (types[0] == 's') + { + if (types[1] == 'x') + input.m_Pattern = 5; + if (types[1] == 'y') + input.m_Pattern = 15; + if (types[1] == 'z') + input.m_Pattern = 25; + } + else if (types[0] == 'f') + { + if (types[1] == 'x') + input.m_Pattern = 55; + if (types[1] == 'y') + input.m_Pattern = 65; + if (types[1] == 'z') + input.m_Pattern = 75; + } + else + { + input.m_Pattern = atoi(argv[2]); + } + } + + auto backends = getBackends(); + for (auto which : backends) + { + input.m_Backend = which; + input.run(prefix); + } } // Timer g MPI_Finalize(); diff --git a/examples/9_particle_write_serial.py b/examples/9_particle_write_serial.py index 4d96c83592..659ca846d1 100755 --- a/examples/9_particle_write_serial.py +++ b/examples/9_particle_write_serial.py @@ -6,10 +6,9 @@ Authors: Axel Huebl License: LGPLv3+ """ -from openpmd_api import Series, Access, Dataset, Mesh_Record_Component, \ - Unit_Dimension import numpy as np - +from openpmd_api import (Access, Dataset, Mesh_Record_Component, Series, + Unit_Dimension) SCALAR = Mesh_Record_Component.SCALAR diff --git a/include/openPMD/ChunkInfo.hpp b/include/openPMD/ChunkInfo.hpp index 7a5e936c7c..5be3c27b56 100644 --- a/include/openPMD/ChunkInfo.hpp +++ b/include/openPMD/ChunkInfo.hpp @@ -24,7 +24,6 @@ #include 
- namespace openPMD { /** @@ -41,10 +40,9 @@ struct ChunkInfo * If rank is smaller than zero, will be converted to zero. */ explicit ChunkInfo() = default; - ChunkInfo( Offset, Extent ); + ChunkInfo(Offset, Extent); - bool - operator==( ChunkInfo const & other ) const; + bool operator==(ChunkInfo const &other) const; }; /** @@ -68,12 +66,11 @@ struct WrittenChunkInfo : ChunkInfo /* * If rank is smaller than zero, will be converted to zero. */ - WrittenChunkInfo( Offset, Extent, int sourceID ); - WrittenChunkInfo( Offset, Extent ); + WrittenChunkInfo(Offset, Extent, int sourceID); + WrittenChunkInfo(Offset, Extent); - bool - operator==( WrittenChunkInfo const & other ) const; + bool operator==(WrittenChunkInfo const &other) const; }; -using ChunkTable = std::vector< WrittenChunkInfo >; +using ChunkTable = std::vector; } // namespace openPMD diff --git a/include/openPMD/Dataset.hpp b/include/openPMD/Dataset.hpp index 88fa5a4e49..444d8c1c67 100644 --- a/include/openPMD/Dataset.hpp +++ b/include/openPMD/Dataset.hpp @@ -23,15 +23,14 @@ #include "openPMD/Datatype.hpp" #include +#include #include #include -#include - namespace openPMD { -using Extent = std::vector< std::uint64_t >; -using Offset = std::vector< std::uint64_t >; +using Extent = std::vector; +using Offset = std::vector; class Dataset { @@ -46,12 +45,12 @@ class Dataset * Helpful for resizing datasets, since datatypes need not be given twice. * */ - Dataset( Extent ); + Dataset(Extent); - Dataset& extend(Extent newExtent); - Dataset& setChunkSize(Extent const&); - Dataset& setCompression(std::string const&, uint8_t const); - Dataset& setCustomTransform(std::string const&); + Dataset &extend(Extent newExtent); + Dataset &setChunkSize(Extent const &); + Dataset &setCompression(std::string const &, uint8_t const); + Dataset &setCustomTransform(std::string const &); Extent extent; Datatype dtype; diff --git a/include/openPMD/Datatype.hpp b/include/openPMD/Datatype.hpp index 6f349c1180..02a0d49f37 100644 --- a/include/openPMD/Datatype.hpp +++ b/include/openPMD/Datatype.hpp @@ -44,11 +44,22 @@ constexpr int HIGHEST_DATATYPE = 1000; */ enum class Datatype : int { - CHAR = LOWEST_DATATYPE, UCHAR, // SCHAR, - SHORT, INT, LONG, LONGLONG, - USHORT, UINT, ULONG, ULONGLONG, - FLOAT, DOUBLE, LONG_DOUBLE, - CFLOAT, CDOUBLE, CLONG_DOUBLE, + CHAR = LOWEST_DATATYPE, + UCHAR, // SCHAR, + SHORT, + INT, + LONG, + LONGLONG, + USHORT, + UINT, + ULONG, + ULONGLONG, + FLOAT, + DOUBLE, + LONG_DOUBLE, + CFLOAT, + CDOUBLE, + CLONG_DOUBLE, STRING, VEC_CHAR, VEC_SHORT, @@ -81,7 +92,7 @@ enum class Datatype : int * listed in order in a vector. * */ -extern std::vector< Datatype > openPMD_Datatypes; +extern std::vector openPMD_Datatypes; /** @brief Fundamental equivalence check for two given types T and U. 
* @@ -92,125 +103,322 @@ extern std::vector< Datatype > openPMD_Datatypes; * @tparam T first type * @tparam U second type */ -template< - typename T, - typename U -> -struct decay_equiv : - std::is_same< - typename std::remove_pointer< - typename std::remove_cv< - typename std::decay< - typename std::remove_all_extents< T >::type - >::type - >::type - >::type, - typename std::remove_pointer< - typename std::remove_cv< - typename std::decay< - typename std::remove_all_extents< U >::type - >::type - >::type - >::type - >::type -{ }; - -template< - typename T, - typename U -> -constexpr bool decay_equiv_v = decay_equiv< T, U >::value; - -template< typename T > -inline -constexpr -Datatype -determineDatatype() +template +struct decay_equiv + : std::is_same< + typename std::remove_pointer::type>:: + type>::type>::type, + typename std::remove_pointer::type>:: + type>::type>::type>::type +{}; + +template +constexpr bool decay_equiv_v = decay_equiv::value; + +template +inline constexpr Datatype determineDatatype() { using DT = Datatype; - if( decay_equiv< T, char >::value ){ return DT::CHAR; } - else if( decay_equiv< T, unsigned char >::value ){ return DT::UCHAR; } - else if( decay_equiv< T, short >::value ){ return DT::SHORT; } - else if( decay_equiv< T, int >::value ){ return DT::INT; } - else if( decay_equiv< T, long >::value ){ return DT::LONG; } - else if( decay_equiv< T, long long >::value ){ return DT::LONGLONG; } - else if( decay_equiv< T, unsigned short >::value ){ return DT::USHORT; } - else if( decay_equiv< T, unsigned int >::value ){ return DT::UINT; } - else if( decay_equiv< T, unsigned long >::value ){ return DT::ULONG; } - else if( decay_equiv< T, unsigned long long >::value ){ return DT::ULONGLONG; } - else if( decay_equiv< T, float >::value ){ return DT::FLOAT; } - else if( decay_equiv< T, double >::value ){ return DT::DOUBLE; } - else if( decay_equiv< T, long double >::value ){ return DT::LONG_DOUBLE; } - else if( decay_equiv< T, std::complex< float > >::value ){ return DT::CFLOAT; } - else if( decay_equiv< T, std::complex< double > >::value ){ return DT::CDOUBLE; } - else if( decay_equiv< T, std::complex< long double > >::value ){ return DT::CLONG_DOUBLE; } - else if( decay_equiv< T, std::string >::value ){ return DT::STRING; } - else if( decay_equiv< T, std::vector< char > >::value ){ return DT::VEC_CHAR; } - else if( decay_equiv< T, std::vector< short > >::value ){ return DT::VEC_SHORT; } - else if( decay_equiv< T, std::vector< int > >::value ){ return DT::VEC_INT; } - else if( decay_equiv< T, std::vector< long > >::value ){ return DT::VEC_LONG; } - else if( decay_equiv< T, std::vector< long long > >::value ){ return DT::VEC_LONGLONG; } - else if( decay_equiv< T, std::vector< unsigned char > >::value ){ return DT::VEC_UCHAR; } - else if( decay_equiv< T, std::vector< unsigned short > >::value ){ return DT::VEC_USHORT; } - else if( decay_equiv< T, std::vector< unsigned int > >::value ){ return DT::VEC_UINT; } - else if( decay_equiv< T, std::vector< unsigned long > >::value ){ return DT::VEC_ULONG; } - else if( decay_equiv< T, std::vector< unsigned long long > >::value ){ return DT::VEC_ULONGLONG; } - else if( decay_equiv< T, std::vector< float > >::value ){ return DT::VEC_FLOAT; } - else if( decay_equiv< T, std::vector< double > >::value ){ return DT::VEC_DOUBLE; } - else if( decay_equiv< T, std::vector< long double > >::value ){ return DT::VEC_LONG_DOUBLE; } - else if( decay_equiv< T, std::vector< std::complex< float > > >::value ){ return DT::VEC_CFLOAT; } - else if( 
decay_equiv< T, std::vector< std::complex< double > > >::value ){ return DT::VEC_CDOUBLE; } - else if( decay_equiv< T, std::vector< std::complex< long double > > >::value ){ return DT::VEC_CLONG_DOUBLE; } - else if( decay_equiv< T, std::vector< std::string > >::value ){ return DT::VEC_STRING; } - else if( decay_equiv< T, std::array< double, 7 > >::value ){ return DT::ARR_DBL_7; } - else if( decay_equiv< T, bool >::value ){ return DT::BOOL; } - else return Datatype::UNDEFINED; + if (decay_equiv::value) + { + return DT::CHAR; + } + else if (decay_equiv::value) + { + return DT::UCHAR; + } + else if (decay_equiv::value) + { + return DT::SHORT; + } + else if (decay_equiv::value) + { + return DT::INT; + } + else if (decay_equiv::value) + { + return DT::LONG; + } + else if (decay_equiv::value) + { + return DT::LONGLONG; + } + else if (decay_equiv::value) + { + return DT::USHORT; + } + else if (decay_equiv::value) + { + return DT::UINT; + } + else if (decay_equiv::value) + { + return DT::ULONG; + } + else if (decay_equiv::value) + { + return DT::ULONGLONG; + } + else if (decay_equiv::value) + { + return DT::FLOAT; + } + else if (decay_equiv::value) + { + return DT::DOUBLE; + } + else if (decay_equiv::value) + { + return DT::LONG_DOUBLE; + } + else if (decay_equiv>::value) + { + return DT::CFLOAT; + } + else if (decay_equiv>::value) + { + return DT::CDOUBLE; + } + else if (decay_equiv>::value) + { + return DT::CLONG_DOUBLE; + } + else if (decay_equiv::value) + { + return DT::STRING; + } + else if (decay_equiv>::value) + { + return DT::VEC_CHAR; + } + else if (decay_equiv>::value) + { + return DT::VEC_SHORT; + } + else if (decay_equiv>::value) + { + return DT::VEC_INT; + } + else if (decay_equiv>::value) + { + return DT::VEC_LONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_LONGLONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_UCHAR; + } + else if (decay_equiv>::value) + { + return DT::VEC_USHORT; + } + else if (decay_equiv>::value) + { + return DT::VEC_UINT; + } + else if (decay_equiv>::value) + { + return DT::VEC_ULONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_ULONGLONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_FLOAT; + } + else if (decay_equiv>::value) + { + return DT::VEC_DOUBLE; + } + else if (decay_equiv>::value) + { + return DT::VEC_LONG_DOUBLE; + } + else if (decay_equiv>>::value) + { + return DT::VEC_CFLOAT; + } + else if (decay_equiv>>::value) + { + return DT::VEC_CDOUBLE; + } + else if (decay_equiv>>::value) + { + return DT::VEC_CLONG_DOUBLE; + } + else if (decay_equiv>::value) + { + return DT::VEC_STRING; + } + else if (decay_equiv>::value) + { + return DT::ARR_DBL_7; + } + else if (decay_equiv::value) + { + return DT::BOOL; + } + else + return Datatype::UNDEFINED; } -template< typename T > -inline -constexpr -Datatype -determineDatatype(std::shared_ptr< T >) +template +inline constexpr Datatype determineDatatype(std::shared_ptr) { using DT = Datatype; - if( decay_equiv< T, char >::value ){ return DT::CHAR; } - else if( decay_equiv< T, unsigned char >::value ){ return DT::UCHAR; } - else if( decay_equiv< T, short >::value ){ return DT::SHORT; } - else if( decay_equiv< T, int >::value ){ return DT::INT; } - else if( decay_equiv< T, long >::value ){ return DT::LONG; } - else if( decay_equiv< T, long long >::value ){ return DT::LONGLONG; } - else if( decay_equiv< T, unsigned short >::value ){ return DT::USHORT; } - else if( decay_equiv< T, unsigned int >::value ){ return DT::UINT; } - else if( decay_equiv< T, unsigned long >::value 
){ return DT::ULONG; } - else if( decay_equiv< T, unsigned long long >::value ){ return DT::ULONGLONG; } - else if( decay_equiv< T, float >::value ){ return DT::FLOAT; } - else if( decay_equiv< T, double >::value ){ return DT::DOUBLE; } - else if( decay_equiv< T, long double >::value ){ return DT::LONG_DOUBLE; } - else if( decay_equiv< T, std::complex< float > >::value ){ return DT::CFLOAT; } - else if( decay_equiv< T, std::complex< double > >::value ){ return DT::CDOUBLE; } - else if( decay_equiv< T, std::complex< long double > >::value ){ return DT::CLONG_DOUBLE; } - else if( decay_equiv< T, std::string >::value ){ return DT::STRING; } - else if( decay_equiv< T, std::vector< char > >::value ){ return DT::VEC_CHAR; } - else if( decay_equiv< T, std::vector< short > >::value ){ return DT::VEC_SHORT; } - else if( decay_equiv< T, std::vector< int > >::value ){ return DT::VEC_INT; } - else if( decay_equiv< T, std::vector< long > >::value ){ return DT::VEC_LONG; } - else if( decay_equiv< T, std::vector< long long > >::value ){ return DT::VEC_LONGLONG; } - else if( decay_equiv< T, std::vector< unsigned char > >::value ){ return DT::VEC_UCHAR; } - else if( decay_equiv< T, std::vector< unsigned short > >::value ){ return DT::VEC_USHORT; } - else if( decay_equiv< T, std::vector< unsigned int > >::value ){ return DT::VEC_UINT; } - else if( decay_equiv< T, std::vector< unsigned long > >::value ){ return DT::VEC_ULONG; } - else if( decay_equiv< T, std::vector< unsigned long long > >::value ){ return DT::VEC_ULONGLONG; } - else if( decay_equiv< T, std::vector< float > >::value ){ return DT::VEC_FLOAT; } - else if( decay_equiv< T, std::vector< double > >::value ){ return DT::VEC_DOUBLE; } - else if( decay_equiv< T, std::vector< long double > >::value ){ return DT::VEC_LONG_DOUBLE; } - else if( decay_equiv< T, std::vector< std::complex< float > > >::value ){ return DT::VEC_CFLOAT; } - else if( decay_equiv< T, std::vector< std::complex< double > > >::value ){ return DT::VEC_CDOUBLE; } - else if( decay_equiv< T, std::vector< std::complex< long double > > >::value ){ return DT::VEC_CLONG_DOUBLE; } - else if( decay_equiv< T, std::vector< std::string > >::value ){ return DT::VEC_STRING; } - else if( decay_equiv< T, std::array< double, 7 > >::value ){ return DT::ARR_DBL_7; } - else if( decay_equiv< T, bool >::value ){ return DT::BOOL; } - else return DT::UNDEFINED; + if (decay_equiv::value) + { + return DT::CHAR; + } + else if (decay_equiv::value) + { + return DT::UCHAR; + } + else if (decay_equiv::value) + { + return DT::SHORT; + } + else if (decay_equiv::value) + { + return DT::INT; + } + else if (decay_equiv::value) + { + return DT::LONG; + } + else if (decay_equiv::value) + { + return DT::LONGLONG; + } + else if (decay_equiv::value) + { + return DT::USHORT; + } + else if (decay_equiv::value) + { + return DT::UINT; + } + else if (decay_equiv::value) + { + return DT::ULONG; + } + else if (decay_equiv::value) + { + return DT::ULONGLONG; + } + else if (decay_equiv::value) + { + return DT::FLOAT; + } + else if (decay_equiv::value) + { + return DT::DOUBLE; + } + else if (decay_equiv::value) + { + return DT::LONG_DOUBLE; + } + else if (decay_equiv>::value) + { + return DT::CFLOAT; + } + else if (decay_equiv>::value) + { + return DT::CDOUBLE; + } + else if (decay_equiv>::value) + { + return DT::CLONG_DOUBLE; + } + else if (decay_equiv::value) + { + return DT::STRING; + } + else if (decay_equiv>::value) + { + return DT::VEC_CHAR; + } + else if (decay_equiv>::value) + { + return DT::VEC_SHORT; + } + else if 
(decay_equiv>::value) + { + return DT::VEC_INT; + } + else if (decay_equiv>::value) + { + return DT::VEC_LONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_LONGLONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_UCHAR; + } + else if (decay_equiv>::value) + { + return DT::VEC_USHORT; + } + else if (decay_equiv>::value) + { + return DT::VEC_UINT; + } + else if (decay_equiv>::value) + { + return DT::VEC_ULONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_ULONGLONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_FLOAT; + } + else if (decay_equiv>::value) + { + return DT::VEC_DOUBLE; + } + else if (decay_equiv>::value) + { + return DT::VEC_LONG_DOUBLE; + } + else if (decay_equiv>>::value) + { + return DT::VEC_CFLOAT; + } + else if (decay_equiv>>::value) + { + return DT::VEC_CDOUBLE; + } + else if (decay_equiv>>::value) + { + return DT::VEC_CLONG_DOUBLE; + } + else if (decay_equiv>::value) + { + return DT::VEC_STRING; + } + else if (decay_equiv>::value) + { + return DT::ARR_DBL_7; + } + else if (decay_equiv::value) + { + return DT::BOOL; + } + else + return DT::UNDEFINED; } /** Return number of bytes representing a Datatype @@ -218,72 +426,71 @@ determineDatatype(std::shared_ptr< T >) * @param d Datatype * @return number of bytes */ -inline size_t -toBytes( Datatype d ) +inline size_t toBytes(Datatype d) { using DT = Datatype; - switch( d ) - { - case DT::CHAR: - case DT::VEC_CHAR: - case DT::STRING: - case DT::VEC_STRING: - return sizeof(char); - case DT::UCHAR: - case DT::VEC_UCHAR: - return sizeof(unsigned char); - // case DT::SCHAR: - // case DT::VEC_SCHAR: - // return sizeof(signed char); - case DT::SHORT: - case DT::VEC_SHORT: - return sizeof(short); - case DT::INT: - case DT::VEC_INT: - return sizeof(int); - case DT::LONG: - case DT::VEC_LONG: - return sizeof(long); - case DT::LONGLONG: - case DT::VEC_LONGLONG: - return sizeof(long long); - case DT::USHORT: - case DT::VEC_USHORT: - return sizeof(unsigned short); - case DT::UINT: - case DT::VEC_UINT: - return sizeof(unsigned int); - case DT::ULONG: - case DT::VEC_ULONG: - return sizeof(unsigned long); - case DT::ULONGLONG: - case DT::VEC_ULONGLONG: - return sizeof(unsigned long long); - case DT::FLOAT: - case DT::VEC_FLOAT: - return sizeof(float); - case DT::DOUBLE: - case DT::VEC_DOUBLE: - case DT::ARR_DBL_7: - return sizeof(double); - case DT::LONG_DOUBLE: - case DT::VEC_LONG_DOUBLE: - return sizeof(long double); - case DT::CFLOAT: - case DT::VEC_CFLOAT: - return sizeof(float) * 2; - case DT::CDOUBLE: - case DT::VEC_CDOUBLE: - return sizeof(double) * 2; - case DT::CLONG_DOUBLE: - case DT::VEC_CLONG_DOUBLE: - return sizeof(long double) * 2; - case DT::BOOL: - return sizeof(bool); - case DT::DATATYPE: - case DT::UNDEFINED: - default: - throw std::runtime_error("toBytes: Invalid datatype!"); + switch (d) + { + case DT::CHAR: + case DT::VEC_CHAR: + case DT::STRING: + case DT::VEC_STRING: + return sizeof(char); + case DT::UCHAR: + case DT::VEC_UCHAR: + return sizeof(unsigned char); + // case DT::SCHAR: + // case DT::VEC_SCHAR: + // return sizeof(signed char); + case DT::SHORT: + case DT::VEC_SHORT: + return sizeof(short); + case DT::INT: + case DT::VEC_INT: + return sizeof(int); + case DT::LONG: + case DT::VEC_LONG: + return sizeof(long); + case DT::LONGLONG: + case DT::VEC_LONGLONG: + return sizeof(long long); + case DT::USHORT: + case DT::VEC_USHORT: + return sizeof(unsigned short); + case DT::UINT: + case DT::VEC_UINT: + return sizeof(unsigned int); + case DT::ULONG: + case DT::VEC_ULONG: + 
return sizeof(unsigned long); + case DT::ULONGLONG: + case DT::VEC_ULONGLONG: + return sizeof(unsigned long long); + case DT::FLOAT: + case DT::VEC_FLOAT: + return sizeof(float); + case DT::DOUBLE: + case DT::VEC_DOUBLE: + case DT::ARR_DBL_7: + return sizeof(double); + case DT::LONG_DOUBLE: + case DT::VEC_LONG_DOUBLE: + return sizeof(long double); + case DT::CFLOAT: + case DT::VEC_CFLOAT: + return sizeof(float) * 2; + case DT::CDOUBLE: + case DT::VEC_CDOUBLE: + return sizeof(double) * 2; + case DT::CLONG_DOUBLE: + case DT::VEC_CLONG_DOUBLE: + return sizeof(long double) * 2; + case DT::BOOL: + return sizeof(bool); + case DT::DATATYPE: + case DT::UNDEFINED: + default: + throw std::runtime_error("toBytes: Invalid datatype!"); } } @@ -292,10 +499,9 @@ toBytes( Datatype d ) * @param d Datatype * @return number of bits */ -inline size_t -toBits( Datatype d ) +inline size_t toBits(Datatype d) { - return toBytes( d ) * CHAR_BIT; + return toBytes(d) * CHAR_BIT; } /** Compare if a Datatype is a vector type @@ -303,33 +509,32 @@ toBits( Datatype d ) * @param d Datatype to test * @return true if vector type, else false */ -inline bool -isVector( Datatype d ) +inline bool isVector(Datatype d) { using DT = Datatype; - switch( d ) - { - case DT::VEC_CHAR: - case DT::VEC_SHORT: - case DT::VEC_INT: - case DT::VEC_LONG: - case DT::VEC_LONGLONG: - case DT::VEC_UCHAR: - case DT::VEC_USHORT: - case DT::VEC_UINT: - case DT::VEC_ULONG: - case DT::VEC_ULONGLONG: - case DT::VEC_FLOAT: - case DT::VEC_DOUBLE: - case DT::VEC_LONG_DOUBLE: - case DT::VEC_CFLOAT: - case DT::VEC_CDOUBLE: - case DT::VEC_CLONG_DOUBLE: - case DT::VEC_STRING: - return true; - default: - return false; + switch (d) + { + case DT::VEC_CHAR: + case DT::VEC_SHORT: + case DT::VEC_INT: + case DT::VEC_LONG: + case DT::VEC_LONGLONG: + case DT::VEC_UCHAR: + case DT::VEC_USHORT: + case DT::VEC_UINT: + case DT::VEC_ULONG: + case DT::VEC_ULONGLONG: + case DT::VEC_FLOAT: + case DT::VEC_DOUBLE: + case DT::VEC_LONG_DOUBLE: + case DT::VEC_CFLOAT: + case DT::VEC_CDOUBLE: + case DT::VEC_CLONG_DOUBLE: + case DT::VEC_STRING: + return true; + default: + return false; } } @@ -340,23 +545,22 @@ isVector( Datatype d ) * @param d Datatype to test * @return true if floating point, otherwise false */ -inline bool -isFloatingPoint( Datatype d ) +inline bool isFloatingPoint(Datatype d) { using DT = Datatype; - switch( d ) + switch (d) { - case DT::FLOAT: - case DT::VEC_FLOAT: - case DT::DOUBLE: - case DT::VEC_DOUBLE: - case DT::LONG_DOUBLE: - case DT::VEC_LONG_DOUBLE: + case DT::FLOAT: + case DT::VEC_FLOAT: + case DT::DOUBLE: + case DT::VEC_DOUBLE: + case DT::LONG_DOUBLE: + case DT::VEC_LONG_DOUBLE: // note: complex floats are not std::is_floating_point - return true; - default: - return false; + return true; + default: + return false; } } @@ -367,22 +571,21 @@ isFloatingPoint( Datatype d ) * @param d Datatype to test * @return true if complex floating point, otherwise false */ -inline bool -isComplexFloatingPoint( Datatype d ) +inline bool isComplexFloatingPoint(Datatype d) { using DT = Datatype; - switch( d ) + switch (d) { - case DT::CFLOAT: - case DT::VEC_CFLOAT: - case DT::CDOUBLE: - case DT::VEC_CDOUBLE: - case DT::CLONG_DOUBLE: - case DT::VEC_CLONG_DOUBLE: - return true; - default: - return false; + case DT::CFLOAT: + case DT::VEC_CFLOAT: + case DT::CDOUBLE: + case DT::VEC_CDOUBLE: + case DT::CLONG_DOUBLE: + case DT::VEC_CLONG_DOUBLE: + return true; + default: + return false; } } @@ -393,13 +596,12 @@ isComplexFloatingPoint( Datatype d ) * @tparam T type to test * 
@return true if floating point, otherwise false */ -template< typename T > -inline bool -isFloatingPoint() +template +inline bool isFloatingPoint() { - Datatype dtype = determineDatatype< T >(); + Datatype dtype = determineDatatype(); - return isFloatingPoint( dtype ); + return isFloatingPoint(dtype); } /** Compare if a type is a complex floating point type @@ -409,11 +611,10 @@ isFloatingPoint() * @tparam T type to test * @return true if complex floating point, otherwise false */ -template< typename T > -inline bool -isComplexFloatingPoint() +template +inline bool isComplexFloatingPoint() { - Datatype dtype = determineDatatype< T >(); + Datatype dtype = determineDatatype(); return isComplexFloatingPoint(dtype); } @@ -426,33 +627,32 @@ isComplexFloatingPoint() * @param d Datatype to test * @return std::tuple with isInteger and isSigned result */ -inline std::tuple< bool, bool > -isInteger( Datatype d ) +inline std::tuple isInteger(Datatype d) { using DT = Datatype; - switch( d ) - { - case DT::SHORT: - case DT::VEC_SHORT: - case DT::INT: - case DT::VEC_INT: - case DT::LONG: - case DT::VEC_LONG: - case DT::LONGLONG: - case DT::VEC_LONGLONG: - return std::make_tuple( true, true ); - case DT::USHORT: - case DT::VEC_USHORT: - case DT::UINT: - case DT::VEC_UINT: - case DT::ULONG: - case DT::VEC_ULONG: - case DT::ULONGLONG: - case DT::VEC_ULONGLONG: - return std::make_tuple( true, false ); - default: - return std::make_tuple( false, false ); + switch (d) + { + case DT::SHORT: + case DT::VEC_SHORT: + case DT::INT: + case DT::VEC_INT: + case DT::LONG: + case DT::VEC_LONG: + case DT::LONGLONG: + case DT::VEC_LONGLONG: + return std::make_tuple(true, true); + case DT::USHORT: + case DT::VEC_USHORT: + case DT::UINT: + case DT::VEC_UINT: + case DT::ULONG: + case DT::VEC_ULONG: + case DT::ULONGLONG: + case DT::VEC_ULONGLONG: + return std::make_tuple(true, false); + default: + return std::make_tuple(false, false); } } @@ -464,13 +664,12 @@ isInteger( Datatype d ) * @tparam T type to test * @return std::tuple with isInteger and isSigned result */ -template< typename T > -inline std::tuple< bool, bool > -isInteger() +template +inline std::tuple isInteger() { - Datatype dtype = determineDatatype< T >(); + Datatype dtype = determineDatatype(); - return isInteger( dtype ); + return isInteger(dtype); } /** Compare if a Datatype is equivalent to a floating point type @@ -479,21 +678,16 @@ isInteger() * @param d Datatype to compare * @return true if both types are floating point and same bitness, else false */ -template< typename T_FP > -inline bool -isSameFloatingPoint( Datatype d ) +template +inline bool isSameFloatingPoint(Datatype d) { // template - bool tt_is_fp = isFloatingPoint< T_FP >(); + bool tt_is_fp = isFloatingPoint(); // Datatype - bool dt_is_fp = isFloatingPoint( d ); + bool dt_is_fp = isFloatingPoint(d); - if( - tt_is_fp && - dt_is_fp && - toBits( d ) == toBits( determineDatatype< T_FP >() ) - ) + if (tt_is_fp && dt_is_fp && toBits(d) == toBits(determineDatatype())) return true; else return false; @@ -503,23 +697,20 @@ isSameFloatingPoint( Datatype d ) * * @tparam T_CFP complex floating point type to compare * @param d Datatype to compare - * @return true if both types are complex floating point and same bitness, else false + * @return true if both types are complex floating point and same bitness, else + * false */ -template< typename T_CFP > -inline bool -isSameComplexFloatingPoint( Datatype d ) +template +inline bool isSameComplexFloatingPoint(Datatype d) { // template - bool tt_is_cfp = 
isComplexFloatingPoint< T_CFP >(); + bool tt_is_cfp = isComplexFloatingPoint(); // Datatype - bool dt_is_cfp = isComplexFloatingPoint( d ); + bool dt_is_cfp = isComplexFloatingPoint(d); - if( - tt_is_cfp && - dt_is_cfp && - toBits( d ) == toBits( determineDatatype< T_CFP >() ) - ) + if (tt_is_cfp && dt_is_cfp && + toBits(d) == toBits(determineDatatype())) return true; else return false; @@ -529,26 +720,22 @@ isSameComplexFloatingPoint( Datatype d ) * * @tparam T_Int signed or unsigned integer type to compare * @param d Datatype to compare - * @return true if both types are integers, same signed and same bitness, else false + * @return true if both types are integers, same signed and same bitness, else + * false */ -template< typename T_Int > -inline bool -isSameInteger( Datatype d ) +template +inline bool isSameInteger(Datatype d) { // template bool tt_is_int, tt_is_sig; - std::tie(tt_is_int, tt_is_sig) = isInteger< T_Int >(); + std::tie(tt_is_int, tt_is_sig) = isInteger(); // Datatype bool dt_is_int, dt_is_sig; - std::tie(dt_is_int, dt_is_sig) = isInteger( d ); - - if( - tt_is_int && - dt_is_int && - tt_is_sig == dt_is_sig && - toBits( d ) == toBits( determineDatatype< T_Int >() ) - ) + std::tie(dt_is_int, dt_is_sig) = isInteger(d); + + if (tt_is_int && dt_is_int && tt_is_sig == dt_is_sig && + toBits(d) == toBits(determineDatatype())) return true; else return false; @@ -560,84 +747,74 @@ isSameInteger( Datatype d ) * some platforms, e.g. if long and long long are the same or double and * long double will also return true. */ -inline bool -isSame( openPMD::Datatype const d, openPMD::Datatype const e ) +inline bool isSame(openPMD::Datatype const d, openPMD::Datatype const e) { // exact same type - if( static_cast(d) == static_cast(e) ) + if (static_cast(d) == static_cast(e)) return true; - bool d_is_vec = isVector( d ); - bool e_is_vec = isVector( e ); + bool d_is_vec = isVector(d); + bool e_is_vec = isVector(e); // same int bool d_is_int, d_is_sig; - std::tie(d_is_int, d_is_sig) = isInteger( d ); + std::tie(d_is_int, d_is_sig) = isInteger(d); bool e_is_int, e_is_sig; - std::tie(e_is_int, e_is_sig) = isInteger( e ); - if( - d_is_int && - e_is_int && - d_is_vec == e_is_vec && - d_is_sig == e_is_sig && - toBits( d ) == toBits( e ) - ) + std::tie(e_is_int, e_is_sig) = isInteger(e); + if (d_is_int && e_is_int && d_is_vec == e_is_vec && d_is_sig == e_is_sig && + toBits(d) == toBits(e)) return true; // same float - bool d_is_fp = isFloatingPoint( d ); - bool e_is_fp = isFloatingPoint( e ); - - if( - d_is_fp && - e_is_fp && - d_is_vec == e_is_vec && - toBits( d ) == toBits( e ) - ) + bool d_is_fp = isFloatingPoint(d); + bool e_is_fp = isFloatingPoint(e); + + if (d_is_fp && e_is_fp && d_is_vec == e_is_vec && toBits(d) == toBits(e)) return true; // same complex floating point bool d_is_cfp = isComplexFloatingPoint(d); bool e_is_cfp = isComplexFloatingPoint(e); - if( - d_is_cfp && - e_is_cfp && - d_is_vec == e_is_vec && - toBits( d ) == toBits( e ) - ) + if (d_is_cfp && e_is_cfp && d_is_vec == e_is_vec && toBits(d) == toBits(e)) return true; return false; } -namespace detail { - template - struct BasicDatatypeHelper { +namespace detail +{ + template + struct BasicDatatypeHelper + { Datatype m_dt = determineDatatype(); }; - template - struct BasicDatatypeHelper> { + template + struct BasicDatatypeHelper> + { Datatype m_dt = BasicDatatypeHelper{}.m_dt; }; - template - struct BasicDatatypeHelper> { + template + struct BasicDatatypeHelper> + { Datatype m_dt = BasicDatatypeHelper{}.m_dt; }; - struct 
BasicDatatype { + struct BasicDatatype + { template Datatype operator()(); template Datatype operator()(); }; -} +} // namespace detail /** - * @brief basicDatatype Strip openPMD Datatype of std::vector, std::array et. al. + * @brief basicDatatype Strip openPMD Datatype of std::vector, std::array et. + * al. * @param dt The "full" Datatype. * @return The "inner" Datatype. */ @@ -645,17 +822,13 @@ Datatype basicDatatype(Datatype dt); Datatype toVectorType(Datatype dt); -std::string datatypeToString( Datatype dt ); +std::string datatypeToString(Datatype dt); -Datatype stringToDatatype( std::string s ); +Datatype stringToDatatype(std::string s); -void -warnWrongDtype(std::string const& key, - Datatype store, - Datatype request); +void warnWrongDtype(std::string const &key, Datatype store, Datatype request); -std::ostream& -operator<<(std::ostream&, openPMD::Datatype const&); +std::ostream &operator<<(std::ostream &, openPMD::Datatype const &); } // namespace openPMD @@ -672,14 +845,12 @@ operator<<(std::ostream&, openPMD::Datatype const&); * * @{ */ -inline bool -operator==( openPMD::Datatype d, openPMD::Datatype e ) +inline bool operator==(openPMD::Datatype d, openPMD::Datatype e) { return openPMD::isSame(d, e); } -inline bool -operator!=( openPMD::Datatype d, openPMD::Datatype e ) +inline bool operator!=(openPMD::Datatype d, openPMD::Datatype e) { return !(d == e); } diff --git a/include/openPMD/DatatypeHelpers.hpp b/include/openPMD/DatatypeHelpers.hpp index d9714e8648..c07f34ad77 100644 --- a/include/openPMD/DatatypeHelpers.hpp +++ b/include/openPMD/DatatypeHelpers.hpp @@ -29,7 +29,7 @@ namespace openPMD { -#if defined( _MSC_VER ) && !defined( __INTEL_COMPILER ) && !defined( __clang__ ) +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && !defined(__clang__) #define OPENPMD_TEMPLATE_OPERATOR operator #else #define OPENPMD_TEMPLATE_OPERATOR template operator @@ -37,78 +37,78 @@ namespace openPMD namespace detail { -// std::void_t is C++17 -template< typename > -using void_t = void; + // std::void_t is C++17 + template + using void_t = void; -/* - * Check whether class T has a member "errorMsg" convertible - * to type std::string. - * Used to give helpful compile-time error messages with static_assert - * down in CallUndefinedDatatype. - */ -template< typename T, typename = void > -struct HasErrorMessageMember -{ - static constexpr bool value = false; -}; + /* + * Check whether class T has a member "errorMsg" convertible + * to type std::string. + * Used to give helpful compile-time error messages with static_assert + * down in CallUndefinedDatatype. + */ + template + struct HasErrorMessageMember + { + static constexpr bool value = false; + }; -template< typename T > -struct HasErrorMessageMember< - T, - void_t< decltype( std::string( std::declval< T >().errorMsg ) ) > > -{ - static constexpr bool value = true; -}; + template + struct HasErrorMessageMember< + T, + void_t().errorMsg))> > + { + static constexpr bool value = true; + }; -/** - * Purpose of this struct is to detect at compile time whether - * Action::template operator()\<0\>() exists. If yes, call - * Action::template operator()\() with the passed arguments. - * If not, throw an error. - * - * @tparam n As in switchType(). - * @tparam ReturnType As in switchType(). - * @tparam Action As in switchType(). - * @tparam Placeholder For SFINAE, set to void. - * @tparam Args As in switchType(). - */ -template< - int n, - typename ReturnType, - typename Action, - typename Placeholder, - typename... 
Args > -struct CallUndefinedDatatype -{ - static ReturnType call( Action action, Args &&... ) + /** + * Purpose of this struct is to detect at compile time whether + * Action::template operator()\<0\>() exists. If yes, call + * Action::template operator()\() with the passed arguments. + * If not, throw an error. + * + * @tparam n As in switchType(). + * @tparam ReturnType As in switchType(). + * @tparam Action As in switchType(). + * @tparam Placeholder For SFINAE, set to void. + * @tparam Args As in switchType(). + */ + template < + int n, + typename ReturnType, + typename Action, + typename Placeholder, + typename... Args> + struct CallUndefinedDatatype { - static_assert( - HasErrorMessageMember< Action >::value, - "[switchType] Action needs either an errorMsg member of type " - "std::string or operator()() overloads." ); - throw std::runtime_error( - "[" + std::string( action.errorMsg ) + "] Unknown Datatype." ); - } -}; + static ReturnType call(Action action, Args &&...) + { + static_assert( + HasErrorMessageMember::value, + "[switchType] Action needs either an errorMsg member of type " + "std::string or operator()() overloads."); + throw std::runtime_error( + "[" + std::string(action.errorMsg) + "] Unknown Datatype."); + } + }; -template< int n, typename ReturnType, typename Action, typename... Args > -struct CallUndefinedDatatype< - n, - ReturnType, - Action, - // Enable this, if no error message member is found. - // action.template operator()() will be called instead - typename std::enable_if< !HasErrorMessageMember< Action >::value >::type, - Args... > -{ - static ReturnType call( Action action, Args &&... args ) + template + struct CallUndefinedDatatype< + n, + ReturnType, + Action, + // Enable this, if no error message member is found. + // action.template operator()() will be called instead + typename std::enable_if::value>::type, + Args...> { - return action.OPENPMD_TEMPLATE_OPERATOR()< n >( - std::forward< Args >( args )... ); - } -}; -} + static ReturnType call(Action action, Args &&...args) + { + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); + } + }; +} // namespace detail /** * Generalizes switching over an openPMD datatype. @@ -127,153 +127,144 @@ struct CallUndefinedDatatype< * the passed arguments and the template parameter type corresponding to the * openPMD type. */ -template< typename Action, typename... Args > -auto switchType( Datatype dt, Action action, Args &&... args ) -> decltype( - action.OPENPMD_TEMPLATE_OPERATOR() < char > - ( std::forward< Args >( args )... ) ) +template +auto switchType(Datatype dt, Action action, Args &&...args) + -> decltype(action.OPENPMD_TEMPLATE_OPERATOR() < char > (std::forward(args)...)) { - using ReturnType = decltype( - action.OPENPMD_TEMPLATE_OPERATOR() < char > - ( std::forward< Args >( args )... ) ); - switch( dt ) + using ReturnType = + decltype(action.OPENPMD_TEMPLATE_OPERATOR() < char > (std::forward(args)...)); + switch (dt) { case Datatype::CHAR: - return action.OPENPMD_TEMPLATE_OPERATOR()< char >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::UCHAR: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned char >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::SHORT: - return action.OPENPMD_TEMPLATE_OPERATOR()< short >( - std::forward< Args >( args )... 
); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::INT: - return action.OPENPMD_TEMPLATE_OPERATOR()< int >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::LONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::LONGLONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< long long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::USHORT: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned short >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::UINT: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned int >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::ULONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::ULONGLONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned long long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::FLOAT: - return action.OPENPMD_TEMPLATE_OPERATOR()< float >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::DOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< double >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::LONG_DOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< long double >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::CFLOAT: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::complex< float > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::CDOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::complex< double > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::CLONG_DOUBLE: - return action - .OPENPMD_TEMPLATE_OPERATOR()< std::complex< long double > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::STRING: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::string >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::VEC_CHAR: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::vector< char > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_SHORT: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::vector< short > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_INT: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::vector< int > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_LONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::vector< long > >( - std::forward< Args >( args )... 
); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_LONGLONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::vector< long long > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_UCHAR: - return action - .OPENPMD_TEMPLATE_OPERATOR()< std::vector< unsigned char > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_USHORT: - return action - .OPENPMD_TEMPLATE_OPERATOR()< std::vector< unsigned short > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_UINT: - return action - .OPENPMD_TEMPLATE_OPERATOR()< std::vector< unsigned int > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_ULONG: - return action - .OPENPMD_TEMPLATE_OPERATOR()< std::vector< unsigned long > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_ULONGLONG: return action - .OPENPMD_TEMPLATE_OPERATOR()< std::vector< unsigned long long > >( - std::forward< Args >( args )... ); + .OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_FLOAT: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::vector< float > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_DOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::vector< double > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_LONG_DOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::vector< long double > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::VEC_CFLOAT: - return action.OPENPMD_TEMPLATE_OPERATOR()< - std::vector< std::complex< float > > >( - std::forward< Args >( args )... ); + return action + .OPENPMD_TEMPLATE_OPERATOR() > >( + std::forward(args)...); case Datatype::VEC_CDOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< - std::vector< std::complex< double > > >( - std::forward< Args >( args )... ); + return action + .OPENPMD_TEMPLATE_OPERATOR() > >( + std::forward(args)...); case Datatype::VEC_CLONG_DOUBLE: return action.OPENPMD_TEMPLATE_OPERATOR()< - std::vector< std::complex< long double > > >( - std::forward< Args >( args )... ); + std::vector > >( + std::forward(args)...); case Datatype::VEC_STRING: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::vector< std::string > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::ARR_DBL_7: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::array< double, 7 > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::BOOL: - return action.OPENPMD_TEMPLATE_OPERATOR()< bool >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::DATATYPE: return detail::CallUndefinedDatatype< HIGHEST_DATATYPE, ReturnType, Action, void, - Args &&... >:: - call( std::move( action ), std::forward< Args >( args )... 
); + Args &&...>::call(std::move(action), std::forward(args)...); case Datatype::UNDEFINED: return detail::CallUndefinedDatatype< LOWEST_DATATYPE, ReturnType, Action, void, - Args &&... >:: - call( std::move( action ), std::forward< Args >( args )... ); + Args &&...>::call(std::move(action), std::forward(args)...); default: throw std::runtime_error( "Internal error: Encountered unknown datatype (switchType) ->" + - std::to_string( static_cast< int >( dt ) ) ); + std::to_string(static_cast(dt))); } } @@ -295,93 +286,87 @@ auto switchType( Datatype dt, Action action, Args &&... args ) -> decltype( * the passed arguments and the template parameter type corresponding to the * openPMD type. */ -template< typename Action, typename... Args > -auto switchNonVectorType( Datatype dt, Action action, Args &&... args ) - -> decltype( - action.OPENPMD_TEMPLATE_OPERATOR() < char > - ( std::forward< Args >( args )... ) ) +template +auto switchNonVectorType(Datatype dt, Action action, Args &&...args) + -> decltype(action.OPENPMD_TEMPLATE_OPERATOR() < char > (std::forward(args)...)) { - using ReturnType = decltype( - action.OPENPMD_TEMPLATE_OPERATOR() < char > - ( std::forward< Args >( args )... ) ); - switch( dt ) + using ReturnType = + decltype(action.OPENPMD_TEMPLATE_OPERATOR() < char > (std::forward(args)...)); + switch (dt) { case Datatype::CHAR: - return action.OPENPMD_TEMPLATE_OPERATOR()< char >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::UCHAR: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned char >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::SHORT: - return action.OPENPMD_TEMPLATE_OPERATOR()< short >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::INT: - return action.OPENPMD_TEMPLATE_OPERATOR()< int >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::LONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::LONGLONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< long long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::USHORT: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned short >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::UINT: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned int >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::ULONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::ULONGLONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned long long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::FLOAT: - return action.OPENPMD_TEMPLATE_OPERATOR()< float >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::DOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< double >( - std::forward< Args >( args )... 
); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::LONG_DOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< long double >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::CFLOAT: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::complex< float > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::CDOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::complex< double > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::CLONG_DOUBLE: - return action - .OPENPMD_TEMPLATE_OPERATOR()< std::complex< long double > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::STRING: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::string >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::BOOL: - return action.OPENPMD_TEMPLATE_OPERATOR()< bool >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::DATATYPE: return detail::CallUndefinedDatatype< HIGHEST_DATATYPE, ReturnType, Action, void, - Args &&... >:: - call( std::move( action ), std::forward< Args >( args )... ); + Args &&...>::call(std::move(action), std::forward(args)...); case Datatype::UNDEFINED: return detail::CallUndefinedDatatype< LOWEST_DATATYPE, ReturnType, Action, void, - Args &&... >:: - call( std::move( action ), std::forward< Args >( args )... ); + Args &&...>::call(std::move(action), std::forward(args)...); default: throw std::runtime_error( "Internal error: Encountered unknown datatype (switchType) ->" + - std::to_string( static_cast< int >( dt ) ) ); + std::to_string(static_cast(dt))); } } #undef OPENPMD_TEMPLATE_OPERATOR -} +} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp b/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp index ff11cd0122..5fb12ccdad 100644 --- a/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp @@ -20,207 +20,217 @@ */ #pragma once +#include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" #include "openPMD/auxiliary/StringManip.hpp" #include "openPMD/backend/Attribute.hpp" #include "openPMD/backend/Writable.hpp" -#include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" #include #include #include -#include #include +#include #include #include - namespace openPMD { inline std::string -getBP1Extent(Extent const& e, std::string const& delimiter = ",") +getBP1Extent(Extent const &e, std::string const &delimiter = ",") { - switch( e.size() ) + switch (e.size()) { - case 0: - return ""; - case 1: - return std::to_string(e[0]); - default: - std::ostringstream os; - std::for_each(e.begin(), - e.end()-1, - [&os, &delimiter](std::uint64_t const ext) { os << std::to_string(ext) << delimiter; }); - os << std::to_string(*e.rbegin()); - return os.str(); + case 0: + return ""; + case 1: + return std::to_string(e[0]); + default: + std::ostringstream os; + std::for_each( + e.begin(), e.end() - 1, [&os, &delimiter](std::uint64_t const ext) { + os << std::to_string(ext) << delimiter; + }); + os << std::to_string(*e.rbegin()); + return os.str(); } } inline std::string -getZerosLikeBP1Extent(Extent const& e, std::string const& delimiter = ",") +getZerosLikeBP1Extent(Extent const &e, std::string 
const &delimiter = ",") { - switch( e.size() ) + switch (e.size()) { - case 0: - return ""; - case 1: - return "0"; - default: - std::ostringstream os; - std::for_each(e.begin(), - e.end()-1, - [&os, &delimiter](std::uint64_t const) { os << "0" << delimiter; }); - os << "0"; - return os.str(); + case 0: + return ""; + case 1: + return "0"; + default: + std::ostringstream os; + std::for_each( + e.begin(), e.end() - 1, [&os, &delimiter](std::uint64_t const) { + os << "0" << delimiter; + }); + os << "0"; + return os.str(); } } -inline ADIOS_DATATYPES -getBP1DataType(Datatype dtype) +inline ADIOS_DATATYPES getBP1DataType(Datatype dtype) { using DT = Datatype; // note the ill-named fixed-byte adios_... types // https://github.com/ornladios/ADIOS/issues/187 - switch( dtype ) + switch (dtype) { - case DT::CHAR: - case DT::VEC_CHAR: - return adios_byte; - case DT::UCHAR: - case DT::VEC_UCHAR: - case DT::BOOL: - return adios_unsigned_byte; - case DT::SHORT: - case DT::VEC_SHORT: - if( sizeof(short) == 2u ) - return adios_short; - else if( sizeof(short) == 4u ) - return adios_integer; - else if( sizeof(long) == 8u ) - return adios_long; - else - throw unsupported_data_error("No native equivalent for Datatype::SHORT found."); - case DT::INT: - case DT::VEC_INT: - if( sizeof(int) == 2u ) - return adios_short; - else if( sizeof(int) == 4u ) - return adios_integer; - else if( sizeof(int) == 8u ) - return adios_long; - else - throw unsupported_data_error("No native equivalent for Datatype::INT found."); - case DT::LONG: - case DT::VEC_LONG: - if( sizeof(long) == 2u ) - return adios_short; - else if( sizeof(long) == 4u ) - return adios_integer; - else if( sizeof(long) == 8u ) - return adios_long; - else - throw unsupported_data_error("No native equivalent for Datatype::LONG found."); - case DT::LONGLONG: - case DT::VEC_LONGLONG: - if( sizeof(long long) == 2u ) - return adios_short; - else if( sizeof(long long) == 4u ) - return adios_integer; - else if( sizeof(long long) == 8u ) - return adios_long; - else - throw unsupported_data_error("No native equivalent for Datatype::LONGLONG found."); - case DT::USHORT: - case DT::VEC_USHORT: - if( sizeof(unsigned short) == 2u ) - return adios_unsigned_short; - else if( sizeof(unsigned short) == 4u ) - return adios_unsigned_integer; - else if( sizeof(unsigned long) == 8u ) - return adios_unsigned_long; - else - throw unsupported_data_error("No native equivalent for Datatype::USHORT found."); - case DT::UINT: - case DT::VEC_UINT: - if( sizeof(unsigned int) == 2u ) - return adios_unsigned_short; - else if( sizeof(unsigned int) == 4u ) - return adios_unsigned_integer; - else if( sizeof(unsigned int) == 8u ) - return adios_unsigned_long; - else - throw unsupported_data_error("No native equivalent for Datatype::UINT found."); - case DT::ULONG: - case DT::VEC_ULONG: - if( sizeof(unsigned long) == 2u ) - return adios_unsigned_short; - else if( sizeof(unsigned long) == 4u ) - return adios_unsigned_integer; - else if( sizeof(unsigned long) == 8u ) - return adios_unsigned_long; - else - throw unsupported_data_error("No native equivalent for Datatype::ULONG found."); - case DT::ULONGLONG: - case DT::VEC_ULONGLONG: - if( sizeof(unsigned long long) == 2u ) - return adios_unsigned_short; - else if( sizeof(unsigned long long) == 4u ) - return adios_unsigned_integer; - else if( sizeof(unsigned long long) == 8u ) - return adios_unsigned_long; - else - throw unsupported_data_error("No native equivalent for Datatype::ULONGLONG found."); - case DT::FLOAT: - case DT::VEC_FLOAT: - return 
adios_real; - case DT::DOUBLE: - case DT::ARR_DBL_7: - case DT::VEC_DOUBLE: - return adios_double; - case DT::LONG_DOUBLE: - case DT::VEC_LONG_DOUBLE: - return adios_long_double; - case DT::CFLOAT: - case DT::VEC_CFLOAT: - return adios_complex; - case DT::CDOUBLE: - case DT::VEC_CDOUBLE: - return adios_double_complex; - case DT::CLONG_DOUBLE: - case DT::VEC_CLONG_DOUBLE: - throw unsupported_data_error("No native equivalent for Datatype::CLONG_DOUBLE found."); - case DT::STRING: - return adios_string; - case DT::VEC_STRING: - return adios_string_array; - case DT::DATATYPE: - throw std::runtime_error("Meta-Datatype leaked into IO"); - case DT::UNDEFINED: - throw std::runtime_error("Unknown Attribute datatype (ADIOS datatype)"); - default: - throw std::runtime_error("Datatype not implemented in ADIOS IO"); + case DT::CHAR: + case DT::VEC_CHAR: + return adios_byte; + case DT::UCHAR: + case DT::VEC_UCHAR: + case DT::BOOL: + return adios_unsigned_byte; + case DT::SHORT: + case DT::VEC_SHORT: + if (sizeof(short) == 2u) + return adios_short; + else if (sizeof(short) == 4u) + return adios_integer; + else if (sizeof(long) == 8u) + return adios_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::SHORT found."); + case DT::INT: + case DT::VEC_INT: + if (sizeof(int) == 2u) + return adios_short; + else if (sizeof(int) == 4u) + return adios_integer; + else if (sizeof(int) == 8u) + return adios_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::INT found."); + case DT::LONG: + case DT::VEC_LONG: + if (sizeof(long) == 2u) + return adios_short; + else if (sizeof(long) == 4u) + return adios_integer; + else if (sizeof(long) == 8u) + return adios_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::LONG found."); + case DT::LONGLONG: + case DT::VEC_LONGLONG: + if (sizeof(long long) == 2u) + return adios_short; + else if (sizeof(long long) == 4u) + return adios_integer; + else if (sizeof(long long) == 8u) + return adios_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::LONGLONG found."); + case DT::USHORT: + case DT::VEC_USHORT: + if (sizeof(unsigned short) == 2u) + return adios_unsigned_short; + else if (sizeof(unsigned short) == 4u) + return adios_unsigned_integer; + else if (sizeof(unsigned long) == 8u) + return adios_unsigned_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::USHORT found."); + case DT::UINT: + case DT::VEC_UINT: + if (sizeof(unsigned int) == 2u) + return adios_unsigned_short; + else if (sizeof(unsigned int) == 4u) + return adios_unsigned_integer; + else if (sizeof(unsigned int) == 8u) + return adios_unsigned_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::UINT found."); + case DT::ULONG: + case DT::VEC_ULONG: + if (sizeof(unsigned long) == 2u) + return adios_unsigned_short; + else if (sizeof(unsigned long) == 4u) + return adios_unsigned_integer; + else if (sizeof(unsigned long) == 8u) + return adios_unsigned_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::ULONG found."); + case DT::ULONGLONG: + case DT::VEC_ULONGLONG: + if (sizeof(unsigned long long) == 2u) + return adios_unsigned_short; + else if (sizeof(unsigned long long) == 4u) + return adios_unsigned_integer; + else if (sizeof(unsigned long long) == 8u) + return adios_unsigned_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::ULONGLONG found."); + case DT::FLOAT: + case 
DT::VEC_FLOAT: + return adios_real; + case DT::DOUBLE: + case DT::ARR_DBL_7: + case DT::VEC_DOUBLE: + return adios_double; + case DT::LONG_DOUBLE: + case DT::VEC_LONG_DOUBLE: + return adios_long_double; + case DT::CFLOAT: + case DT::VEC_CFLOAT: + return adios_complex; + case DT::CDOUBLE: + case DT::VEC_CDOUBLE: + return adios_double_complex; + case DT::CLONG_DOUBLE: + case DT::VEC_CLONG_DOUBLE: + throw unsupported_data_error( + "No native equivalent for Datatype::CLONG_DOUBLE found."); + case DT::STRING: + return adios_string; + case DT::VEC_STRING: + return adios_string_array; + case DT::DATATYPE: + throw std::runtime_error("Meta-Datatype leaked into IO"); + case DT::UNDEFINED: + throw std::runtime_error("Unknown Attribute datatype (ADIOS datatype)"); + default: + throw std::runtime_error("Datatype not implemented in ADIOS IO"); } } -inline std::string -concrete_bp1_file_position(Writable* w) +inline std::string concrete_bp1_file_position(Writable *w) { - std::stack< Writable* > hierarchy; - if( !w->abstractFilePosition ) + std::stack hierarchy; + if (!w->abstractFilePosition) w = w->parent; - while( w ) + while (w) { hierarchy.push(w); w = w->parent; } std::string pos; - while( !hierarchy.empty() ) + while (!hierarchy.empty()) { - auto const tmp_ptr = std::dynamic_pointer_cast< ADIOS1FilePosition >(hierarchy.top()->abstractFilePosition); - if( tmp_ptr == nullptr ) - throw std::runtime_error("Dynamic pointer cast returned a nullptr!"); + auto const tmp_ptr = std::dynamic_pointer_cast( + hierarchy.top()->abstractFilePosition); + if (tmp_ptr == nullptr) + throw std::runtime_error( + "Dynamic pointer cast returned a nullptr!"); pos += tmp_ptr->location; hierarchy.pop(); } @@ -229,15 +239,15 @@ concrete_bp1_file_position(Writable* w) } inline std::string -getEnvNum(std::string const& key, std::string const& defaultValue) +getEnvNum(std::string const &key, std::string const &defaultValue) { - char const* env = std::getenv(key.c_str()); - if( env != nullptr ) + char const *env = std::getenv(key.c_str()); + if (env != nullptr) { - char const* tmp = env; - while( tmp ) + char const *tmp = env; + while (tmp) { - if( isdigit(*tmp) ) + if (isdigit(*tmp)) ++tmp; else { @@ -245,23 +255,23 @@ getEnvNum(std::string const& key, std::string const& defaultValue) break; } } - if( !tmp ) + if (!tmp) return std::string(env, std::strlen(env)); else return defaultValue; - } else + } + else return defaultValue; } -template -inline Attribute -readVectorAttributeInternal( void* data, int size ) +template +inline Attribute readVectorAttributeInternal(void *data, int size) { - auto d = reinterpret_cast< T* >(data); - std::vector< T > v; + auto d = reinterpret_cast(data); + std::vector v; v.resize(size); - for( int i = 0; i < size; ++i ) + for (int i = 0; i < size; ++i) v[i] = d[i]; return Attribute(v); } -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS1FilePosition.hpp b/include/openPMD/IO/ADIOS/ADIOS1FilePosition.hpp index e37e086597..2c761cba50 100644 --- a/include/openPMD/IO/ADIOS/ADIOS1FilePosition.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS1FilePosition.hpp @@ -22,15 +22,13 @@ #include "openPMD/IO/AbstractFilePosition.hpp" - namespace openPMD { struct ADIOS1FilePosition : public AbstractFilePosition { - ADIOS1FilePosition(std::string const& s) - : location{s} - { } + ADIOS1FilePosition(std::string const &s) : location{s} + {} std::string location; -}; //ADIOS1FilePosition -} // openPMD +}; // ADIOS1FilePosition +} // namespace openPMD diff --git 
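
`concrete_bp1_file_position` rebuilds the full in-file path by walking up the parent chain onto a stack and then concatenating each node's stored location from the root down. A standalone sketch of the same traversal, using a hypothetical `Node` type instead of the real `Writable`/`ADIOS1FilePosition` pair:

```cpp
#include <iostream>
#include <stack>
#include <string>

// Hypothetical stand-in for a Writable with an attached file position.
struct Node
{
    Node *parent = nullptr;
    std::string location; // e.g. "/data" or "/fields"
};

std::string concretePosition(Node *n)
{
    std::stack<Node *> hierarchy;
    while (n) // push leaf .. root
    {
        hierarchy.push(n);
        n = n->parent;
    }
    std::string pos;
    while (!hierarchy.empty()) // pop root .. leaf and concatenate
    {
        pos += hierarchy.top()->location;
        hierarchy.pop();
    }
    return pos.empty() ? "/" : pos;
}

int main()
{
    Node root{nullptr, "/data"}, it{&root, "/100"}, mesh{&it, "/fields"};
    std::cout << concretePosition(&mesh) << '\n'; // "/data/100/fields"
}
```
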
a/include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp b/include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp index d80d6ddf24..98cbdcb86e 100644 --- a/include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp @@ -20,56 +20,61 @@ */ #pragma once -#include "openPMD/config.hpp" -#include "openPMD/auxiliary/Export.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/auxiliary/Export.hpp" +#include "openPMD/config.hpp" #include #include #include #if openPMD_HAVE_ADIOS1 -# include +#include #endif - namespace openPMD { - class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl; +class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl; #if openPMD_HAVE_ADIOS1 - class OPENPMDAPI_EXPORT ADIOS1IOHandler : public AbstractIOHandler - { - friend class ADIOS1IOHandlerImpl; +class OPENPMDAPI_EXPORT ADIOS1IOHandler : public AbstractIOHandler +{ + friend class ADIOS1IOHandlerImpl; - public: - ADIOS1IOHandler(std::string path, Access); - ~ADIOS1IOHandler() override; +public: + ADIOS1IOHandler(std::string path, Access); + ~ADIOS1IOHandler() override; - std::string backendName() const override { return "ADIOS1"; } + std::string backendName() const override + { + return "ADIOS1"; + } - std::future< void > flush() override; + std::future flush(internal::FlushParams const &) override; - void enqueue(IOTask const&) override; + void enqueue(IOTask const &) override; - private: - std::queue< IOTask > m_setup; - std::unique_ptr< ADIOS1IOHandlerImpl > m_impl; - }; // ADIOS1IOHandler +private: + std::queue m_setup; + std::unique_ptr m_impl; +}; // ADIOS1IOHandler #else - class OPENPMDAPI_EXPORT ADIOS1IOHandler : public AbstractIOHandler - { - friend class ADIOS1IOHandlerImpl; +class OPENPMDAPI_EXPORT ADIOS1IOHandler : public AbstractIOHandler +{ + friend class ADIOS1IOHandlerImpl; - public: - ADIOS1IOHandler(std::string path, Access); - ~ADIOS1IOHandler() override; +public: + ADIOS1IOHandler(std::string path, Access); + ~ADIOS1IOHandler() override; - std::string backendName() const override { return "DUMMY_ADIOS1"; } + std::string backendName() const override + { + return "DUMMY_ADIOS1"; + } - std::future< void > flush() override; + std::future flush(internal::FlushParams const &) override; - private: - std::unique_ptr< ADIOS1IOHandlerImpl > m_impl; - }; // ADIOS1IOHandler +private: + std::unique_ptr m_impl; +}; // ADIOS1IOHandler #endif -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp b/include/openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp index e66116f070..b35f504ee8 100644 --- a/include/openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp @@ -20,83 +20,105 @@ */ #pragma once -#include "openPMD/config.hpp" -#include "openPMD/auxiliary/Export.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/auxiliary/Export.hpp" +#include "openPMD/config.hpp" #if openPMD_HAVE_ADIOS1 -# include "openPMD/IO/AbstractIOHandlerImpl.hpp" -# include +#include "openPMD/IO/AbstractIOHandlerImpl.hpp" +#include #endif #include #include #include #if openPMD_HAVE_ADIOS1 -# include -# include +#include +#include #endif - namespace openPMD { #if openPMD_HAVE_ADIOS1 - class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl : public AbstractIOHandlerImpl - { - public: - ADIOS1IOHandlerImpl(AbstractIOHandler*); - virtual ~ADIOS1IOHandlerImpl(); +class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl : public AbstractIOHandlerImpl +{ +public: + ADIOS1IOHandlerImpl(AbstractIOHandler *); + virtual ~ADIOS1IOHandlerImpl(); - virtual void init(); + virtual void 
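
`ADIOS1IOHandler` buffers incoming `IOTask`s in the `m_setup` queue and only performs them once `flush()` is called. A minimal sketch of that enqueue-then-flush pattern with a generic callable queue (simplified: the real `flush` takes `internal::FlushParams` and returns a `std::future`):

```cpp
#include <functional>
#include <iostream>
#include <queue>

class DeferredHandler
{
public:
    // Record work instead of performing it immediately.
    void enqueue(std::function<void()> task)
    {
        m_setup.push(std::move(task));
    }

    // Perform all recorded work in FIFO order.
    void flush()
    {
        while (!m_setup.empty())
        {
            m_setup.front()();
            m_setup.pop();
        }
    }

private:
    std::queue<std::function<void()>> m_setup;
};

int main()
{
    DeferredHandler h;
    h.enqueue([] { std::cout << "create file\n"; });
    h.enqueue([] { std::cout << "write attribute\n"; });
    h.flush(); // both actions run here, in order
}
```
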
init(); - std::future< void > flush() override; + std::future flush(); - void createFile(Writable*, Parameter< Operation::CREATE_FILE > const&) override; - void createPath(Writable*, Parameter< Operation::CREATE_PATH > const&) override; - void createDataset(Writable*, Parameter< Operation::CREATE_DATASET > const&) override; - void extendDataset(Writable*, Parameter< Operation::EXTEND_DATASET > const&) override; - void openFile(Writable*, Parameter< Operation::OPEN_FILE > const&) override; - void closeFile(Writable*, Parameter< Operation::CLOSE_FILE > const&) override; - void availableChunks(Writable*, Parameter< Operation::AVAILABLE_CHUNKS > &) override; - void openPath(Writable*, Parameter< Operation::OPEN_PATH > const&) override; - void openDataset(Writable*, Parameter< Operation::OPEN_DATASET > &) override; - void deleteFile(Writable*, Parameter< Operation::DELETE_FILE > const&) override; - void deletePath(Writable*, Parameter< Operation::DELETE_PATH > const&) override; - void deleteDataset(Writable*, Parameter< Operation::DELETE_DATASET > const&) override; - void deleteAttribute(Writable*, Parameter< Operation::DELETE_ATT > const&) override; - void writeDataset(Writable*, Parameter< Operation::WRITE_DATASET > const&) override; - void writeAttribute(Writable*, Parameter< Operation::WRITE_ATT > const&) override; - void readDataset(Writable*, Parameter< Operation::READ_DATASET > &) override; - void readAttribute(Writable*, Parameter< Operation::READ_ATT > &) override; - void listPaths(Writable*, Parameter< Operation::LIST_PATHS > &) override; - void listDatasets(Writable*, Parameter< Operation::LIST_DATASETS > &) override; - void listAttributes(Writable*, Parameter< Operation::LIST_ATTS > &) override; + void + createFile(Writable *, Parameter const &) override; + void + createPath(Writable *, Parameter const &) override; + void createDataset( + Writable *, Parameter const &) override; + void extendDataset( + Writable *, Parameter const &) override; + void openFile(Writable *, Parameter const &) override; + void + closeFile(Writable *, Parameter const &) override; + void availableChunks( + Writable *, Parameter &) override; + void openPath(Writable *, Parameter const &) override; + void openDataset(Writable *, Parameter &) override; + void + deleteFile(Writable *, Parameter const &) override; + void + deletePath(Writable *, Parameter const &) override; + void deleteDataset( + Writable *, Parameter const &) override; + void deleteAttribute( + Writable *, Parameter const &) override; + void writeDataset( + Writable *, Parameter const &) override; + void writeAttribute( + Writable *, Parameter const &) override; + void readDataset(Writable *, Parameter &) override; + void readAttribute(Writable *, Parameter &) override; + void listPaths(Writable *, Parameter &) override; + void + listDatasets(Writable *, Parameter &) override; + void listAttributes(Writable *, Parameter &) override; - virtual int64_t open_write(Writable *); - virtual ADIOS_FILE* open_read(std::string const & name); - void close(int64_t); - void close(ADIOS_FILE*); - int64_t initialize_group(std::string const& name); - void flush_attribute(int64_t group, std::string const& name, Attribute const&); + virtual int64_t open_write(Writable *); + virtual ADIOS_FILE *open_read(std::string const &name); + void close(int64_t); + void close(ADIOS_FILE *); + int64_t initialize_group(std::string const &name); + void + flush_attribute(int64_t group, std::string const &name, Attribute const &); - protected: - ADIOS_READ_METHOD m_readMethod; 
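
Each of the overrides above takes a `Parameter<Operation::...>` struct, so the operation tag selects both the overload and the payload type at compile time. A reduced standalone sketch of that pattern, with only two hypothetical operations:

```cpp
#include <iostream>
#include <string>

enum class Operation { CREATE_FILE, WRITE_ATT };

// The payload type is selected by the operation tag.
template <Operation>
struct Parameter;

template <>
struct Parameter<Operation::CREATE_FILE>
{
    std::string name;
};

template <>
struct Parameter<Operation::WRITE_ATT>
{
    std::string name;
    double value;
};

struct HandlerImpl
{
    void createFile(Parameter<Operation::CREATE_FILE> const &p)
    {
        std::cout << "create " << p.name << '\n';
    }
    void writeAttribute(Parameter<Operation::WRITE_ATT> const &p)
    {
        std::cout << p.name << " = " << p.value << '\n';
    }
};

int main()
{
    HandlerImpl impl;
    impl.createFile({"sample.bp"});
    impl.writeAttribute({"time", 0.5});
}
```
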
- std::unordered_map< Writable*, std::shared_ptr< std::string > > m_filePaths; - std::unordered_map< std::shared_ptr< std::string >, int64_t > m_groups; - std::unordered_map< std::shared_ptr< std::string >, bool > m_existsOnDisk; - std::unordered_map< std::shared_ptr< std::string >, int64_t > m_openWriteFileHandles; - std::unordered_map< std::shared_ptr< std::string >, ADIOS_FILE* > m_openReadFileHandles; - std::unordered_map< ADIOS_FILE*, std::vector< ADIOS_SELECTION* > > m_scheduledReads; - std::unordered_map< int64_t, std::unordered_map< std::string, Attribute > > m_attributeWrites; - /** - * Call this function to get adios file id for a Writable. Will create one if does not exist - * @return returns an adios file id. - */ - int64_t GetFileHandle(Writable*); - }; // ADIOS1IOHandlerImpl -#else - class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl +protected: + ADIOS_READ_METHOD m_readMethod; + std::unordered_map > m_filePaths; + std::unordered_map, int64_t> m_groups; + std::unordered_map, bool> m_existsOnDisk; + std::unordered_map, int64_t> + m_openWriteFileHandles; + std::unordered_map, ADIOS_FILE *> + m_openReadFileHandles; + struct ScheduledRead { - }; // ADIOS1IOHandlerImpl + ADIOS_SELECTION *selection; + std::shared_ptr data; // needed to avoid early freeing + }; + std::unordered_map > + m_scheduledReads; + std::unordered_map > + m_attributeWrites; + /** + * Call this function to get adios file id for a Writable. Will create one + * if does not exist + * @return returns an adios file id. + */ + int64_t GetFileHandle(Writable *); +}; // ADIOS1IOHandlerImpl +#else +class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl +{}; // ADIOS1IOHandlerImpl #endif -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp b/include/openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp index 26cc560dc2..e7e681cc4f 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp @@ -23,16 +23,16 @@ #include "openPMD/config.hpp" #if openPMD_HAVE_ADIOS2 -# include "openPMD/Dataset.hpp" -# include "openPMD/Datatype.hpp" -# include "openPMD/DatatypeHelpers.hpp" +#include "openPMD/Dataset.hpp" +#include "openPMD/Datatype.hpp" +#include "openPMD/DatatypeHelpers.hpp" -# include +#include -# include -# include -# include -# include +#include +#include +#include +#include namespace openPMD { @@ -43,33 +43,37 @@ namespace detail // we represent booleans as unsigned chars using bool_representation = unsigned char; - template < typename T > struct ToDatatypeHelper + template + struct ToDatatypeHelper { - static std::string type( ); + static std::string type(); }; - template < typename T > struct ToDatatypeHelper< std::vector< T > > + template + struct ToDatatypeHelper > { - static std::string type( ); + static std::string type(); }; - template < typename T, size_t n > - struct ToDatatypeHelper< std::array< T, n > > + template + struct ToDatatypeHelper > { - static std::string type( ); + static std::string type(); }; - template <> struct ToDatatypeHelper< bool > + template <> + struct ToDatatypeHelper { - static std::string type( ); + static std::string type(); }; struct ToDatatype { - template < typename T > std::string operator( )( ); + template + std::string operator()(); - - template < int n > std::string operator( )( ); + template + std::string operator()(); }; /** @@ -78,7 +82,7 @@ namespace detail * @param verbose If false, don't print warnings. 
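
The new `ScheduledRead` struct pairs each ADIOS selection with a `shared_ptr` to the destination buffer so the buffer cannot be freed before the scheduled read is actually performed at flush time. A standalone sketch of that ownership idea, with hypothetical types and no ADIOS calls:

```cpp
#include <iostream>
#include <memory>
#include <vector>

struct ScheduledRead
{
    std::size_t offset; // stand-in for an ADIOS_SELECTION
    std::shared_ptr<std::vector<double>> data; // keeps the buffer alive
};

int main()
{
    std::vector<ScheduledRead> scheduled;
    {
        auto buffer = std::make_shared<std::vector<double>>(4, 0.0);
        scheduled.push_back({0, buffer});
        // 'buffer' goes out of scope here, but the vector stays alive
        // because the scheduled read still holds a reference.
    }
    for (auto &read : scheduled)
        (*read.data)[read.offset] = 42.0; // would happen at flush()
    std::cout << scheduled.front().data->at(0) << '\n'; // 42
}
```
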
* @return */ - Datatype fromADIOS2Type( std::string const & dt, bool verbose = true ); + Datatype fromADIOS2Type(std::string const &dt, bool verbose = true); enum class VariableOrAttribute : unsigned char { @@ -88,15 +92,14 @@ namespace detail struct AttributeInfo { - template< typename T > - Extent - operator()( + template + Extent operator()( adios2::IO &, - std::string const & attributeName, - VariableOrAttribute ); + std::string const &attributeName, + VariableOrAttribute); - template < int n, typename... Params > - Extent operator( )( Params &&... ); + template + Extent operator()(Params &&...); }; /** @@ -111,15 +114,14 @@ namespace detail * @return The openPMD datatype corresponding to the type of the attribute. * UNDEFINED if attribute is not found. */ - Datatype - attributeInfo( - adios2::IO & IO, - std::string const & attributeName, + Datatype attributeInfo( + adios2::IO &IO, + std::string const &attributeName, bool verbose, - VariableOrAttribute voa = VariableOrAttribute::Attribute ); + VariableOrAttribute voa = VariableOrAttribute::Attribute); } // namespace detail -#if defined( _MSC_VER ) && !defined( __INTEL_COMPILER ) && !defined( __clang__ ) +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && !defined(__clang__) #define OPENPMD_TEMPLATE_OPERATOR operator #else #define OPENPMD_TEMPLATE_OPERATOR template operator @@ -143,90 +145,85 @@ namespace detail * the passed arguments and the template parameter type corresponding to the * openPMD type. */ -template< typename Action, typename... Args > -auto switchAdios2AttributeType( Datatype dt, Action action, Args &&... args ) - -> decltype( - action.OPENPMD_TEMPLATE_OPERATOR() < char > - ( std::forward< Args >( args )... ) ) +template +auto switchAdios2AttributeType(Datatype dt, Action action, Args &&...args) + -> decltype(action.OPENPMD_TEMPLATE_OPERATOR() < char > (std::forward(args)...)) { - using ReturnType = decltype( - action.OPENPMD_TEMPLATE_OPERATOR() < char > - ( std::forward< Args >( args )... ) ); - switch( dt ) + using ReturnType = + decltype(action.OPENPMD_TEMPLATE_OPERATOR() < char > (std::forward(args)...)); + switch (dt) { case Datatype::CHAR: - return action.OPENPMD_TEMPLATE_OPERATOR()< char >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::UCHAR: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned char >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::SHORT: - return action.OPENPMD_TEMPLATE_OPERATOR()< short >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::INT: - return action.OPENPMD_TEMPLATE_OPERATOR()< int >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::LONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::LONGLONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< long long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::USHORT: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned short >( - std::forward< Args >( args )... 
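
`switchAdios2AttributeType` turns a runtime `Datatype` value into a compile-time template argument by switching once and invoking the functor's templated `operator()` with the matching type. A reduced sketch of that dispatch, covering only three types and using plain `template` call syntax instead of the MSVC `OPENPMD_TEMPLATE_OPERATOR` workaround:

```cpp
#include <iostream>
#include <typeinfo>

enum class Dtype { CHAR, INT, DOUBLE };

// Example functor: prints the size of the dispatched type.
struct PrintSize
{
    template <typename T>
    void operator()() const
    {
        std::cout << typeid(T).name() << ": " << sizeof(T) << " bytes\n";
    }
};

template <typename Action>
void switchType(Dtype dt, Action action)
{
    switch (dt)
    {
    case Dtype::CHAR:
        return action.template operator()<char>();
    case Dtype::INT:
        return action.template operator()<int>();
    case Dtype::DOUBLE:
        return action.template operator()<double>();
    }
}

int main()
{
    switchType(Dtype::INT, PrintSize{});    // dispatches to operator()<int>
    switchType(Dtype::DOUBLE, PrintSize{}); // dispatches to operator()<double>
}
```
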
); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::UINT: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned int >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::ULONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::ULONGLONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned long long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::FLOAT: - return action.OPENPMD_TEMPLATE_OPERATOR()< float >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::DOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< double >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::LONG_DOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< long double >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::CFLOAT: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::complex< float > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::CDOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::complex< double > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); // missing std::complex< long double > type in ADIOS2 v2.6.0 // case Datatype::CLONG_DOUBLE: // return action // .OPENPMD_TEMPLATE_OPERATOR()< std::complex< long double > >( // std::forward< Args >( args )... ); case Datatype::STRING: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::string >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::DATATYPE: return detail::CallUndefinedDatatype< HIGHEST_DATATYPE, ReturnType, Action, void, - Args &&... >:: - call( std::move( action ), std::forward< Args >( args )... ); + Args &&...>::call(std::move(action), std::forward(args)...); case Datatype::UNDEFINED: return detail::CallUndefinedDatatype< LOWEST_DATATYPE, ReturnType, Action, void, - Args &&... >:: - call( std::move( action ), std::forward< Args >( args )... ); + Args &&...>::call(std::move(action), std::forward(args)...); default: throw std::runtime_error( "Internal error: Encountered unknown datatype (switchType) ->" + - std::to_string( static_cast< int >( dt ) ) ); + std::to_string(static_cast(dt))); } } @@ -249,62 +246,59 @@ auto switchAdios2AttributeType( Datatype dt, Action action, Args &&... args ) * the passed arguments and the template parameter type corresponding to the * openPMD type. */ -template< typename Action, typename... Args > -auto switchAdios2VariableType( Datatype dt, Action action, Args &&... args ) - -> decltype( - action.OPENPMD_TEMPLATE_OPERATOR() < char > - ( std::forward< Args >( args )... ) ) +template +auto switchAdios2VariableType(Datatype dt, Action action, Args &&...args) + -> decltype(action.OPENPMD_TEMPLATE_OPERATOR() < char > (std::forward(args)...)) { - using ReturnType = decltype( - action.OPENPMD_TEMPLATE_OPERATOR() < char > - ( std::forward< Args >( args )... 
) ); - switch( dt ) + using ReturnType = + decltype(action.OPENPMD_TEMPLATE_OPERATOR() < char > (std::forward(args)...)); + switch (dt) { case Datatype::CHAR: - return action.OPENPMD_TEMPLATE_OPERATOR()< char >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::UCHAR: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned char >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::SHORT: - return action.OPENPMD_TEMPLATE_OPERATOR()< short >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::INT: - return action.OPENPMD_TEMPLATE_OPERATOR()< int >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::LONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::LONGLONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< long long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::USHORT: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned short >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::UINT: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned int >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::ULONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::ULONGLONG: - return action.OPENPMD_TEMPLATE_OPERATOR()< unsigned long long >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::FLOAT: - return action.OPENPMD_TEMPLATE_OPERATOR()< float >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::DOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< double >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::LONG_DOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< long double >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR()( + std::forward(args)...); case Datatype::CFLOAT: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::complex< float > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); case Datatype::CDOUBLE: - return action.OPENPMD_TEMPLATE_OPERATOR()< std::complex< double > >( - std::forward< Args >( args )... ); + return action.OPENPMD_TEMPLATE_OPERATOR() >( + std::forward(args)...); // missing std::complex< long double > type in ADIOS2 v2.6.0 // case Datatype::CLONG_DOUBLE: // return action @@ -316,20 +310,18 @@ auto switchAdios2VariableType( Datatype dt, Action action, Args &&... args ) ReturnType, Action, void, - Args &&... >:: - call( std::move( action ), std::forward< Args >( args )... ); + Args &&...>::call(std::move(action), std::forward(args)...); case Datatype::UNDEFINED: return detail::CallUndefinedDatatype< LOWEST_DATATYPE, ReturnType, Action, void, - Args &&... 
>:: - call( std::move( action ), std::forward< Args >( args )... ); + Args &&...>::call(std::move(action), std::forward(args)...); default: throw std::runtime_error( "Internal error: Encountered unknown datatype (switchType) ->" + - std::to_string( static_cast< int >( dt ) ) ); + std::to_string(static_cast(dt))); } } diff --git a/include/openPMD/IO/ADIOS/ADIOS2FilePosition.hpp b/include/openPMD/IO/ADIOS/ADIOS2FilePosition.hpp index 3bff7e80dd..7203c33077 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2FilePosition.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2FilePosition.hpp @@ -20,50 +20,35 @@ */ #pragma once - #include "openPMD/IO/AbstractFilePosition.hpp" #include #include - namespace openPMD { - struct ADIOS2FilePosition : - public AbstractFilePosition +struct ADIOS2FilePosition : public AbstractFilePosition +{ + enum class GD { - enum class GD - { - GROUP, - DATASET - }; - - - ADIOS2FilePosition( - std::string s, - GD groupOrDataset - ) : - location { std::move( s ) }, - gd { groupOrDataset } - {} - - - explicit ADIOS2FilePosition( GD groupOrDataset ) : - ADIOS2FilePosition { - "/", - groupOrDataset - } - {} - - - ADIOS2FilePosition( ) : - ADIOS2FilePosition{ GD::GROUP } - {} - - - /** - * Convention: Starts with slash '/', ends without. - */ - std::string location; - GD gd; - }; // ADIOS2FilePosition -} // openPMD + GROUP, + DATASET + }; + + ADIOS2FilePosition(std::string s, GD groupOrDataset) + : location{std::move(s)}, gd{groupOrDataset} + {} + + explicit ADIOS2FilePosition(GD groupOrDataset) + : ADIOS2FilePosition{"/", groupOrDataset} + {} + + ADIOS2FilePosition() : ADIOS2FilePosition{GD::GROUP} + {} + + /** + * Convention: Starts with slash '/', ends without. + */ + std::string location; + GD gd; +}; // ADIOS2FilePosition +} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp index 46826fe7d7..f789c9aca9 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp @@ -20,25 +20,25 @@ */ #pragma once -#include "openPMD/IO/AbstractIOHandler.hpp" -#include "openPMD/IO/AbstractIOHandlerImpl.hpp" -#include "openPMD/IO/AbstractIOHandlerImplCommon.hpp" #include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include "openPMD/IO/ADIOS/ADIOS2FilePosition.hpp" #include "openPMD/IO/ADIOS/ADIOS2PreloadAttributes.hpp" +#include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/IO/AbstractIOHandlerImpl.hpp" +#include "openPMD/IO/AbstractIOHandlerImplCommon.hpp" #include "openPMD/IO/IOTask.hpp" #include "openPMD/IO/InvalidatableFile.hpp" +#include "openPMD/IterationEncoding.hpp" #include "openPMD/auxiliary/JSON.hpp" #include "openPMD/auxiliary/Option.hpp" #include "openPMD/backend/Writable.hpp" #include "openPMD/config.hpp" -#include "openPMD/IterationEncoding.hpp" #if openPMD_HAVE_ADIOS2 -# include +#include #endif #if openPMD_HAVE_MPI -# include +#include #endif #include @@ -53,7 +53,6 @@ #include // pair #include - namespace openPMD { #if openPMD_HAVE_ADIOS2 @@ -62,17 +61,20 @@ class ADIOS2IOHandler; namespace detail { - template < typename, typename > struct DatasetHelper; + template + struct DatasetHelper; struct GetSpan; struct DatasetReader; struct AttributeReader; struct AttributeWriter; struct OldAttributeReader; struct OldAttributeWriter; - template < typename > struct AttributeTypes; + template + struct AttributeTypes; struct DatasetOpener; struct VariableDefiner; - template < typename > struct DatasetTypes; + template + struct DatasetTypes; struct WriteDataset; struct 
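
`ADIOS2FilePosition` stores its location under the documented convention "starts with slash '/', ends without". A small sketch of how a child location could be derived from a parent while keeping that invariant (hypothetical helper, not the actual `extendFilePosition` implementation):

```cpp
#include <cassert>
#include <iostream>
#include <string>

// Append one path component, preserving the convention that a location
// starts with '/' and does not end with one.
std::string extendLocation(std::string parent, std::string const &child)
{
    if (parent == "/") // the root is the only location that is just a slash
        parent.clear();
    return parent + "/" + child;
}

int main()
{
    std::string root = "/";
    std::string data = extendLocation(root, "data"); // "/data"
    std::string iter = extendLocation(data, "100");  // "/data/100"
    assert(iter.front() == '/' && iter.back() != '/');
    std::cout << iter << '\n';
}
```
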
BufferedActions; struct BufferedPut; @@ -81,7 +83,6 @@ namespace detail struct BufferedAttributeWrite; } // namespace detail - namespace ADIOS2Schema { using schema_t = uint64_t; @@ -99,129 +100,114 @@ namespace ADIOS2Schema s_0000_00_00, s_2021_02_09 }; -} +} // namespace ADIOS2Schema using SupportedSchema = ADIOS2Schema::SupportedSchema; class ADIOS2IOHandlerImpl -: public AbstractIOHandlerImplCommon< ADIOS2FilePosition > + : public AbstractIOHandlerImplCommon { - template < typename, typename > friend struct detail::DatasetHelper; + template + friend struct detail::DatasetHelper; friend struct detail::GetSpan; friend struct detail::DatasetReader; friend struct detail::AttributeReader; friend struct detail::AttributeWriter; friend struct detail::OldAttributeReader; friend struct detail::OldAttributeWriter; - template < typename > friend struct detail::AttributeTypes; + template + friend struct detail::AttributeTypes; friend struct detail::DatasetOpener; friend struct detail::VariableDefiner; - template < typename > friend struct detail::DatasetTypes; + template + friend struct detail::DatasetTypes; friend struct detail::WriteDataset; friend struct detail::BufferedActions; friend struct detail::BufferedAttributeRead; - static constexpr bool ADIOS2_DEBUG_MODE = false; - - public: - #if openPMD_HAVE_MPI ADIOS2IOHandlerImpl( AbstractIOHandler *, MPI_Comm, nlohmann::json config, - std::string engineType ); + std::string engineType); #endif // openPMD_HAVE_MPI explicit ADIOS2IOHandlerImpl( - AbstractIOHandler *, - nlohmann::json config, - std::string engineType ); - + AbstractIOHandler *, nlohmann::json config, std::string engineType); ~ADIOS2IOHandlerImpl() override; - std::future< void > flush( ) override; - - void createFile( Writable *, - Parameter< Operation::CREATE_FILE > const & ) override; - - void createPath( Writable *, - Parameter< Operation::CREATE_PATH > const & ) override; + std::future flush(internal::FlushParams const &); void - createDataset( Writable *, - Parameter< Operation::CREATE_DATASET > const & ) override; + createFile(Writable *, Parameter const &) override; void - extendDataset( Writable *, - Parameter< Operation::EXTEND_DATASET > const & ) override; + createPath(Writable *, Parameter const &) override; + + void createDataset( + Writable *, Parameter const &) override; - void openFile( Writable *, - Parameter< Operation::OPEN_FILE > const & ) override; + void extendDataset( + Writable *, Parameter const &) override; - void closeFile( Writable *, - Parameter< Operation::CLOSE_FILE > const & ) override; + void openFile(Writable *, Parameter const &) override; - void openPath( Writable *, - Parameter< Operation::OPEN_PATH > const & ) override; + void + closeFile(Writable *, Parameter const &) override; - void closePath( Writable *, - Parameter< Operation::CLOSE_PATH > const & ) override; + void openPath(Writable *, Parameter const &) override; - void openDataset( Writable *, - Parameter< Operation::OPEN_DATASET > & ) override; + void + closePath(Writable *, Parameter const &) override; - void deleteFile( Writable *, - Parameter< Operation::DELETE_FILE > const & ) override; + void openDataset(Writable *, Parameter &) override; - void deletePath( Writable *, - Parameter< Operation::DELETE_PATH > const & ) override; + void + deleteFile(Writable *, Parameter const &) override; void - deleteDataset( Writable *, - Parameter< Operation::DELETE_DATASET > const & ) override; + deletePath(Writable *, Parameter const &) override; - void deleteAttribute( Writable *, - Parameter< 
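
The impl constructors accept an `nlohmann::json` configuration plus an engine type string; the `engine`, `type` and `parameters` keys read from it appear further below in `ADIOS2Defaults`. A hedged sketch of assembling such a configuration object in C++ (the engine name and parameter shown are placeholders, not a statement about supported values):

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json cfg;
    // Key names mirror ADIOS2Defaults::str_engine / str_type / str_params.
    cfg["engine"]["type"] = "bp4"; // placeholder engine name
    cfg["engine"]["parameters"]["SomeEngineParameter"] = "1"; // hypothetical
    std::cout << cfg.dump(2) << '\n';
}
```
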
Operation::DELETE_ATT > const & ) override; + void deleteDataset( + Writable *, Parameter const &) override; - void writeDataset( Writable *, - Parameter< Operation::WRITE_DATASET > const & ) override; + void deleteAttribute( + Writable *, Parameter const &) override; - void writeAttribute( Writable *, - Parameter< Operation::WRITE_ATT > const & ) override; + void writeDataset( + Writable *, Parameter const &) override; - void readDataset( Writable *, - Parameter< Operation::READ_DATASET > & ) override; + void writeAttribute( + Writable *, Parameter const &) override; - void getBufferView( Writable *, - Parameter< Operation::GET_BUFFER_VIEW > & ) override; + void readDataset(Writable *, Parameter &) override; - void readAttribute( Writable *, - Parameter< Operation::READ_ATT > & ) override; + void + getBufferView(Writable *, Parameter &) override; - void listPaths( Writable *, Parameter< Operation::LIST_PATHS > & ) override; + void readAttribute(Writable *, Parameter &) override; - void listDatasets( Writable *, - Parameter< Operation::LIST_DATASETS > & ) override; + void listPaths(Writable *, Parameter &) override; void - listAttributes( Writable *, - Parameter< Operation::LIST_ATTS > & parameters ) override; + listDatasets(Writable *, Parameter &) override; - void - advance( Writable*, Parameter< Operation::ADVANCE > & ) override; + void listAttributes( + Writable *, Parameter ¶meters) override; - void - availableChunks( Writable*, - Parameter< Operation::AVAILABLE_CHUNKS > &) override; + void advance(Writable *, Parameter &) override; + + void availableChunks( + Writable *, Parameter &) override; /** * @brief The ADIOS2 access type to chose for Engines opened * within this instance. */ - adios2::Mode adios2AccessMode( std::string const & fullPath ); + adios2::Mode adios2AccessMode(std::string const &fullPath); private: adios2::ADIOS m_ADIOS; @@ -253,7 +239,7 @@ class ADIOS2IOHandlerImpl inline SupportedSchema schema() const { - switch( m_schema ) + switch (m_schema) { case ADIOS2Schema::schema_0000_00_00: return SupportedSchema::s_0000_00_00; @@ -262,20 +248,20 @@ class ADIOS2IOHandlerImpl default: throw std::runtime_error( "[ADIOS2] Encountered unsupported schema version: " + - std::to_string( m_schema ) ); + std::to_string(m_schema)); } } inline AttributeLayout attributeLayout() const { - switch( schema() ) + switch (schema()) { case SupportedSchema::s_0000_00_00: return AttributeLayout::ByAdiosAttributes; case SupportedSchema::s_2021_02_09: return AttributeLayout::ByAdiosVariables; } - throw std::runtime_error( "Unreachable!" 
); + throw std::runtime_error("Unreachable!"); } struct ParameterizedOperator @@ -284,21 +270,19 @@ class ADIOS2IOHandlerImpl adios2::Params params; }; - std::vector< ParameterizedOperator > defaultOperators; + std::vector defaultOperators; auxiliary::TracingJSON m_config; static auxiliary::TracingJSON nullvalue; - void - init( nlohmann::json config ); + void init(nlohmann::json config); - template< typename Key > - auxiliary::TracingJSON - config( Key && key, auxiliary::TracingJSON & cfg ) + template + auxiliary::TracingJSON config(Key &&key, auxiliary::TracingJSON &cfg) { - if( cfg.json().is_object() && cfg.json().contains( key ) ) + if (cfg.json().is_object() && cfg.json().contains(key)) { - return cfg[ key ]; + return cfg[key]; } else { @@ -306,11 +290,10 @@ class ADIOS2IOHandlerImpl } } - template< typename Key > - auxiliary::TracingJSON - config( Key && key ) + template + auxiliary::TracingJSON config(Key &&key) { - return config< Key >( std::forward< Key >( key ), m_config ); + return config(std::forward(key), m_config); } /** @@ -320,15 +303,13 @@ class ADIOS2IOHandlerImpl * @return first parameter: the operators, second parameters: whether * operators have been configured */ - auxiliary::Option< std::vector< ParameterizedOperator > > - getOperators( auxiliary::TracingJSON config ); + auxiliary::Option > + getOperators(auxiliary::TracingJSON config); // use m_config - auxiliary::Option< std::vector< ParameterizedOperator > > - getOperators(); + auxiliary::Option > getOperators(); - std::string - fileSuffix() const; + std::string fileSuffix() const; /* * We need to give names to IO objects. These names are irrelevant @@ -350,31 +331,32 @@ class ADIOS2IOHandlerImpl * IO and Engine object. * Not to be accessed directly, use getFileData(). */ - std::unordered_map< InvalidatableFile, - std::unique_ptr< detail::BufferedActions > - > m_fileData; + std::unordered_map< + InvalidatableFile, + std::unique_ptr > + m_fileData; - std::map< std::string, adios2::Operator > m_operators; + std::map m_operators; // Overrides from AbstractIOHandlerImplCommon. std::string - filePositionToString( std::shared_ptr< ADIOS2FilePosition > ) override; + filePositionToString(std::shared_ptr) override; - std::shared_ptr< ADIOS2FilePosition > - extendFilePosition( std::shared_ptr< ADIOS2FilePosition > const & pos, - std::string extend ) override; + std::shared_ptr extendFilePosition( + std::shared_ptr const &pos, + std::string extend) override; // Helper methods. - auxiliary::Option< adios2::Operator > - getCompressionOperator( std::string const & compression ); + auxiliary::Option + getCompressionOperator(std::string const &compression); /* * The name of the ADIOS2 variable associated with this Writable. * To be used for Writables that represent a dataset. */ - std::string nameOfVariable( Writable * writable ); + std::string nameOfVariable(Writable *writable); /** * @brief nameOfAttribute @@ -385,13 +367,13 @@ class ADIOS2IOHandlerImpl * (possibly the empty string, representing no variable) * and the actual name. */ - std::string nameOfAttribute( Writable * writable, std::string attribute ); + std::string nameOfAttribute(Writable *writable, std::string attribute); /* * Figure out whether the Writable corresponds with a * group or a dataset. 
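
The `schema()`/`attributeLayout()` pair above maps the raw schema number to a `SupportedSchema` value and from there to the attribute layout in use. A standalone sketch of that two-step mapping; the numeric constants are placeholders, since the real values live in `ADIOS2Schema`:

```cpp
#include <cstdint>
#include <stdexcept>

using schema_t = std::uint64_t;

enum class SupportedSchema { s_0000_00_00, s_2021_02_09 };
enum class AttributeLayout { ByAdiosAttributes, ByAdiosVariables };

// Map the raw on-disk schema number to the enum (placeholder constants).
SupportedSchema schema(schema_t raw)
{
    switch (raw)
    {
    case 0: // stand-in for ADIOS2Schema::schema_0000_00_00
        return SupportedSchema::s_0000_00_00;
    case 20210209: // stand-in for ADIOS2Schema::schema_2021_02_09
        return SupportedSchema::s_2021_02_09;
    default:
        throw std::runtime_error("Unsupported schema version");
    }
}

// The attribute layout follows directly from the schema in use.
AttributeLayout attributeLayout(SupportedSchema s)
{
    switch (s)
    {
    case SupportedSchema::s_0000_00_00:
        return AttributeLayout::ByAdiosAttributes;
    case SupportedSchema::s_2021_02_09:
        return AttributeLayout::ByAdiosVariables;
    }
    throw std::runtime_error("Unreachable!");
}

int main()
{
    return attributeLayout(schema(0)) == AttributeLayout::ByAdiosAttributes
        ? 0
        : 1;
}
```
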
*/ - ADIOS2FilePosition::GD groupOrDataset( Writable * ); + ADIOS2FilePosition::GD groupOrDataset(Writable *); enum class IfFileNotOpen : bool { @@ -399,10 +381,9 @@ class ADIOS2IOHandlerImpl ThrowError }; - detail::BufferedActions & - getFileData( InvalidatableFile file, IfFileNotOpen ); + detail::BufferedActions &getFileData(InvalidatableFile file, IfFileNotOpen); - void dropFileData( InvalidatableFile file ); + void dropFileData(InvalidatableFile file); /* * Prepare a variable that already exists for an IO @@ -412,10 +393,12 @@ class ADIOS2IOHandlerImpl * (3) setting the offset and extent (ADIOS lingo: start * and count) */ - template < typename T > - adios2::Variable< T > verifyDataset( Offset const & offset, - Extent const & extent, adios2::IO & IO, - std::string const & var ); + template + adios2::Variable verifyDataset( + Offset const &offset, + Extent const &extent, + adios2::IO &IO, + std::string const &var); }; // ADIOS2IOHandlerImpl /* @@ -424,7 +407,7 @@ class ADIOS2IOHandlerImpl */ namespace ADIOS2Defaults { - using const_str = char const * const; + using const_str = char const *const; constexpr const_str str_engine = "engine"; constexpr const_str str_type = "type"; constexpr const_str str_params = "parameters"; @@ -443,103 +426,94 @@ namespace detail struct DatasetReader { - openPMD::ADIOS2IOHandlerImpl * m_impl; + openPMD::ADIOS2IOHandlerImpl *m_impl; + explicit DatasetReader(openPMD::ADIOS2IOHandlerImpl *impl); - explicit DatasetReader( openPMD::ADIOS2IOHandlerImpl * impl ); - - - template < typename T > - void operator( )( BufferedGet & bp, adios2::IO & IO, - adios2::Engine & engine, - std::string const & fileName ); + template + void operator()( + BufferedGet &bp, + adios2::IO &IO, + adios2::Engine &engine, + std::string const &fileName); std::string errorMsg = "ADIOS2: readDataset()"; }; struct OldAttributeReader { - template< typename T > - Datatype - operator()( - adios2::IO & IO, + template + Datatype operator()( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - template< int n, typename... Params > - Datatype - operator()( Params &&... ); + template + Datatype operator()(Params &&...); }; struct OldAttributeWriter { - template< typename T > - void - operator()( - ADIOS2IOHandlerImpl * impl, - Writable * writable, - const Parameter< Operation::WRITE_ATT > & parameters ); - - - template< int n, typename... Params > - void - operator()( Params &&... ); + template + void operator()( + ADIOS2IOHandlerImpl *impl, + Writable *writable, + const Parameter ¶meters); + + template + void operator()(Params &&...); }; struct AttributeReader { - template< typename T > - Datatype - operator()( - adios2::IO & IO, - detail::PreloadAdiosAttributes const & preloadedAttributes, + template + Datatype operator()( + adios2::IO &IO, + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - template < int n, typename... Params > - Datatype operator( )( Params &&... ); + template + Datatype operator()(Params &&...); }; struct AttributeWriter { - template < typename T > - void - operator()( - detail::BufferedAttributeWrite & params, - BufferedActions & fileData ); - + template + void operator()( + detail::BufferedAttributeWrite ¶ms, BufferedActions &fileData); - template < int n, typename... Params > void operator( )( Params &&... 
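
The `verifyDataset` documentation above notes that openPMD's offset/extent pair corresponds to ADIOS's start/count. A standalone sketch of the bounds check and conversion, using plain `std::vector` stand-ins for `Offset`, `Extent` and `adios2::Dims`:

```cpp
#include <cstdint>
#include <stdexcept>
#include <utility>
#include <vector>

using Offset = std::vector<std::uint64_t>; // openPMD "offset"
using Extent = std::vector<std::uint64_t>; // openPMD "extent"
using Dims = std::vector<std::size_t>;     // ADIOS "start"/"count"

// Check that offset + extent fits into the dataset shape and convert the
// request into ADIOS-style start/count dimensions.
std::pair<Dims, Dims>
toStartCount(Offset const &offset, Extent const &extent, Dims const &shape)
{
    if (offset.size() != shape.size() || extent.size() != shape.size())
        throw std::runtime_error("Dimensionality mismatch");
    Dims start(shape.size()), count(shape.size());
    for (std::size_t d = 0; d < shape.size(); ++d)
    {
        if (offset[d] + extent[d] > shape[d])
            throw std::runtime_error("Selection out of bounds");
        start[d] = static_cast<std::size_t>(offset[d]);
        count[d] = static_cast<std::size_t>(extent[d]);
    }
    return {start, count};
}

int main()
{
    auto sel = toStartCount({10, 0}, {20, 64}, {128, 64});
    return sel.first[0] == 10 && sel.second[1] == 64 ? 0 : 1;
}
```
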
); + template + void operator()(Params &&...); }; struct DatasetOpener { - ADIOS2IOHandlerImpl * m_impl; - - explicit DatasetOpener( ADIOS2IOHandlerImpl * impl ); + ADIOS2IOHandlerImpl *m_impl; + explicit DatasetOpener(ADIOS2IOHandlerImpl *impl); - template < typename T > - void operator( )( InvalidatableFile, const std::string & varName, - Parameter< Operation::OPEN_DATASET > & parameters ); - + template + void operator()( + InvalidatableFile, + const std::string &varName, + Parameter ¶meters); std::string errorMsg = "ADIOS2: openDataset()"; }; struct WriteDataset { - ADIOS2IOHandlerImpl * m_handlerImpl; - - - WriteDataset( ADIOS2IOHandlerImpl * handlerImpl ); + ADIOS2IOHandlerImpl *m_handlerImpl; + WriteDataset(ADIOS2IOHandlerImpl *handlerImpl); - template < typename T > - void operator( )( BufferedPut & bp, adios2::IO & IO, - adios2::Engine & engine ); + template + void + operator()(BufferedPut &bp, adios2::IO &IO, adios2::Engine &engine); - template < int n, typename... Params > void operator( )( Params &&... ); + template + void operator()(Params &&...); }; struct VariableDefiner @@ -559,31 +533,31 @@ namespace detail * @param count As in adios2::IO::DefineVariable * @param constantDims As in adios2::IO::DefineVariable */ - template < typename T > - void operator( )( - adios2::IO & IO, - std::string const & name, - std::vector< ADIOS2IOHandlerImpl::ParameterizedOperator > const & - compressions, - adios2::Dims const & shape = adios2::Dims(), - adios2::Dims const & start = adios2::Dims(), - adios2::Dims const & count = adios2::Dims(), - bool const constantDims = false ); + template + void operator()( + adios2::IO &IO, + std::string const &name, + std::vector const + &compressions, + adios2::Dims const &shape = adios2::Dims(), + adios2::Dims const &start = adios2::Dims(), + adios2::Dims const &count = adios2::Dims(), + bool const constantDims = false); std::string errorMsg = "ADIOS2: defineVariable()"; }; struct RetrieveBlocksInfo { - template < typename T > - void operator( )( - Parameter< Operation::AVAILABLE_CHUNKS > & params, - adios2::IO & IO, - adios2::Engine & engine, - std::string const & varName ); - - template < int n, typename... Params > - void operator( )( Params &&... ); + template + void operator()( + Parameter ¶ms, + adios2::IO &IO, + adios2::Engine &engine, + std::string const &varName); + + template + void operator()(Params &&...); }; // Helper structs to help distinguish valid attribute/variable @@ -594,207 +568,186 @@ namespace detail * for vector and array types, as well as the boolean * type (which is not natively supported by ADIOS). 
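
`VariableDefiner` forwards shape, start and count to the variable definition, where shape is the global size and start/count describe one writer's local block. A small sketch of computing such a 1D decomposition per rank, using plain arithmetic and no ADIOS or MPI calls:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

using Dims = std::vector<std::size_t>;

struct Block
{
    Dims shape; // global extent of the variable
    Dims start; // where this rank's block begins
    Dims count; // how much this rank writes
};

// Split a 1D global extent evenly across 'nranks' writers; the last rank
// takes the remainder.
Block decompose1D(std::size_t globalN, int rank, int nranks)
{
    std::size_t chunk = globalN / nranks;
    std::size_t begin = chunk * rank;
    std::size_t len = (rank == nranks - 1) ? globalN - begin : chunk;
    return {{globalN}, {begin}, {len}};
}

int main()
{
    for (int r = 0; r < 3; ++r)
    {
        Block b = decompose1D(10, r, 3);
        std::cout << "rank " << r << ": start=" << b.start[0]
                  << " count=" << b.count[0] << '\n';
    }
}
```
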
*/ - template< typename T > + template struct AttributeTypes { static void - oldCreateAttribute( - adios2::IO & IO, - std::string name, - T value ); + oldCreateAttribute(adios2::IO &IO, std::string name, T value); - static void - oldReadAttribute( - adios2::IO & IO, + static void oldReadAttribute( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - static void - createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - T value ); + static void createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + T value); - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); /** * @brief Is the attribute given by parameters name and val already * defined exactly in that way within the given IO? */ - static bool - attributeUnchanged( adios2::IO & IO, std::string name, T val ) + static bool attributeUnchanged(adios2::IO &IO, std::string name, T val) { - auto attr = IO.InquireAttribute< T >( name ); - if( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { return false; } - std::vector< T > data = attr.Data(); - if( data.size() != 1 ) + std::vector data = attr.Data(); + if (data.size() != 1) { return false; } - return data[ 0 ] == val; + return data[0] == val; } }; - template< > struct AttributeTypes< std::complex< long double > > + template <> + struct AttributeTypes > { static void - oldCreateAttribute( - adios2::IO &, - std::string, - std::complex< long double > ) + oldCreateAttribute(adios2::IO &, std::string, std::complex) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "attribute types"); } - static void - oldReadAttribute( - adios2::IO &, - std::string, - std::shared_ptr< Attribute::resource > ) + static void oldReadAttribute( + adios2::IO &, std::string, std::shared_ptr) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "attribute types"); } - static void - createAttribute( + static void createAttribute( adios2::IO &, adios2::Engine &, detail::BufferedAttributeWrite &, - std::complex< long double > ) + std::complex) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "attribute types"); } - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string, - std::shared_ptr< Attribute::resource > ) + std::shared_ptr) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "attribute types"); } static bool - attributeUnchanged( - adios2::IO &, std::string, std::complex< long double > ) + attributeUnchanged(adios2::IO &, std::string, std::complex) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "attribute types"); } }; - template< > struct AttributeTypes< std::vector< std::complex< long double > > > + template <> + 
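
`attributeUnchanged` only re-reads an existing attribute and reports whether the requested value is already stored, so unchanged attributes need not be redefined. A standalone sketch of the same check against a plain in-memory attribute store (a hypothetical stand-in for `adios2::IO`):

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Hypothetical attribute store: name -> stored values.
using Store = std::map<std::string, std::vector<double>>;

bool attributeUnchanged(Store const &store, std::string const &name, double val)
{
    auto it = store.find(name); // "InquireAttribute"
    if (it == store.end())
        return false; // not defined yet
    auto const &data = it->second;
    if (data.size() != 1)
        return false; // shape differs
    return data[0] == val; // same value already on disk?
}

int main()
{
    Store io{{"time", {0.5}}};
    std::cout << std::boolalpha << attributeUnchanged(io, "time", 0.5) << ' '
              << attributeUnchanged(io, "time", 1.0) << ' '
              << attributeUnchanged(io, "dt", 0.5) << '\n'; // true false false
}
```
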
struct AttributeTypes > > { - static void - oldCreateAttribute( + static void oldCreateAttribute( adios2::IO &, std::string, - const std::vector< std::complex< long double > > & ) + const std::vector > &) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex vector attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "vector attribute types"); } - static void - oldReadAttribute( - adios2::IO &, - std::string, - std::shared_ptr< Attribute::resource > ) + static void oldReadAttribute( + adios2::IO &, std::string, std::shared_ptr) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex vector attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "vector attribute types"); } - static void - createAttribute( + static void createAttribute( adios2::IO &, adios2::Engine &, detail::BufferedAttributeWrite &, - const std::vector< std::complex< long double > > & ) + const std::vector > &) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex vector attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "vector attribute types"); } - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string, - std::shared_ptr< Attribute::resource > ) + std::shared_ptr) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex vector attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "vector attribute types"); } - static bool - attributeUnchanged( - adios2::IO &, - std::string, - std::vector< std::complex< long double > > ) + static bool attributeUnchanged( + adios2::IO &, std::string, std::vector >) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex vector attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "vector attribute types"); } }; - template < typename T > struct AttributeTypes< std::vector< T > > + template + struct AttributeTypes > { - static void - oldCreateAttribute( - adios2::IO & IO, - std::string name, - const std::vector< T > & value ); + static void oldCreateAttribute( + adios2::IO &IO, std::string name, const std::vector &value); - static void - oldReadAttribute( - adios2::IO & IO, + static void oldReadAttribute( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - static void - createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const std::vector< T > & value ); + static void createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const std::vector &value); - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); static bool - attributeUnchanged( - adios2::IO & IO, - std::string name, - std::vector< T > val ) + attributeUnchanged(adios2::IO &IO, std::string name, std::vector val) { - auto attr = IO.InquireAttribute< T >( name ); - if( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { return false; } - std::vector< T > data = attr.Data(); - if( data.size() != val.size() ) + std::vector data = attr.Data(); + if (data.size() != val.size()) { return false; } - for( size_t i = 0; i < 
val.size(); ++i ) + for (size_t i = 0; i < val.size(); ++i) { - if( data[ i ] != val[ i ] ) + if (data[i] != val[i]) { return false; } @@ -803,53 +756,46 @@ namespace detail } }; - template<> - struct AttributeTypes< std::vector< std::string > > + template <> + struct AttributeTypes > { - static void - oldCreateAttribute( - adios2::IO & IO, + static void oldCreateAttribute( + adios2::IO &IO, std::string name, - const std::vector< std::string > & value ); + const std::vector &value); - static void - oldReadAttribute( - adios2::IO & IO, + static void oldReadAttribute( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - static void - createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const std::vector< std::string > & vec ); + static void createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const std::vector &vec); - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - static bool - attributeUnchanged( - adios2::IO & IO, - std::string name, - std::vector< std::string > val ) + static bool attributeUnchanged( + adios2::IO &IO, std::string name, std::vector val) { - auto attr = IO.InquireAttribute< std::string >( name ); - if( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { return false; } - std::vector< std::string > data = attr.Data(); - if( data.size() != val.size() ) + std::vector data = attr.Data(); + if (data.size() != val.size()) { return false; } - for( size_t i = 0; i < val.size(); ++i ) + for (size_t i = 0; i < val.size(); ++i) { - if( data[ i ] != val[ i ] ) + if (data[i] != val[i]) { return false; } @@ -858,53 +804,44 @@ namespace detail } }; - template < typename T, size_t n > - struct AttributeTypes< std::array< T, n > > + template + struct AttributeTypes > { - static void - oldCreateAttribute( - adios2::IO & IO, - std::string name, - const std::array< T, n > & value ); + static void oldCreateAttribute( + adios2::IO &IO, std::string name, const std::array &value); - static void - oldReadAttribute( - adios2::IO & IO, + static void oldReadAttribute( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - static void - createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const std::array< T, n > & value ); + static void createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const std::array &value); - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - static bool - attributeUnchanged( - adios2::IO & IO, - std::string name, - std::array< T, n > val ) + static bool attributeUnchanged( + adios2::IO &IO, std::string name, std::array val) { - auto attr = IO.InquireAttribute< T >( name ); - if( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { return false; } - std::vector< T > data = attr.Data(); - if( data.size() != n ) + std::vector data = attr.Data(); + if (data.size() != n) { return false; } - for( size_t i = 0; i < n; ++i ) + for (size_t i = 0; i < n; ++i) { - if( data[ i ] != val[ i ] ) + if (data[i] != val[i]) { return false; } @@ 
-913,64 +850,59 @@ namespace detail } }; - template <> struct AttributeTypes< bool > + template <> + struct AttributeTypes { using rep = detail::bool_representation; static void - oldCreateAttribute( adios2::IO & IO, std::string name, bool value ); + oldCreateAttribute(adios2::IO &IO, std::string name, bool value); - static void - oldReadAttribute( - adios2::IO & IO, + static void oldReadAttribute( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - static void - createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - bool value ); + static void createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + bool value); - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string name, - std::shared_ptr< Attribute::resource > resource ); - + std::shared_ptr resource); - static constexpr rep toRep( bool b ) + static constexpr rep toRep(bool b) { return b ? 1U : 0U; } - - static constexpr bool fromRep( rep r ) + static constexpr bool fromRep(rep r) { return r != 0; } static bool - attributeUnchanged( adios2::IO & IO, std::string name, bool val ) + attributeUnchanged(adios2::IO &IO, std::string name, bool val) { - auto attr = IO.InquireAttribute< rep >( name ); - if( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { return false; } - std::vector< rep > data = attr.Data(); - if( data.size() != 1 ) + std::vector data = attr.Data(); + if (data.size() != 1) { return false; } - return data[ 0 ] == toRep( val ); + return data[0] == toRep(val); } }; // Other datatypes used in the ADIOS2IOHandler implementation - struct BufferedActions; /* @@ -978,49 +910,48 @@ namespace detail */ struct BufferedAction { - explicit BufferedAction( ) = default; - virtual ~BufferedAction( ) = default; + explicit BufferedAction() = default; + virtual ~BufferedAction() = default; - BufferedAction( BufferedAction const & other ) = delete; - BufferedAction( BufferedAction && other ) = default; + BufferedAction(BufferedAction const &other) = delete; + BufferedAction(BufferedAction &&other) = default; - BufferedAction & operator=( BufferedAction const & other ) = delete; - BufferedAction & operator=( BufferedAction && other ) = default; + BufferedAction &operator=(BufferedAction const &other) = delete; + BufferedAction &operator=(BufferedAction &&other) = default; - virtual void run( BufferedActions & ) = 0; + virtual void run(BufferedActions &) = 0; }; struct BufferedGet : BufferedAction { std::string name; - Parameter< Operation::READ_DATASET > param; + Parameter param; - void run( BufferedActions & ) override; + void run(BufferedActions &) override; }; struct BufferedPut : BufferedAction { std::string name; - Parameter< Operation::WRITE_DATASET > param; + Parameter param; - void run( BufferedActions & ) override; + void run(BufferedActions &) override; }; struct OldBufferedAttributeRead : BufferedAction { - Parameter< Operation::READ_ATT > param; + Parameter param; std::string name; - void run( BufferedActions & ) override; + void run(BufferedActions &) override; }; struct BufferedAttributeRead { - Parameter< Operation::READ_ATT > param; + Parameter param; std::string name; - void - run( BufferedActions & ); + void run(BufferedActions &); }; struct BufferedAttributeWrite : BufferedAction @@ -1028,9 +959,9 @@ namespace detail std::string name; Datatype dtype; Attribute::resource resource; - std::vector< 
char > bufferForVecString; + std::vector bufferForVecString; - void run( BufferedActions & ) override; + void run(BufferedActions &) override; }; struct I_UpdateSpan @@ -1039,12 +970,12 @@ namespace detail virtual ~I_UpdateSpan() = default; }; - template< typename T > + template struct UpdateSpan : I_UpdateSpan { - adios2::detail::Span< T > span; + adios2::detail::Span span; - UpdateSpan( adios2::detail::Span< T > ); + UpdateSpan(adios2::detail::Span); void *update() override; }; @@ -1056,7 +987,7 @@ namespace detail */ struct BufferedActions { - BufferedActions( BufferedActions const & ) = delete; + BufferedActions(BufferedActions const &) = delete; /** * The full path to the file created on disk, including the @@ -1085,13 +1016,13 @@ namespace detail * IO. */ std::string const m_IOName; - adios2::ADIOS & m_ADIOS; + adios2::ADIOS &m_ADIOS; adios2::IO m_IO; /** * The default queue for deferred actions. * Drained upon BufferedActions::flush(). */ - std::vector< std::unique_ptr< BufferedAction > > m_buffer; + std::vector > m_buffer; /** * Buffer for attributes to be written in the new (variable-based) * attribute layout. @@ -1102,20 +1033,20 @@ namespace detail * write commands. * The queue is drained only when closing a step / the engine. */ - std::map< std::string, BufferedAttributeWrite > m_attributeWrites; + std::map m_attributeWrites; /** * @todo This one is unnecessary, in the new schema, attribute reads do * not need to be deferred, but can happen instantly without performance * penalty, once preloadAttributes has been filled. */ - std::vector< BufferedAttributeRead > m_attributeReads; + std::vector m_attributeReads; /** * This contains deferred actions that have already been enqueued into * ADIOS2, but not yet performed in ADIOS2. * We must store them somewhere until the next PerformPuts/Gets, EndStep * or Close in ADIOS2 to avoid use after free conditions. */ - std::vector< std::unique_ptr< BufferedAction > > m_alreadyEnqueued; + std::vector > m_alreadyEnqueued; adios2::Mode m_mode; /** * The base pointer of an ADIOS2 span might change after reallocations. @@ -1125,7 +1056,7 @@ namespace detail * retrieval of the updated base pointer. * This map is cleared upon flush points. */ - std::map< unsigned, std::unique_ptr< I_UpdateSpan > > m_updateSpans; + std::map > m_updateSpans; detail::WriteDataset const m_writeDataset; detail::DatasetReader const m_readDataset; detail::AttributeReader const m_attributeReader; @@ -1136,7 +1067,7 @@ namespace detail * written has been closed. * A committed attribute cannot be modified. */ - std::set< std::string > uncommittedAttributes; + std::set uncommittedAttributes; /* * The openPMD API will generally create new attributes for each @@ -1150,25 +1081,26 @@ namespace detail */ bool optimizeAttributesStreaming = false; - using AttributeMap_t = std::map< std::string, adios2::Params >; + using AttributeMap_t = std::map; - BufferedActions( ADIOS2IOHandlerImpl & impl, InvalidatableFile file ); + BufferedActions(ADIOS2IOHandlerImpl &impl, InvalidatableFile file); - ~BufferedActions( ); + ~BufferedActions(); /** * Implementation of destructor, will only run once. 
* */ - void - finalize(); + void finalize(); - adios2::Engine & getEngine( ); - adios2::Engine & requireActiveStep( ); + adios2::Engine &getEngine(); + adios2::Engine &requireActiveStep(); - template < typename BA > void enqueue( BA && ba ); + template + void enqueue(BA &&ba); - template < typename BA > void enqueue( BA && ba, decltype( m_buffer ) & ); + template + void enqueue(BA &&ba, decltype(m_buffer) &); /** * Flush deferred IO actions. @@ -1186,21 +1118,19 @@ namespace detail * @param flushUnconditionally Whether to run the functor even if no * deferred IO tasks had been queued. */ - template< typename F > - void - flush( + template + void flush( FlushLevel level, - F && performPutsGets, + F &&performPutsGets, bool writeAttributes, - bool flushUnconditionally ); + bool flushUnconditionally); /** * Overload of flush() that uses adios2::Engine::Perform(Puts|Gets) * and does not flush unconditionally. * */ - void - flush( FlushLevel, bool writeAttributes = false ); + void flush(FlushLevel, bool writeAttributes = false); /** * @brief Begin or end an ADIOS step. @@ -1208,41 +1138,36 @@ namespace detail * @param mode Whether to begin or end a step. * @return AdvanceStatus */ - AdvanceStatus - advance( AdvanceMode mode ); + AdvanceStatus advance(AdvanceMode mode); /* * Delete all buffered actions without running them. */ - void drop( ); + void drop(); - AttributeMap_t const & - availableAttributes(); + AttributeMap_t const &availableAttributes(); - std::vector< std::string > - availableAttributesPrefixed( std::string const & prefix ); + std::vector + availableAttributesPrefixed(std::string const &prefix); /* * See description below. */ - void - invalidateAttributesMap(); + void invalidateAttributesMap(); - AttributeMap_t const & - availableVariables(); + AttributeMap_t const &availableVariables(); - std::vector< std::string > - availableVariablesPrefixed( std::string const & prefix ); + std::vector + availableVariablesPrefixed(std::string const &prefix); /* * See description below. */ - void - invalidateVariablesMap(); + void invalidateVariablesMap(); private: - ADIOS2IOHandlerImpl * m_impl; - auxiliary::Option< adios2::Engine > m_engine; //! ADIOS engine + ADIOS2IOHandlerImpl *m_impl; + auxiliary::Option m_engine; //! ADIOS engine /** * The ADIOS2 engine type, to be passed to adios2::IO::SetEngine */ @@ -1314,7 +1239,7 @@ namespace detail * are initialized with the OutsideOfStep state). * A step should only be opened if an explicit ADVANCE task arrives * at the backend. - * + * * @todo If the streaming API is used on files, parsing the whole * Series up front is unnecessary work. * Our frontend does not yet allow to distinguish whether @@ -1358,8 +1283,8 @@ namespace detail * the map that would be returned by a call to * IO::Available(Attributes|Variables). */ - auxiliary::Option< AttributeMap_t > m_availableAttributes; - auxiliary::Option< AttributeMap_t > m_availableVariables; + auxiliary::Option m_availableAttributes; + auxiliary::Option m_availableVariables; /* * finalize() will set this true to avoid running twice. 
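The hunks above reformat BufferedActions, which queues IO work as polymorphic BufferedAction objects in m_buffer and drains that queue on flush(). A minimal standalone sketch of this deferred-action pattern, using illustrative names rather than the openPMD types:

#include <iostream>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>

// Illustrative stand-ins for BufferedAction / BufferedActions.
struct Action
{
    virtual ~Action() = default;
    virtual void run() = 0;
};

struct Put : Action
{
    void run() override
    {
        std::cout << "perform deferred put\n";
    }
};

struct ActionQueue
{
    std::vector<std::unique_ptr<Action>> buffer;

    template <typename A>
    void enqueue(A &&action)
    {
        buffer.push_back(
            std::make_unique<std::decay_t<A>>(std::forward<A>(action)));
    }

    void flush()
    {
        for (auto &action : buffer)
        {
            action->run();
        }
        buffer.clear(); // the queue is drained upon flush
    }
};

int main()
{
    ActionQueue queue;
    queue.enqueue(Put{});
    queue.flush();
}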
@@ -1371,8 +1296,7 @@ namespace detail return m_impl->schema(); } - void - configure_IO( ADIOS2IOHandlerImpl & impl ); + void configure_IO(ADIOS2IOHandlerImpl &impl); using AttributeLayout = ADIOS2IOHandlerImpl::AttributeLayout; inline AttributeLayout attributeLayout() const @@ -1384,31 +1308,31 @@ namespace detail } // namespace detail #endif // openPMD_HAVE_ADIOS2 - class ADIOS2IOHandler : public AbstractIOHandler { #if openPMD_HAVE_ADIOS2 -friend class ADIOS2IOHandlerImpl; + friend class ADIOS2IOHandlerImpl; private: ADIOS2IOHandlerImpl m_impl; public: - ~ADIOS2IOHandler( ) override + ~ADIOS2IOHandler() override { // we must not throw in a destructor try { - this->flush( ); + this->flush(internal::defaultFlushParams); } - catch( std::exception const & ex ) + catch (std::exception const &ex) { - std::cerr << "[~ADIOS2IOHandler] An error occurred: " << ex.what() << std::endl; + std::cerr << "[~ADIOS2IOHandler] An error occurred: " << ex.what() + << std::endl; } - catch( ... ) + catch (...) { - std::cerr << "[~ADIOS2IOHandler] An error occurred." << std::endl; + std::cerr << "[~ADIOS2IOHandler] An error occurred." << std::endl; } } @@ -1423,7 +1347,7 @@ friend class ADIOS2IOHandlerImpl; Access, MPI_Comm, nlohmann::json options, - std::string engineType ); + std::string engineType); #endif @@ -1431,10 +1355,13 @@ friend class ADIOS2IOHandlerImpl; std::string path, Access, nlohmann::json options, - std::string engineType ); + std::string engineType); - std::string backendName() const override { return "ADIOS2"; } + std::string backendName() const override + { + return "ADIOS2"; + } - std::future< void > flush( ) override; + std::future flush(internal::FlushParams const &) override; }; // ADIOS2IOHandler } // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS2PreloadAttributes.hpp b/include/openPMD/IO/ADIOS/ADIOS2PreloadAttributes.hpp index e50fe18d1e..75c2613674 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2PreloadAttributes.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2PreloadAttributes.hpp @@ -41,11 +41,11 @@ namespace detail * * @tparam T Underlying attribute data type. 
*/ - template< typename T > + template struct AttributeWithShape { adios2::Dims shape; - T const * data; + T const *data; }; /** @@ -76,14 +76,14 @@ namespace detail char *destroy = nullptr; AttributeLocation() = delete; - AttributeLocation( adios2::Dims shape, size_t offset, Datatype dt ); + AttributeLocation(adios2::Dims shape, size_t offset, Datatype dt); - AttributeLocation( AttributeLocation const & other ) = delete; + AttributeLocation(AttributeLocation const &other) = delete; AttributeLocation & - operator=( AttributeLocation const & other ) = delete; + operator=(AttributeLocation const &other) = delete; - AttributeLocation( AttributeLocation && other ); - AttributeLocation & operator=( AttributeLocation && other ); + AttributeLocation(AttributeLocation &&other); + AttributeLocation &operator=(AttributeLocation &&other); ~AttributeLocation(); }; @@ -97,18 +97,18 @@ namespace detail * ::operator new(std::size_t) * https://en.cppreference.com/w/cpp/memory/allocator/allocate */ - std::vector< char > m_rawBuffer; - std::map< std::string, AttributeLocation > m_offsets; + std::vector m_rawBuffer; + std::map m_offsets; public: explicit PreloadAdiosAttributes() = default; - PreloadAdiosAttributes( PreloadAdiosAttributes const & other ) = delete; + PreloadAdiosAttributes(PreloadAdiosAttributes const &other) = delete; PreloadAdiosAttributes & - operator=( PreloadAdiosAttributes const & other ) = delete; + operator=(PreloadAdiosAttributes const &other) = delete; - PreloadAdiosAttributes( PreloadAdiosAttributes && other ) = default; + PreloadAdiosAttributes(PreloadAdiosAttributes &&other) = default; PreloadAdiosAttributes & - operator=( PreloadAdiosAttributes && other ) = default; + operator=(PreloadAdiosAttributes &&other) = default; /** * @brief Schedule attributes for preloading. @@ -120,8 +120,7 @@ namespace detail * @param IO * @param engine */ - void - preloadAttributes( adios2::IO & IO, adios2::Engine & engine ); + void preloadAttributes(adios2::IO &IO, adios2::Engine &engine); /** * @brief Get an attribute that has been buffered previously. @@ -133,43 +132,42 @@ namespace detail * the attribute's shape. Valid only until any non-const member * of PreloadAdiosAttributes is called. 
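A minimal standalone sketch of the buffering idea behind the surrounding PreloadAdiosAttributes hunks: attribute payloads are packed into one contiguous char buffer and looked up through an offset map. The real class also records shapes and openPMD datatypes and hands out a pointer that stays valid only until the next non-const call; the sketch below copies the value out instead and uses illustrative names:

#include <cstddef>
#include <cstring>
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct RawAttributeBuffer
{
    std::vector<char> raw;
    std::map<std::string, std::size_t> offsets;

    template <typename T>
    void store(std::string const &name, T const &value)
    {
        std::size_t const offset = raw.size();
        raw.resize(offset + sizeof(T));
        std::memcpy(raw.data() + offset, &value, sizeof(T));
        offsets[name] = offset;
    }

    template <typename T>
    T load(std::string const &name) const
    {
        T res;
        std::memcpy(&res, raw.data() + offsets.at(name), sizeof(T));
        return res;
    }
};

int main()
{
    RawAttributeBuffer buffer;
    buffer.store<double>("/data/0/dt", 4.9e-8);
    std::cout << buffer.load<double>("/data/0/dt") << '\n';
}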
*/ - template< typename T > - AttributeWithShape< T > getAttribute( std::string const & name ) const; + template + AttributeWithShape getAttribute(std::string const &name) const; - Datatype attributeType( std::string const & name ) const; + Datatype attributeType(std::string const &name) const; }; - template< typename T > - AttributeWithShape< T > - PreloadAdiosAttributes::getAttribute( std::string const & name ) const + template + AttributeWithShape + PreloadAdiosAttributes::getAttribute(std::string const &name) const { - auto it = m_offsets.find( name ); - if( it == m_offsets.end() ) + auto it = m_offsets.find(name); + if (it == m_offsets.end()) { throw std::runtime_error( - "[ADIOS2] Requested attribute not found: " + name ); + "[ADIOS2] Requested attribute not found: " + name); } - AttributeLocation const & location = it->second; - Datatype determinedDatatype = determineDatatype< T >(); - if( std::is_same< T, signed char >::value ) + AttributeLocation const &location = it->second; + Datatype determinedDatatype = determineDatatype(); + if (std::is_same::value) { // workaround: we use Datatype::CHAR to represent ADIOS2 signed char // (ADIOS2 does not have chars with unspecified signed-ness // anyway) determinedDatatype = Datatype::CHAR; } - if( location.dt != determinedDatatype ) + if (location.dt != determinedDatatype) { std::stringstream errorMsg; errorMsg << "[ADIOS2] Wrong datatype for attribute: " << name << "(location.dt=" << location.dt - << ", T=" << determineDatatype< T >() << ")"; - throw std::runtime_error( errorMsg.str() ); + << ", T=" << determineDatatype() << ")"; + throw std::runtime_error(errorMsg.str()); } - AttributeWithShape< T > res; + AttributeWithShape res; res.shape = location.shape; - res.data = - reinterpret_cast< T const * >( &m_rawBuffer[ location.offset ] ); + res.data = reinterpret_cast(&m_rawBuffer[location.offset]); return res; } } // namespace detail diff --git a/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp b/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp index 817b6f56df..20ee1458a5 100644 --- a/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp @@ -20,46 +20,48 @@ */ #pragma once -#include "openPMD/config.hpp" -#include "openPMD/auxiliary/Export.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/auxiliary/Export.hpp" +#include "openPMD/config.hpp" #include #include #include #if openPMD_HAVE_ADIOS1 -# include +#include #endif - namespace openPMD { - class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl; +class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl; - class OPENPMDAPI_EXPORT ParallelADIOS1IOHandler : public AbstractIOHandler - { - friend class ParallelADIOS1IOHandlerImpl; +class OPENPMDAPI_EXPORT ParallelADIOS1IOHandler : public AbstractIOHandler +{ + friend class ParallelADIOS1IOHandlerImpl; - public: -# if openPMD_HAVE_MPI - ParallelADIOS1IOHandler(std::string path, Access, MPI_Comm); -# else - ParallelADIOS1IOHandler(std::string path, Access); -# endif - ~ParallelADIOS1IOHandler() override; +public: +#if openPMD_HAVE_MPI + ParallelADIOS1IOHandler(std::string path, Access, MPI_Comm); +#else + ParallelADIOS1IOHandler(std::string path, Access); +#endif + ~ParallelADIOS1IOHandler() override; - std::string backendName() const override { return "MPI_ADIOS1"; } + std::string backendName() const override + { + return "MPI_ADIOS1"; + } - std::future< void > flush() override; + std::future flush(internal::FlushParams const &) override; #if openPMD_HAVE_ADIOS1 - void 
enqueue(IOTask const&) override; + void enqueue(IOTask const &) override; #endif - private: +private: #if openPMD_HAVE_ADIOS1 - std::queue< IOTask > m_setup; + std::queue m_setup; #endif - std::unique_ptr< ParallelADIOS1IOHandlerImpl > m_impl; - }; // ParallelADIOS1IOHandler + std::unique_ptr m_impl; +}; // ParallelADIOS1IOHandler -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp b/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp index 162f622a0e..9344cc5e3b 100644 --- a/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp +++ b/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp @@ -20,87 +20,110 @@ */ #pragma once -#include "openPMD/config.hpp" -#include "openPMD/auxiliary/Export.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/auxiliary/Export.hpp" +#include "openPMD/config.hpp" #if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI -# include "openPMD/IO/AbstractIOHandlerImpl.hpp" -# include -# include +#include "openPMD/IO/AbstractIOHandlerImpl.hpp" +#include +#include #endif #include #include #include #if openPMD_HAVE_ADIOS1 -# include -# include +#include +#include #endif - namespace openPMD { #if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI - class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl : public AbstractIOHandlerImpl - { - public: - ParallelADIOS1IOHandlerImpl(AbstractIOHandler*, MPI_Comm); - virtual ~ParallelADIOS1IOHandlerImpl(); +class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl + : public AbstractIOHandlerImpl +{ +public: + ParallelADIOS1IOHandlerImpl(AbstractIOHandler *, MPI_Comm); + virtual ~ParallelADIOS1IOHandlerImpl(); - virtual void init(); + virtual void init(); - std::future< void > flush() override; + std::future flush(); - void createFile(Writable*, Parameter< Operation::CREATE_FILE > const&) override; - void createPath(Writable*, Parameter< Operation::CREATE_PATH > const&) override; - void createDataset(Writable*, Parameter< Operation::CREATE_DATASET > const&) override; - void extendDataset(Writable*, Parameter< Operation::EXTEND_DATASET > const&) override; - void openFile(Writable*, Parameter< Operation::OPEN_FILE > const&) override; - void closeFile(Writable*, Parameter< Operation::CLOSE_FILE > const&) override; - void availableChunks(Writable*, Parameter< Operation::AVAILABLE_CHUNKS > &) override; - void openPath(Writable*, Parameter< Operation::OPEN_PATH > const&) override; - void openDataset(Writable*, Parameter< Operation::OPEN_DATASET > &) override; - void deleteFile(Writable*, Parameter< Operation::DELETE_FILE > const&) override; - void deletePath(Writable*, Parameter< Operation::DELETE_PATH > const&) override; - void deleteDataset(Writable*, Parameter< Operation::DELETE_DATASET > const&) override; - void deleteAttribute(Writable*, Parameter< Operation::DELETE_ATT > const&) override; - void writeDataset(Writable*, Parameter< Operation::WRITE_DATASET > const&) override; - void writeAttribute(Writable*, Parameter< Operation::WRITE_ATT > const&) override; - void readDataset(Writable*, Parameter< Operation::READ_DATASET > &) override; - void readAttribute(Writable*, Parameter< Operation::READ_ATT > &) override; - void listPaths(Writable*, Parameter< Operation::LIST_PATHS > &) override; - void listDatasets(Writable*, Parameter< Operation::LIST_DATASETS > &) override; - void listAttributes(Writable*, Parameter< Operation::LIST_ATTS > &) override; + void + createFile(Writable *, Parameter const &) override; + void + createPath(Writable *, Parameter const &) override; + void 
createDataset( + Writable *, Parameter const &) override; + void extendDataset( + Writable *, Parameter const &) override; + void openFile(Writable *, Parameter const &) override; + void + closeFile(Writable *, Parameter const &) override; + void availableChunks( + Writable *, Parameter &) override; + void openPath(Writable *, Parameter const &) override; + void openDataset(Writable *, Parameter &) override; + void + deleteFile(Writable *, Parameter const &) override; + void + deletePath(Writable *, Parameter const &) override; + void deleteDataset( + Writable *, Parameter const &) override; + void deleteAttribute( + Writable *, Parameter const &) override; + void writeDataset( + Writable *, Parameter const &) override; + void writeAttribute( + Writable *, Parameter const &) override; + void readDataset(Writable *, Parameter &) override; + void readAttribute(Writable *, Parameter &) override; + void listPaths(Writable *, Parameter &) override; + void + listDatasets(Writable *, Parameter &) override; + void listAttributes(Writable *, Parameter &) override; - virtual int64_t open_write(Writable *); - virtual ADIOS_FILE* open_read(std::string const & name); - void close(int64_t); - void close(ADIOS_FILE*); - int64_t initialize_group(std::string const& name); - void flush_attribute(int64_t group, std::string const& name, Attribute const&); + virtual int64_t open_write(Writable *); + virtual ADIOS_FILE *open_read(std::string const &name); + void close(int64_t); + void close(ADIOS_FILE *); + int64_t initialize_group(std::string const &name); + void + flush_attribute(int64_t group, std::string const &name, Attribute const &); - protected: - ADIOS_READ_METHOD m_readMethod; - std::unordered_map< Writable*, std::shared_ptr< std::string > > m_filePaths; - std::unordered_map< std::shared_ptr< std::string >, int64_t > m_groups; - std::unordered_map< std::shared_ptr< std::string >, bool > m_existsOnDisk; - std::unordered_map< std::shared_ptr< std::string >, int64_t > m_openWriteFileHandles; - std::unordered_map< std::shared_ptr< std::string >, ADIOS_FILE* > m_openReadFileHandles; - std::unordered_map< ADIOS_FILE*, std::vector< ADIOS_SELECTION* > > m_scheduledReads; - std::unordered_map< int64_t, std::unordered_map< std::string, Attribute > > m_attributeWrites; - /** - * Call this function to get adios file id for a Writable. Will create one if does not exist - * @return returns an adios file id. - */ - int64_t GetFileHandle(Writable*); - MPI_Comm m_mpiComm; - MPI_Info m_mpiInfo; - }; // ParallelADIOS1IOHandlerImpl -#else - class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl +protected: + ADIOS_READ_METHOD m_readMethod; + std::unordered_map > m_filePaths; + std::unordered_map, int64_t> m_groups; + std::unordered_map, bool> m_existsOnDisk; + std::unordered_map, int64_t> + m_openWriteFileHandles; + std::unordered_map, ADIOS_FILE *> + m_openReadFileHandles; + struct ScheduledRead { - }; // ParallelADIOS1IOHandlerImpl + ADIOS_SELECTION *selection; + std::shared_ptr data; // needed to avoid early freeing + }; + std::unordered_map > + m_scheduledReads; + std::unordered_map > + m_attributeWrites; + /** + * Call this function to get adios file id for a Writable. Will create one + * if does not exist + * @return returns an adios file id. 
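The ScheduledRead struct introduced above pairs each ADIOS selection with a shared_ptr to the destination buffer which, as the comment notes, keeps the memory alive until the scheduled reads actually run. A standalone sketch of that ownership pattern (illustrative names, plain doubles instead of ADIOS selections, C++17 for shared_ptr array support):

#include <cstddef>
#include <memory>
#include <vector>

struct ScheduledRead
{
    std::shared_ptr<double[]> data; // shared ownership prevents early freeing
    std::size_t count;
};

struct Reader
{
    std::vector<ScheduledRead> scheduled;

    void schedule(std::shared_ptr<double[]> destination, std::size_t count)
    {
        scheduled.push_back({std::move(destination), count});
    }

    void performReads()
    {
        for (auto &read : scheduled)
        {
            for (std::size_t i = 0; i < read.count; ++i)
            {
                read.data[i] = 1.0; // stand-in for the actual backend read
            }
        }
        scheduled.clear();
    }
};

int main()
{
    Reader reader;
    {
        std::shared_ptr<double[]> buffer(new double[3]());
        reader.schedule(buffer, 3);
    } // the caller's handle goes away, but the queued read keeps it alive
    reader.performReads();
}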
+ */ + int64_t GetFileHandle(Writable *); + MPI_Comm m_mpiComm; + MPI_Info m_mpiInfo; +}; // ParallelADIOS1IOHandlerImpl +#else +class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl +{}; // ParallelADIOS1IOHandlerImpl #endif -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/AbstractFilePosition.hpp b/include/openPMD/IO/AbstractFilePosition.hpp index e822b27516..166ca554c5 100644 --- a/include/openPMD/IO/AbstractFilePosition.hpp +++ b/include/openPMD/IO/AbstractFilePosition.hpp @@ -20,7 +20,6 @@ */ #pragma once - namespace openPMD { class AbstractFilePosition @@ -28,4 +27,4 @@ class AbstractFilePosition public: virtual ~AbstractFilePosition() = default; }; // AbstractFilePosition -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/AbstractIOHandler.hpp b/include/openPMD/IO/AbstractIOHandler.hpp index 9dae2ce97c..a7239a3375 100644 --- a/include/openPMD/IO/AbstractIOHandler.hpp +++ b/include/openPMD/IO/AbstractIOHandler.hpp @@ -20,13 +20,13 @@ */ #pragma once -#include "openPMD/config.hpp" #include "openPMD/IO/Access.hpp" #include "openPMD/IO/Format.hpp" #include "openPMD/IO/IOTask.hpp" +#include "openPMD/config.hpp" #if openPMD_HAVE_MPI -# include +#include #endif #include @@ -35,32 +35,35 @@ #include #include - namespace openPMD { class no_such_file_error : public std::runtime_error { public: - no_such_file_error(std::string const& what_arg) - : std::runtime_error(what_arg) - { } - virtual ~no_such_file_error() { } + no_such_file_error(std::string const &what_arg) + : std::runtime_error(what_arg) + {} + virtual ~no_such_file_error() + {} }; class unsupported_data_error : public std::runtime_error { public: - unsupported_data_error(std::string const& what_arg) - : std::runtime_error(what_arg) - { } - virtual ~unsupported_data_error() { } + unsupported_data_error(std::string const &what_arg) + : std::runtime_error(what_arg) + {} + virtual ~unsupported_data_error() + {} }; /** * @brief Determine what items should be flushed upon Series::flush() * */ -enum class FlushLevel : unsigned char +// do not write `enum class FlushLevel : unsigned char` here since NVHPC +// does not compile it correctly +enum class FlushLevel { /** * Flush operation that was triggered by user code. @@ -83,9 +86,30 @@ enum class FlushLevel : unsigned char * CREATE_DATASET tasks. * Attributes may or may not be flushed yet. */ - SkeletonOnly + SkeletonOnly, + /** + * Only creates/opens files, nothing more + */ + CreateOrOpenFiles }; +namespace internal +{ + /** + * Parameters recursively passed through the openPMD hierarchy when + * flushing. + * + */ + struct FlushParams + { + FlushLevel flushLevel = FlushLevel::InternalFlush; + }; + + /* + * To be used for reading + */ + constexpr FlushParams defaultFlushParams{}; +} // namespace internal /** Interface for communicating between logical and physically persistent data. * @@ -100,32 +124,30 @@ class AbstractIOHandler public: #if openPMD_HAVE_MPI AbstractIOHandler(std::string path, Access at, MPI_Comm) - : directory{std::move(path)}, - m_backendAccess{at}, - m_frontendAccess{at} - { } + : directory{std::move(path)}, m_backendAccess{at}, m_frontendAccess{at} + {} #endif AbstractIOHandler(std::string path, Access at) - : directory{std::move(path)}, - m_backendAccess{at}, - m_frontendAccess{at} - { } + : directory{std::move(path)}, m_backendAccess{at}, m_frontendAccess{at} + {} virtual ~AbstractIOHandler() = default; /** Add provided task to queue according to FIFO. 
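The internal::FlushParams struct introduced above carries the flush level with each flush() call; the handler's mutable m_flushLevel member is removed further below. A standalone sketch of the resulting calling convention, restricted to the FlushLevel values visible in this hunk and otherwise using illustrative names:

#include <future>
#include <iostream>

enum class FlushLevel
{
    InternalFlush,
    SkeletonOnly,
    CreateOrOpenFiles
};

struct FlushParams
{
    FlushLevel flushLevel = FlushLevel::InternalFlush;
};

constexpr FlushParams defaultFlushParams{};

struct Handler
{
    std::future<void> flush(FlushParams const &params)
    {
        switch (params.flushLevel)
        {
        case FlushLevel::CreateOrOpenFiles:
            std::cout << "only create/open files\n";
            break;
        case FlushLevel::SkeletonOnly:
            std::cout << "create files, groups and datasets, defer data\n";
            break;
        case FlushLevel::InternalFlush:
            std::cout << "full flush\n";
            break;
        }
        return {};
    }
};

int main()
{
    Handler handler;
    handler.flush(defaultFlushParams); // what readers typically pass

    FlushParams skeletonOnly;
    skeletonOnly.flushLevel = FlushLevel::SkeletonOnly;
    handler.flush(skeletonOnly);
}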
* - * @param iotask Task to be executed after all previously enqueued IOTasks complete. + * @param iotask Task to be executed after all previously enqueued + * IOTasks complete. */ - virtual void enqueue(IOTask const& iotask) + virtual void enqueue(IOTask const &iotask) { m_work.push(iotask); } /** Process operations in queue according to FIFO. * - * @return Future indicating the completion state of the operation for backends that decide to implement this operation asynchronously. + * @return Future indicating the completion state of the operation for + * backends that decide to implement this operation asynchronously. */ - virtual std::future< void > flush() = 0; + virtual std::future flush(internal::FlushParams const &) = 0; /** The currently used backend */ virtual std::string backendName() const = 0; @@ -133,8 +155,7 @@ class AbstractIOHandler std::string const directory; Access const m_backendAccess; Access const m_frontendAccess; - std::queue< IOTask > m_work; - FlushLevel m_flushLevel = FlushLevel::InternalFlush; + std::queue m_work; }; // AbstractIOHandler } // namespace openPMD diff --git a/include/openPMD/IO/AbstractIOHandlerHelper.hpp b/include/openPMD/IO/AbstractIOHandlerHelper.hpp index 42b03be068..e6775a4757 100644 --- a/include/openPMD/IO/AbstractIOHandlerHelper.hpp +++ b/include/openPMD/IO/AbstractIOHandlerHelper.hpp @@ -20,34 +20,35 @@ */ #pragma once - -#include "openPMD/config.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/config.hpp" namespace openPMD { #if openPMD_HAVE_MPI - /** Construct an appropriate specific IOHandler for the desired IO mode that may be MPI-aware. - * - * @param path Path to root folder for all operations associated with the desired handler. - * @param access Access mode describing desired operations and permissions of the desired handler. - * @param format Format describing the IO backend of the desired handler. - * @param comm MPI communicator used for IO. - * @param options JSON-formatted option string, to be interpreted by - * the backend. - * @tparam JSON Substitute for nlohmann::json. Templated to avoid - including nlohmann::json in a .hpp file. - * @return Smart pointer to created IOHandler. - */ -template< typename JSON > -std::shared_ptr< AbstractIOHandler > -createIOHandler( +/** Construct an appropriate specific IOHandler for the desired IO mode that may + be MPI-aware. + * + * @param path Path to root folder for all operations associated with + the desired handler. + * @param access Access mode describing desired operations and + permissions of the desired handler. + * @param format Format describing the IO backend of the desired handler. + * @param comm MPI communicator used for IO. + * @param options JSON-formatted option string, to be interpreted by + * the backend. + * @tparam JSON Substitute for nlohmann::json. Templated to avoid + including nlohmann::json in a .hpp file. + * @return Smart pointer to created IOHandler. + */ +template +std::shared_ptr createIOHandler( std::string path, Access access, Format format, MPI_Comm comm, - JSON options ); + JSON options); #endif /** Construct an appropriate specific IOHandler for the desired IO mode. @@ -63,18 +64,11 @@ createIOHandler( including nlohmann::json in a .hpp file. * @return Smart pointer to created IOHandler. 
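The @tparam JSON note above explains that createIOHandler is templated so that nlohmann::json need not be included in a header. One common way to realize this is to declare the template in the header and to define and instantiate it next to the heavy include; whether openPMD does exactly that is not shown in this hunk, so the single-file sketch below is illustrative only:

// header-like part: declaration only, no heavy JSON include needed here
#include <memory>
#include <string>

struct Handler
{
    std::string config;
};

template <typename JSON>
std::unique_ptr<Handler> createHandler(std::string path, JSON options);

// source-like part: definition plus an explicit instantiation live next to
// the heavy include, so only this translation unit pays for it
#include <sstream> // stand-in for the heavy third-party header

template <typename JSON>
std::unique_ptr<Handler> createHandler(std::string path, JSON options)
{
    std::ostringstream oss;
    oss << path << ": " << options;
    return std::unique_ptr<Handler>(new Handler{oss.str()});
}

// stands for the explicit instantiation a .cpp file would provide
template std::unique_ptr<Handler> createHandler<int>(std::string, int);

int main()
{
    auto handler = createHandler("sample", 42);
    return handler->config.empty() ? 1 : 0;
}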
*/ -template< typename JSON > -std::shared_ptr< AbstractIOHandler > -createIOHandler( - std::string path, - Access access, - Format format, - JSON options = JSON() ); +template +std::shared_ptr createIOHandler( + std::string path, Access access, Format format, JSON options = JSON()); // version without configuration to use in AuxiliaryTest -std::shared_ptr< AbstractIOHandler > -createIOHandler( - std::string path, - Access access, - Format format ); +std::shared_ptr +createIOHandler(std::string path, Access access, Format format); } // namespace openPMD diff --git a/include/openPMD/IO/AbstractIOHandlerImpl.hpp b/include/openPMD/IO/AbstractIOHandlerImpl.hpp index 619f0ea857..170cf4b81a 100644 --- a/include/openPMD/IO/AbstractIOHandlerImpl.hpp +++ b/include/openPMD/IO/AbstractIOHandlerImpl.hpp @@ -27,7 +27,6 @@ #include #include - namespace openPMD { // class AbstractIOHandler; @@ -36,359 +35,494 @@ class Writable; class AbstractIOHandlerImpl { public: - AbstractIOHandlerImpl(AbstractIOHandler *handler) - : m_handler{handler} - { } + AbstractIOHandlerImpl(AbstractIOHandler *handler) : m_handler{handler} + {} virtual ~AbstractIOHandlerImpl() = default; - virtual std::future< void > flush() + std::future flush() { using namespace auxiliary; - while( !(*m_handler).m_work.empty() ) + while (!(*m_handler).m_work.empty()) { - IOTask& i = (*m_handler).m_work.front(); + IOTask &i = (*m_handler).m_work.front(); try { - switch( i.operation ) + switch (i.operation) { using O = Operation; - case O::CREATE_FILE: - createFile(i.writable, deref_dynamic_cast< Parameter< Operation::CREATE_FILE > >(i.parameter.get())); - break; - case O::CREATE_PATH: - createPath(i.writable, deref_dynamic_cast< Parameter< O::CREATE_PATH > >(i.parameter.get())); - break; - case O::CREATE_DATASET: - createDataset(i.writable, deref_dynamic_cast< Parameter< O::CREATE_DATASET > >(i.parameter.get())); - break; - case O::EXTEND_DATASET: - extendDataset(i.writable, deref_dynamic_cast< Parameter< O::EXTEND_DATASET > >(i.parameter.get())); - break; - case O::OPEN_FILE: - openFile(i.writable, deref_dynamic_cast< Parameter< O::OPEN_FILE > >(i.parameter.get())); - break; - case O::CLOSE_FILE: - closeFile(i.writable, deref_dynamic_cast< Parameter< O::CLOSE_FILE > >(i.parameter.get())); - break; - case O::OPEN_PATH: - openPath(i.writable, deref_dynamic_cast< Parameter< O::OPEN_PATH > >(i.parameter.get())); - break; - case O::CLOSE_PATH: - closePath(i.writable, deref_dynamic_cast< Parameter< O::CLOSE_PATH > >(i.parameter.get())); - break; - case O::OPEN_DATASET: - openDataset(i.writable, deref_dynamic_cast< Parameter< O::OPEN_DATASET > >(i.parameter.get())); - break; - case O::DELETE_FILE: - deleteFile(i.writable, deref_dynamic_cast< Parameter< O::DELETE_FILE > >(i.parameter.get())); - break; - case O::DELETE_PATH: - deletePath(i.writable, deref_dynamic_cast< Parameter< O::DELETE_PATH > >(i.parameter.get())); - break; - case O::DELETE_DATASET: - deleteDataset(i.writable, deref_dynamic_cast< Parameter< O::DELETE_DATASET > >(i.parameter.get())); - break; - case O::DELETE_ATT: - deleteAttribute(i.writable, deref_dynamic_cast< Parameter< O::DELETE_ATT > >(i.parameter.get())); - break; - case O::WRITE_DATASET: - writeDataset(i.writable, deref_dynamic_cast< Parameter< O::WRITE_DATASET > >(i.parameter.get())); - break; - case O::WRITE_ATT: - writeAttribute(i.writable, deref_dynamic_cast< Parameter< O::WRITE_ATT > >(i.parameter.get())); - break; - case O::READ_DATASET: - readDataset(i.writable, deref_dynamic_cast< Parameter< O::READ_DATASET > 
>(i.parameter.get())); - break; - case O::GET_BUFFER_VIEW: - getBufferView(i.writable, deref_dynamic_cast< Parameter< O::GET_BUFFER_VIEW > >(i.parameter.get())); - break; - case O::READ_ATT: - readAttribute(i.writable, deref_dynamic_cast< Parameter< O::READ_ATT > >(i.parameter.get())); - break; - case O::LIST_PATHS: - listPaths(i.writable, deref_dynamic_cast< Parameter< O::LIST_PATHS > >(i.parameter.get())); - break; - case O::LIST_DATASETS: - listDatasets(i.writable, deref_dynamic_cast< Parameter< O::LIST_DATASETS > >(i.parameter.get())); - break; - case O::LIST_ATTS: - listAttributes(i.writable, deref_dynamic_cast< Parameter< O::LIST_ATTS > >(i.parameter.get())); - break; - case O::ADVANCE: - advance(i.writable, deref_dynamic_cast< Parameter< O::ADVANCE > >(i.parameter.get())); - break; - case O::AVAILABLE_CHUNKS: - availableChunks(i.writable, deref_dynamic_cast< Parameter< O::AVAILABLE_CHUNKS > >(i.parameter.get())); - break; + case O::CREATE_FILE: + createFile( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::CREATE_PATH: + createPath( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::CREATE_DATASET: + createDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::EXTEND_DATASET: + extendDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::OPEN_FILE: + openFile( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::CLOSE_FILE: + closeFile( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::OPEN_PATH: + openPath( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::CLOSE_PATH: + closePath( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::OPEN_DATASET: + openDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::DELETE_FILE: + deleteFile( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::DELETE_PATH: + deletePath( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::DELETE_DATASET: + deleteDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::DELETE_ATT: + deleteAttribute( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::WRITE_DATASET: + writeDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::WRITE_ATT: + writeAttribute( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::READ_DATASET: + readDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::GET_BUFFER_VIEW: + getBufferView( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::READ_ATT: + readAttribute( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::LIST_PATHS: + listPaths( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::LIST_DATASETS: + listDatasets( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::LIST_ATTS: + listAttributes( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::ADVANCE: + advance( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::AVAILABLE_CHUNKS: + availableChunks( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; } - } catch (...) + } + catch (...) 
{ - std::cerr - << "[AbstractIOHandlerImpl] IO Task " - << internal::operationAsString( i.operation ) - << " failed with exception. Removing task" - << " from IO queue and passing on the exception." - << std::endl; + std::cerr << "[AbstractIOHandlerImpl] IO Task " + << internal::operationAsString(i.operation) + << " failed with exception. Removing task" + << " from IO queue and passing on the exception." + << std::endl; (*m_handler).m_work.pop(); throw; } (*m_handler).m_work.pop(); } - return std::future< void >(); + return std::future(); } - /** - * Close the file corresponding with the writable and release file handles. - * The operation should succeed in any access mode. - */ - virtual void - closeFile( Writable *, Parameter< Operation::CLOSE_FILE > const & ) = 0; + /** + * Close the file corresponding with the writable and release file handles. + * The operation should succeed in any access mode. + */ + virtual void + closeFile(Writable *, Parameter const &) = 0; - /** Advance the file/stream that this writable belongs to. - * - * If the backend is based around usage of IO steps (especially streaming - * backends), open or close an IO step. This is modeled closely after the - * step concept in ADIOS2. - * - * This task is used to implement streaming-aware semantics in the openPMD - * API by splitting data into packets that are written to and read from - * transport. - * - * IO actions up to the point of closing a step must be performed now. - * - * The advance mode is determined by parameters.mode. - * The return status code shall be stored as parameters.status. - */ - virtual void - advance( Writable *, Parameter< Operation::ADVANCE > & ) - {} + /** Advance the file/stream that this writable belongs to. + * + * If the backend is based around usage of IO steps (especially streaming + * backends), open or close an IO step. This is modeled closely after the + * step concept in ADIOS2. + * + * This task is used to implement streaming-aware semantics in the openPMD + * API by splitting data into packets that are written to and read from + * transport. + * + * IO actions up to the point of closing a step must be performed now. + * + * The advance mode is determined by parameters.mode. + * The return status code shall be stored as parameters.status. + */ + virtual void advance(Writable *, Parameter &) + {} - /** Close an openPMD group. - * - * This is an optimization-enabling task and may be ignored by backends. - * Indicates that the group will not be accessed any further. - * Especially in step-based IO mode (e.g. streaming): - * Indicates that the group corresponding with the writable needs not be held - * in a parseable state for this and upcoming IO steps, allowing for deletion - * of metadata to be sent/stored (attributes, datasets, ..). - * Should fail if the writable is not written. - * Should fail if m_handler->accessType is AccessType::READ_ONLY. - * - */ - virtual void - closePath( Writable *, Parameter< Operation::CLOSE_PATH > const & ) - {} + /** Close an openPMD group. + * + * This is an optimization-enabling task and may be ignored by backends. + * Indicates that the group will not be accessed any further. + * Especially in step-based IO mode (e.g. streaming): + * Indicates that the group corresponding with the writable needs not be + * held in a parseable state for this and upcoming IO steps, allowing for + * deletion of metadata to be sent/stored (attributes, datasets, ..). Should + * fail if the writable is not written. 
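The flush() loop above drains the handler's FIFO work queue, dispatches each IOTask on its operation, and pops a failing task before rethrowing so that it is not retried on the next flush. A standalone sketch of that control flow with illustrative task types:

#include <iostream>
#include <queue>
#include <stdexcept>

enum class Operation
{
    CREATE_FILE,
    WRITE_ATT
};

struct IOTask
{
    Operation operation;
};

void processQueue(std::queue<IOTask> &work)
{
    while (!work.empty())
    {
        IOTask &task = work.front();
        try
        {
            switch (task.operation)
            {
            case Operation::CREATE_FILE:
                std::cout << "create file\n";
                break;
            case Operation::WRITE_ATT:
                throw std::runtime_error("backend failure");
            }
        }
        catch (...)
        {
            // remove the failing task from the queue, then pass the
            // exception on to the caller
            work.pop();
            throw;
        }
        work.pop();
    }
}

int main()
{
    std::queue<IOTask> work;
    work.push({Operation::CREATE_FILE});
    try
    {
        processQueue(work);
    }
    catch (std::exception const &ex)
    {
        std::cerr << ex.what() << '\n';
    }
}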
Should fail if m_handler->accessType + * is AccessType::READ_ONLY. + * + */ + virtual void closePath(Writable *, Parameter const &) + {} - /** Report chunks that are available for loading from the dataset represented - * by this writable. - * - * The resulting chunks should be stored into parameters.chunks. - * - */ - virtual void - availableChunks( Writable *, Parameter< Operation::AVAILABLE_CHUNKS > & ) = 0; + /** Report chunks that are available for loading from the dataset + * represented by this writable. + * + * The resulting chunks should be stored into parameters.chunks. + * + */ + virtual void + availableChunks(Writable *, Parameter &) = 0; - /** Create a new file in physical storage, possibly overriding an existing file. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The new file should be located in m_handler->directory. - * The new file should have the filename parameters.name. - * The filename should include the correct corresponding filename extension. - * Any existing file should be overwritten if m_handler->m_frontendAccess is Access::CREATE. - * The Writables file position should correspond to the root group "/" of the hierarchy. - * The Writable should be marked written when the operation completes successfully. - */ - virtual void createFile(Writable*, Parameter< Operation::CREATE_FILE > const&) = 0; - /** Create all necessary groups for a path, possibly recursively. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The path parameters.path may contain multiple levels (e.g. first/second/third/). - * The Writables file position should correspond to the complete newly created path (i.e. first/second/third/ should be assigned to the Writables file position). - * The Writable should be marked written when the operation completes successfully. - */ - virtual void createPath(Writable*, Parameter< Operation::CREATE_PATH > const&) = 0; - /** Create a new dataset of given type, extent and storage properties. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The path may contain multiple levels (e.g. group/dataset). - * The new dataset should have the name parameters.name. This name should not start or end with a slash ("/"). - * The new dataset should be of datatype parameters.dtype. - * The new dataset should have an extent of parameters.extent. - * If possible, the new dataset should be extensible. - * If possible, the new dataset should be divided into chunks with size parameters.chunkSize. - * If possible, the new dataset should be compressed according to parameters.compression. This may be format-specific. - * If possible, the new dataset should be transformed accoring to parameters.transform. This may be format-specific. - * The Writables file position should correspond to the newly created dataset. - * The Writable should be marked written when the operation completes successfully. - */ - virtual void createDataset(Writable*, Parameter< Operation::CREATE_DATASET > const&) = 0; - /** Increase the extent of an existing dataset. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The operation should fail if the dataset does not yet exist. - * The dataset should have the name parameters.name. This name should not start or end with a slash ("/"). - * The operation should fail if the new extent is not strictly large in every dimension. - * The dataset should have an extent of parameters.extent. 
- */ - virtual void extendDataset(Writable*, Parameter< Operation::EXTEND_DATASET > const&) = 0; - /** Open an existing file assuming it conforms to openPMD. - * - * The operation should fail if m_handler->directory is not accessible. - * The opened file should have filename parameters.name and include the correct corresponding filename extension. - * The operation should not open files more than once. - * If possible, the file should be opened with read-only permissions if m_handler->m_frontendAccess is Access::READ_ONLY. - * The Writables file position should correspond to the root group "/" of the hierarchy in the opened file. - * The Writable should be marked written when the operation completes successfully. - */ - virtual void openFile(Writable*, Parameter< Operation::OPEN_FILE > const&) = 0; - /** Open all contained groups in a path, possibly recursively. - * - * The operation should overwrite existing file positions, even when the Writable was already marked written. - * The path parameters.path may contain multiple levels (e.g. first/second/third/). This path should be relative (i.e. it should not start with a slash "/"). - * The number of levels may be zero, i.e. parameters.path may be an empty string. - * The Writables file position should correspond to the complete opened path (i.e. first/second/third/ should be assigned to the Writables file position). - * The Writable should be marked written when the operation completes successfully. - */ - virtual void openPath(Writable*, Parameter< Operation::OPEN_PATH > const&) = 0; - /** Open an existing dataset and determine its datatype and extent. - * - * The opened dataset should be located in a group below the group of the Writables parent writable->parent. - * The opened datasets name should be parameters.name. This name should not start or end with a slash ("/"). - * The opened datasets datatype should be stored in *(parameters.dtype). - * The opened datasets extent should be stored in *(parameters.extent). - * The Writables file position should correspond to the opened dataset. - * The Writable should be marked written when the operation completes successfully. - */ - virtual void openDataset(Writable*, Parameter< Operation::OPEN_DATASET > &) = 0; - /** Delete an existing file from physical storage. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The operation should pass if the Writable was not marked written. - * All handles that correspond to the file should be closed before deletion. - * The file to delete should have the filename parameters.name. - * The filename should include the correct corresponding filename extension. - * The Writables file position should be set to an invalid position (i.e. the pointer should be a nullptr). - * The Writable should be marked not written when the operation completes successfully. - */ - virtual void deleteFile(Writable*, Parameter< Operation::DELETE_FILE > const&) = 0; - /** Delete all objects within an existing path. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The operation should pass if the Writable was not marked written. - * The path parameters.path may contain multiple levels (e.g. first/second/third/). This path should be relative (i.e. it should not start with a slash "/"). It may also contain the current group ".". - * All groups and datasets starting from the path should not be accessible in physical storage after the operation completes successfully. 
- * The Writables file position should be set to an invalid position (i.e. the pointer should be a nullptr). - * The Writable should be marked not written when the operation completes successfully. - */ - virtual void deletePath(Writable*, Parameter< Operation::DELETE_PATH > const&) = 0; - /** Delete an existing dataset. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The operation should pass if the Writable was not marked written. - * The dataset should have the name parameters.name. This name should not start or end with a slash ("/"). It may also contain the current dataset ".". - * The dataset should not be accessible in physical storage after the operation completes successfully. - * The Writables file position should be set to an invalid position (i.e. the pointer should be a nullptr). - * The Writable should be marked not written when the operation completes successfully. - */ - virtual void deleteDataset(Writable*, Parameter< Operation::DELETE_DATASET > const&) = 0; - /** Delete an existing attribute. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The operation should pass if the Writable was not marked written. - * The attribute should be associated with the Writable and have the name parameters.name before deletion. - * The attribute should not be accessible in physical storage after the operation completes successfully. - */ - virtual void deleteAttribute(Writable*, Parameter< Operation::DELETE_ATT > const&) = 0; - /** Write a chunk of data into an existing dataset. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The dataset should be associated with the Writable. - * The operation should fail if the dataset does not exist. - * The operation should fail if the chunk extent parameters.extent is not smaller or equals in every dimension. - * The operation should fail if chunk positions parameters.offset+parameters.extent do not reside inside the dataset. - * The dataset should match the dataype parameters.dtype. - * The data parameters.data is a cast-to-void pointer to a flattened version of the chunk data. It should be re-cast to the provided datatype. The chunk is stored row-major. - * The region of the chunk should be written to physical storage after the operation completes successfully. - */ - virtual void writeDataset(Writable*, Parameter< Operation::WRITE_DATASET > const&) = 0; + /** Create a new file in physical storage, possibly overriding an existing + * file. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The new file should be located in + * m_handler->directory. The new file should have the filename + * parameters.name. The filename should include the correct corresponding + * filename extension. Any existing file should be overwritten if + * m_handler->m_frontendAccess is Access::CREATE. The Writables file + * position should correspond to the root group "/" of the hierarchy. The + * Writable should be marked written when the operation completes + * successfully. + */ + virtual void + createFile(Writable *, Parameter const &) = 0; + /** Create all necessary groups for a path, possibly recursively. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The path parameters.path may contain multiple levels + * (e.g. first/second/third/). The Writables file position should correspond + * to the complete newly created path (i.e. 
first/second/third/ should be + * assigned to the Writables file position). The Writable should be marked + * written when the operation completes successfully. + */ + virtual void + createPath(Writable *, Parameter const &) = 0; + /** Create a new dataset of given type, extent and storage properties. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The path may contain multiple levels (e.g. + * group/dataset). The new dataset should have the name parameters.name. + * This name should not start or end with a slash ("/"). The new dataset + * should be of datatype parameters.dtype. The new dataset should have an + * extent of parameters.extent. If possible, the new dataset should be + * extensible. If possible, the new dataset should be divided into chunks + * with size parameters.chunkSize. If possible, the new dataset should be + * compressed according to parameters.compression. This may be + * format-specific. If possible, the new dataset should be transformed + * accoring to parameters.transform. This may be format-specific. The + * Writables file position should correspond to the newly created dataset. + * The Writable should be marked written when the operation completes + * successfully. + */ + virtual void + createDataset(Writable *, Parameter const &) = 0; + /** Increase the extent of an existing dataset. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The operation should fail if the dataset does not yet + * exist. The dataset should have the name parameters.name. This name should + * not start or end with a slash ("/"). The operation should fail if the new + * extent is not strictly large in every dimension. The dataset should have + * an extent of parameters.extent. + */ + virtual void + extendDataset(Writable *, Parameter const &) = 0; + /** Open an existing file assuming it conforms to openPMD. + * + * The operation should fail if m_handler->directory is not accessible. + * The opened file should have filename parameters.name and include the + * correct corresponding filename extension. The operation should not open + * files more than once. If possible, the file should be opened with + * read-only permissions if m_handler->m_frontendAccess is + * Access::READ_ONLY. The Writables file position should correspond to the + * root group "/" of the hierarchy in the opened file. The Writable should + * be marked written when the operation completes successfully. + */ + virtual void + openFile(Writable *, Parameter const &) = 0; + /** Open all contained groups in a path, possibly recursively. + * + * The operation should overwrite existing file positions, even when the + * Writable was already marked written. The path parameters.path may contain + * multiple levels (e.g. first/second/third/). This path should be relative + * (i.e. it should not start with a slash "/"). The number of levels may be + * zero, i.e. parameters.path may be an empty string. The Writables file + * position should correspond to the complete opened path (i.e. + * first/second/third/ should be assigned to the Writables file position). + * The Writable should be marked written when the operation completes + * successfully. + */ + virtual void + openPath(Writable *, Parameter const &) = 0; + /** Open an existing dataset and determine its datatype and extent. + * + * The opened dataset should be located in a group below the group of the + * Writables parent writable->parent. The opened datasets name should be + * parameters.name. 
This name should not start or end with a slash ("/"). + * The opened datasets datatype should be stored in *(parameters.dtype). + * The opened datasets extent should be stored in *(parameters.extent). + * The Writables file position should correspond to the opened dataset. + * The Writable should be marked written when the operation completes + * successfully. + */ + virtual void + openDataset(Writable *, Parameter &) = 0; + /** Delete an existing file from physical storage. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The operation should pass if the Writable was not + * marked written. All handles that correspond to the file should be closed + * before deletion. The file to delete should have the filename + * parameters.name. The filename should include the correct corresponding + * filename extension. The Writables file position should be set to an + * invalid position (i.e. the pointer should be a nullptr). The Writable + * should be marked not written when the operation completes successfully. + */ + virtual void + deleteFile(Writable *, Parameter const &) = 0; + /** Delete all objects within an existing path. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The operation should pass if the Writable was not + * marked written. The path parameters.path may contain multiple levels + * (e.g. first/second/third/). This path should be relative (i.e. it should + * not start with a slash "/"). It may also contain the current group ".". + * All groups and datasets starting from the path should not be accessible + * in physical storage after the operation completes successfully. The + * Writables file position should be set to an invalid position (i.e. the + * pointer should be a nullptr). The Writable should be marked not written + * when the operation completes successfully. + */ + virtual void + deletePath(Writable *, Parameter const &) = 0; + /** Delete an existing dataset. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The operation should pass if the Writable was not + * marked written. The dataset should have the name parameters.name. This + * name should not start or end with a slash ("/"). It may also contain the + * current dataset ".". The dataset should not be accessible in physical + * storage after the operation completes successfully. The Writables file + * position should be set to an invalid position (i.e. the pointer should be + * a nullptr). The Writable should be marked not written when the operation + * completes successfully. + */ + virtual void + deleteDataset(Writable *, Parameter const &) = 0; + /** Delete an existing attribute. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The operation should pass if the Writable was not + * marked written. The attribute should be associated with the Writable and + * have the name parameters.name before deletion. The attribute should not + * be accessible in physical storage after the operation completes + * successfully. + */ + virtual void + deleteAttribute(Writable *, Parameter const &) = 0; + /** Write a chunk of data into an existing dataset. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The dataset should be associated with the Writable. + * The operation should fail if the dataset does not exist. 
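The writeDataset, readDataset and getBufferView contracts in this hunk require that a chunk given by offset and extent lies entirely inside the dataset in every dimension (the rule is restated just below). A standalone helper sketching that check; Offset and Extent here are illustrative aliases, not the openPMD types:

#include <cstddef>
#include <cstdint>
#include <vector>

using Extent = std::vector<std::uint64_t>;
using Offset = std::vector<std::uint64_t>;

bool chunkInsideDataset(
    Offset const &offset, Extent const &extent, Extent const &dataset)
{
    if (offset.size() != dataset.size() || extent.size() != dataset.size())
    {
        return false; // dimensionality mismatch
    }
    for (std::size_t d = 0; d < dataset.size(); ++d)
    {
        if (offset[d] + extent[d] > dataset[d])
        {
            return false; // chunk sticks out of the dataset in dimension d
        }
    }
    return true;
}

int main()
{
    Extent const dataset{100, 100};
    bool const ok = chunkInsideDataset({90, 0}, {10, 100}, dataset);
    bool const bad = chunkInsideDataset({95, 0}, {10, 100}, dataset);
    return (ok && !bad) ? 0 : 1;
}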
+ * The operation should fail if the chunk extent parameters.extent is not + * smaller or equals in every dimension. The operation should fail if chunk + * positions parameters.offset+parameters.extent do not reside inside the + * dataset. The dataset should match the dataype parameters.dtype. The data + * parameters.data is a cast-to-void pointer to a flattened version of the + * chunk data. It should be re-cast to the provided datatype. The chunk is + * stored row-major. The region of the chunk should be written to physical + * storage after the operation completes successfully. + */ + virtual void + writeDataset(Writable *, Parameter const &) = 0; - /** Get a view into a dataset buffer that can be filled by a user. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The dataset should be associated with the Writable. - * The operation should fail if the dataset does not exist. - * The operation should fail if the chunk extent parameters.extent is not smaller or equals in every dimension. - * The operation should fail if chunk positions parameters.offset+parameters.extent do not reside inside the dataset. - * The dataset should match the dataype parameters.dtype. - * The buffer should be stored as a cast-to-char pointer to a flattened version of the backend buffer in parameters.out->ptr. The chunk is stored row-major. - * The buffer's content should be written to storage not before the next call to AbstractIOHandler::flush where AbstractIOHandler::m_flushLevel == FlushLevel::InternalFlush. - * The precise time of data consumption is defined by the backend: - * * Data written to the returned buffer should be consumed not earlier than the next call to AbstractIOHandler::flush where AbstractIOHandler::m_flushLevel == FlushLevel::InternalFlush. - * * Data should be consumed not later than the next Operation::ADVANCE task where parameter.mode == AdvanceMode::ENDSTEP. - * - * This IOTask is optional and should either (1) not be implemented by a backend at all or (2) be implemented as indicated above and set parameters.out->backendManagedBuffer = true. - */ - virtual void getBufferView(Writable*, Parameter< Operation::GET_BUFFER_VIEW >& parameters) - { + /** Get a view into a dataset buffer that can be filled by a user. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The dataset should be associated with the Writable. + * The operation should fail if the dataset does not exist. + * The operation should fail if the chunk extent parameters.extent is not + * smaller or equals in every dimension. The operation should fail if chunk + * positions parameters.offset+parameters.extent do not reside inside the + * dataset. The dataset should match the dataype parameters.dtype. The + * buffer should be stored as a cast-to-char pointer to a flattened version + * of the backend buffer in parameters.out->ptr. The chunk is stored + * row-major. The buffer's content should be written to storage not before + * the next call to AbstractIOHandler::flush where + * AbstractIOHandler::m_flushLevel == FlushLevel::InternalFlush. The precise + * time of data consumption is defined by the backend: + * * Data written to the returned buffer should be consumed not earlier than + * the next call to AbstractIOHandler::flush where + * AbstractIOHandler::m_flushLevel == FlushLevel::InternalFlush. + * * Data should be consumed not later than the next Operation::ADVANCE task + * where parameter.mode == AdvanceMode::ENDSTEP. 
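The writeDataset and getBufferView contracts here, like readDataset further down, repeat the same chunk rule: the chunk extent must not exceed the dataset extent in any dimension, and parameters.offset + parameters.extent must lie inside the dataset. The standalone check below is a sketch of that rule only; it is not taken from the library, and plain std::size_t vectors stand in for openPMD's Offset and Extent aliases.

```cpp
// Sketch of the chunk-bounds rule stated in the contracts above; offsets and
// extents are modelled as plain size_t vectors rather than openPMD aliases.
#include <cstddef>
#include <vector>

bool chunkInsideDataset(
    std::vector<std::size_t> const &offset,
    std::vector<std::size_t> const &extent,
    std::vector<std::size_t> const &datasetExtent)
{
    if (offset.size() != datasetExtent.size() ||
        extent.size() != datasetExtent.size())
        return false; // rank mismatch
    for (std::size_t d = 0; d < datasetExtent.size(); ++d)
        if (offset[d] + extent[d] > datasetExtent[d])
            return false; // chunk reaches past the dataset in dimension d
    return true;
}
```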
+ * + * This IOTask is optional and should either (1) not be implemented by a + * backend at all or (2) be implemented as indicated above and set + * parameters.out->backendManagedBuffer = true. + */ + virtual void + getBufferView(Writable *, Parameter ¶meters) + { // default implementation: operation unsupported by backend parameters.out->backendManagedBuffer = false; - } - /** Create a single attribute and fill the value, possibly overwriting an existing attribute. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The attribute should have the name parameters.name. This name should not contain a slash ("/"). - * The attribute should be of datatype parameters.dtype. - * Any existing attribute with the same name should be overwritten. If possible, only the value should be changed if the datatype stays the same. - * The attribute should be written to physical storage after the operation completes successfully. - * All datatypes of Datatype should be supported in a type-safe way. - */ - virtual void writeAttribute(Writable*, Parameter< Operation::WRITE_ATT > const&) = 0; - /** Read a chunk of data from an existing dataset. - * - * The dataset should be associated with the Writable. - * The operation should fail if the dataset does not exist. - * The operation should fail if the chunk extent parameters.extent is not smaller or equals in every dimension. - * The operation should fail if chunk positions parameters.offset+parameters.extent do not reside inside the dataset. - * The dataset should match the dataype parameters.dtype. - * The data parameters.data should be a cast-to-void pointer to a flattened version of the chunk data. The chunk should be stored row-major. - * The region of the chunk should be written to the location indicated by the pointer after the operation completes successfully. - */ - virtual void readDataset(Writable*, Parameter< Operation::READ_DATASET > &) = 0; - /** Read the value of an existing attribute. - * - * The operation should fail if the Writable was not marked written. - * The operation should fail if the attribute does not exist. - * The attribute should be associated with the Writable and have the name parameters.name. This name should not contain a slash ("/"). - * The attribute datatype should be stored in the location indicated by the pointer parameters.dtype. - * The attribute value should be stored as a generic Variant::resource in the location indicated by the pointer parameters.resource. - * All datatypes of Datatype should be supported in a type-safe way. - */ - virtual void readAttribute(Writable*, Parameter< Operation::READ_ATT > &) = 0; - /** List all paths/sub-groups inside a group, non-recursively. - * - * The operation should fail if the Writable was not marked written. - * The operation should fail if the Writable is not a group. - * The list of group names should be stored in the location indicated by the pointer parameters.paths. - */ - virtual void listPaths(Writable*, Parameter< Operation::LIST_PATHS > &) = 0; - /** List all datasets inside a group, non-recursively. - * - * The operation should fail if the Writable was not marked written. - * The operation should fail if the Writable is not a group. - * The list of dataset names should be stored in the location indicated by the pointer parameters.datasets. - */ - virtual void listDatasets(Writable*, Parameter< Operation::LIST_DATASETS > &) = 0; - /** List all attributes associated with an object. 
- * - * The operation should fail if the Writable was not marked written. - * The attribute should be associated with the Writable. - * The list of attribute names should be stored in the location indicated by the pointer parameters.attributes. - */ - virtual void listAttributes(Writable*, Parameter< Operation::LIST_ATTS > &) = 0; + } + /** Create a single attribute and fill the value, possibly overwriting an + * existing attribute. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The attribute should have the name parameters.name. + * This name should not contain a slash ("/"). The attribute should be of + * datatype parameters.dtype. Any existing attribute with the same name + * should be overwritten. If possible, only the value should be changed if + * the datatype stays the same. The attribute should be written to physical + * storage after the operation completes successfully. All datatypes of + * Datatype should be supported in a type-safe way. + */ + virtual void + writeAttribute(Writable *, Parameter const &) = 0; + /** Read a chunk of data from an existing dataset. + * + * The dataset should be associated with the Writable. + * The operation should fail if the dataset does not exist. + * The operation should fail if the chunk extent parameters.extent is not + * smaller or equals in every dimension. The operation should fail if chunk + * positions parameters.offset+parameters.extent do not reside inside the + * dataset. The dataset should match the dataype parameters.dtype. The data + * parameters.data should be a cast-to-void pointer to a flattened version + * of the chunk data. The chunk should be stored row-major. The region of + * the chunk should be written to the location indicated by the pointer + * after the operation completes successfully. + */ + virtual void + readDataset(Writable *, Parameter &) = 0; + /** Read the value of an existing attribute. + * + * The operation should fail if the Writable was not marked written. + * The operation should fail if the attribute does not exist. + * The attribute should be associated with the Writable and have the name + * parameters.name. This name should not contain a slash ("/"). The + * attribute datatype should be stored in the location indicated by the + * pointer parameters.dtype. The attribute value should be stored as a + * generic Variant::resource in the location indicated by the pointer + * parameters.resource. All datatypes of Datatype should be supported in a + * type-safe way. + */ + virtual void + readAttribute(Writable *, Parameter &) = 0; + /** List all paths/sub-groups inside a group, non-recursively. + * + * The operation should fail if the Writable was not marked written. + * The operation should fail if the Writable is not a group. + * The list of group names should be stored in the location indicated by the + * pointer parameters.paths. + */ + virtual void listPaths(Writable *, Parameter &) = 0; + /** List all datasets inside a group, non-recursively. + * + * The operation should fail if the Writable was not marked written. + * The operation should fail if the Writable is not a group. + * The list of dataset names should be stored in the location indicated by + * the pointer parameters.datasets. + */ + virtual void + listDatasets(Writable *, Parameter &) = 0; + /** List all attributes associated with an object. + * + * The operation should fail if the Writable was not marked written. + * The attribute should be associated with the Writable. 
+ * The list of attribute names should be stored in the location indicated by + * the pointer parameters.attributes. + */ + virtual void + listAttributes(Writable *, Parameter &) = 0; - AbstractIOHandler* m_handler; -}; //AbstractIOHandlerImpl -} // openPMD + AbstractIOHandler *m_handler; +}; // AbstractIOHandlerImpl +} // namespace openPMD diff --git a/include/openPMD/IO/AbstractIOHandlerImplCommon.hpp b/include/openPMD/IO/AbstractIOHandlerImplCommon.hpp index a7ddae605b..efa8b238ae 100644 --- a/include/openPMD/IO/AbstractIOHandlerImplCommon.hpp +++ b/include/openPMD/IO/AbstractIOHandlerImplCommon.hpp @@ -21,7 +21,6 @@ #pragma once - #include "openPMD/IO/AbstractFilePosition.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IO/AbstractIOHandlerImpl.hpp" @@ -32,26 +31,24 @@ #include #include - - namespace openPMD { -template < typename FilePositionType = AbstractFilePosition > +template class AbstractIOHandlerImplCommon : public AbstractIOHandlerImpl { // friend struct detail::BufferedActions; public: - explicit AbstractIOHandlerImplCommon( AbstractIOHandler * handler ); + explicit AbstractIOHandlerImplCommon(AbstractIOHandler *handler); - ~AbstractIOHandlerImplCommon( ) override; + ~AbstractIOHandlerImplCommon() override; protected: /** * map each Writable to its associated file contains only the filename, * without the OS path */ - std::unordered_map< Writable *, InvalidatableFile > m_files; - std::unordered_set< InvalidatableFile > m_dirty; + std::unordered_map m_files; + std::unordered_set m_dirty; enum PossiblyExisting { @@ -60,33 +57,35 @@ class AbstractIOHandlerImplCommon : public AbstractIOHandlerImpl PE_NewlyCreated, }; - std::tuple< InvalidatableFile, - std::unordered_map< Writable *, InvalidatableFile >::iterator, - bool - > getPossiblyExisting( std::string file ); + std::tuple< + InvalidatableFile, + std::unordered_map::iterator, + bool> + getPossiblyExisting(std::string file); - void associateWithFile( Writable * writable, InvalidatableFile file ); + void associateWithFile(Writable *writable, InvalidatableFile file); /** * * @return Full OS path of the file. */ - std::string fullPath( InvalidatableFile ); + std::string fullPath(InvalidatableFile); - std::string fullPath( std::string ); + std::string fullPath(std::string); /** * Get the writable's containing file. * @param writable The writable whose containing file to figure out. * @param preferParentFile If true, the file is set to the parent's file if * present. Otherwise, the parent file is only considered if no own file - * is defined. This is usually needed when switching between iterations when opening paths. + * is defined. This is usually needed when switching between iterations + * when opening paths. * @return The containing file of the writable. If its parent is associated * with another file, update the writable to match its parent and return * the refreshed file. */ InvalidatableFile - refreshFileFromParent( Writable * writable, bool preferParentFile ); + refreshFileFromParent(Writable *writable, bool preferParentFile); /** * Figure out the file position of the writable. @@ -95,8 +94,8 @@ class AbstractIOHandlerImplCommon : public AbstractIOHandlerImpl * @param write Whether to refresh the writable's file position. * @return The current file position. 
*/ - std::shared_ptr< FilePositionType > - setAndGetFilePosition( Writable * writable, bool write = true ); + std::shared_ptr + setAndGetFilePosition(Writable *writable, bool write = true); /** * Figure out the file position of the writable and extend it. @@ -104,54 +103,53 @@ class AbstractIOHandlerImplCommon : public AbstractIOHandlerImpl * @param extend The extension string. * @return The current file position. */ - virtual std::shared_ptr< FilePositionType > - setAndGetFilePosition( Writable * writable, std::string extend ); + virtual std::shared_ptr + setAndGetFilePosition(Writable *writable, std::string extend); /** * @return A string representation of the file position. */ virtual std::string - filePositionToString( std::shared_ptr< FilePositionType > ) = 0; + filePositionToString(std::shared_ptr) = 0; /** * @return A new file position that is extended with the given string. */ - virtual std::shared_ptr< FilePositionType > - extendFilePosition( std::shared_ptr< FilePositionType > const &, - std::string ) = 0; + virtual std::shared_ptr extendFilePosition( + std::shared_ptr const &, std::string) = 0; }; -template < typename FilePositionType > -AbstractIOHandlerImplCommon< FilePositionType >::AbstractIOHandlerImplCommon( - AbstractIOHandler * handler ) -: AbstractIOHandlerImpl{handler} -{ -} - - -template < typename FilePositionType > -AbstractIOHandlerImplCommon< - FilePositionType >::~AbstractIOHandlerImplCommon( ) = default; - - -template < typename FilePositionType > -std::tuple< InvalidatableFile, - std::unordered_map< Writable *, InvalidatableFile >::iterator, - bool > -AbstractIOHandlerImplCommon< FilePositionType >::getPossiblyExisting( - std::string file ) +template +AbstractIOHandlerImplCommon::AbstractIOHandlerImplCommon( + AbstractIOHandler *handler) + : AbstractIOHandlerImpl{handler} +{} + +template +AbstractIOHandlerImplCommon::~AbstractIOHandlerImplCommon() = + default; + +template +std::tuple< + InvalidatableFile, + std::unordered_map::iterator, + bool> +AbstractIOHandlerImplCommon::getPossiblyExisting( + std::string file) { auto it = std::find_if( - m_files.begin( ), m_files.end( ), - [file]( std::unordered_map< - Writable *, InvalidatableFile >::value_type const & entry ) { - return *entry.second == file && entry.second.valid( ); - } ); + m_files.begin(), + m_files.end(), + [file]( + std::unordered_map::value_type const + &entry) { + return *entry.second == file && entry.second.valid(); + }); bool newlyCreated; InvalidatableFile name; - if ( it == m_files.end( ) ) + if (it == m_files.end()) { name = file; newlyCreated = true; @@ -163,33 +161,30 @@ AbstractIOHandlerImplCommon< FilePositionType >::getPossiblyExisting( } return std::tuple< InvalidatableFile, - std::unordered_map< Writable *, InvalidatableFile >::iterator, bool >( - std::move( name ), it, newlyCreated ); + std::unordered_map::iterator, + bool>(std::move(name), it, newlyCreated); } - -template < typename FilePositionType > -void AbstractIOHandlerImplCommon< FilePositionType >::associateWithFile( - Writable * writable, InvalidatableFile file ) +template +void AbstractIOHandlerImplCommon::associateWithFile( + Writable *writable, InvalidatableFile file) { // make sure to overwrite - m_files[writable] = std::move( file ); + m_files[writable] = std::move(file); } - -template < typename FilePositionType > -std::string AbstractIOHandlerImplCommon< FilePositionType >::fullPath( - InvalidatableFile fileName ) +template +std::string AbstractIOHandlerImplCommon::fullPath( + InvalidatableFile fileName) { - return 
fullPath( *fileName ); + return fullPath(*fileName); } - -template < typename FilePositionType > -std::string AbstractIOHandlerImplCommon< FilePositionType >::fullPath( - std::string fileName ) +template +std::string +AbstractIOHandlerImplCommon::fullPath(std::string fileName) { - if ( auxiliary::ends_with( m_handler->directory, "/" ) ) + if (auxiliary::ends_with(m_handler->directory, "/")) { return m_handler->directory + fileName; } @@ -199,77 +194,76 @@ std::string AbstractIOHandlerImplCommon< FilePositionType >::fullPath( } } -template< typename FilePositionType > +template InvalidatableFile -AbstractIOHandlerImplCommon< FilePositionType >::refreshFileFromParent( - Writable * writable, bool preferParentFile ) +AbstractIOHandlerImplCommon::refreshFileFromParent( + Writable *writable, bool preferParentFile) { - auto getFileFromParent = [ writable, this ]() { - auto file = m_files.find( writable->parent )->second; - associateWithFile( writable, file ); + auto getFileFromParent = [writable, this]() { + auto file = m_files.find(writable->parent)->second; + associateWithFile(writable, file); return file; }; - if( preferParentFile && writable->parent ) + if (preferParentFile && writable->parent) { return getFileFromParent(); } else { - auto it = m_files.find( writable ); - if( it != m_files.end() ) + auto it = m_files.find(writable); + if (it != m_files.end()) { - return m_files.find( writable )->second; + return m_files.find(writable)->second; } - else if( writable->parent ) + else if (writable->parent) { return getFileFromParent(); } else { throw std::runtime_error( - "Internal error: Root object must be opened explicitly." ); + "Internal error: Root object must be opened explicitly."); } } } -template< typename FilePositionType > -std::shared_ptr< FilePositionType > -AbstractIOHandlerImplCommon< FilePositionType >::setAndGetFilePosition( - Writable * writable, bool write ) +template +std::shared_ptr +AbstractIOHandlerImplCommon::setAndGetFilePosition( + Writable *writable, bool write) { - std::shared_ptr< AbstractFilePosition > res; + std::shared_ptr res; - if ( writable->abstractFilePosition ) + if (writable->abstractFilePosition) { res = writable->abstractFilePosition; } - else if ( writable->parent ) + else if (writable->parent) { res = writable->parent->abstractFilePosition; } else { // we are root - res = std::make_shared< FilePositionType >( ); + res = std::make_shared(); } - if ( write ) + if (write) { writable->abstractFilePosition = res; } - return std::dynamic_pointer_cast< FilePositionType >( res ); + return std::dynamic_pointer_cast(res); } - -template < typename FilePositionType > -std::shared_ptr< FilePositionType > -AbstractIOHandlerImplCommon< FilePositionType >::setAndGetFilePosition( - Writable * writable, std::string extend ) +template +std::shared_ptr +AbstractIOHandlerImplCommon::setAndGetFilePosition( + Writable *writable, std::string extend) { - if ( !auxiliary::starts_with( extend, '/' ) ) + if (!auxiliary::starts_with(extend, '/')) { extend = "/" + extend; } - auto oldPos = setAndGetFilePosition( writable, false ); - auto res = extendFilePosition( oldPos, extend ); + auto oldPos = setAndGetFilePosition(writable, false); + auto res = extendFilePosition(oldPos, extend); writable->abstractFilePosition = res; return res; diff --git a/include/openPMD/IO/Access.hpp b/include/openPMD/IO/Access.hpp index 272ef80e80..93ba662241 100644 --- a/include/openPMD/IO/Access.hpp +++ b/include/openPMD/IO/Access.hpp @@ -20,24 +20,25 @@ */ #pragma once - namespace openPMD { - /** File 
access mode to use during IO. - */ - enum class Access - { - READ_ONLY, //!< open series as read-only, fails if series is not found - READ_WRITE, //!< open existing series as writable - CREATE //!< create new series and truncate existing (files) - }; // Access - +/** File access mode to use during IO. + */ +enum class Access +{ + READ_ONLY, //!< open series as read-only, fails if series is not found + READ_WRITE, //!< open existing series as writable + CREATE //!< create new series and truncate existing (files) +}; // Access - // deprecated name (used prior to 0.12.0) - // note: "using old [[deprecated(msg)]] = new;" is still badly supported, thus using typedef - // https://en.cppreference.com/w/cpp/language/attributes/deprecated - // - NVCC < 11.0.167 works but noisy "warning: attribute does not apply to any entity" - // Nvidia bug report: 2991260 - // - Intel C++ 19.1.0.20200306 bug report: 04651484 - [[deprecated("AccessType is deprecated, use Access instead.")]] typedef Access AccessType; +// deprecated name (used prior to 0.12.0) +// note: "using old [[deprecated(msg)]] = new;" is still badly supported, thus +// using typedef +// https://en.cppreference.com/w/cpp/language/attributes/deprecated +// - NVCC < 11.0.167 works but noisy "warning: attribute does not apply to any +// entity" +// Nvidia bug report: 2991260 +// - Intel C++ 19.1.0.20200306 bug report: 04651484 +[[deprecated("AccessType is deprecated, use Access instead.")]] typedef Access + AccessType; } // namespace openPMD diff --git a/include/openPMD/IO/DummyIOHandler.hpp b/include/openPMD/IO/DummyIOHandler.hpp index 50ad87cdd5..9a4f3c3852 100644 --- a/include/openPMD/IO/DummyIOHandler.hpp +++ b/include/openPMD/IO/DummyIOHandler.hpp @@ -24,25 +24,26 @@ #include "openPMD/IO/Access.hpp" #include "openPMD/IO/IOTask.hpp" -#include #include - +#include namespace openPMD { - /** Dummy handler without any IO operations. - */ - class DummyIOHandler : public AbstractIOHandler - { - public: - DummyIOHandler(std::string, Access); - ~DummyIOHandler() override = default; +/** Dummy handler without any IO operations. + */ +class DummyIOHandler : public AbstractIOHandler +{ +public: + DummyIOHandler(std::string, Access); + ~DummyIOHandler() override = default; - /** No-op consistent with the IOHandler interface to enable library use without IO. - */ - void enqueue(IOTask const&) override; - /** No-op consistent with the IOHandler interface to enable library use without IO. - */ - std::future< void > flush() override; - }; // DummyIOHandler + /** No-op consistent with the IOHandler interface to enable library use + * without IO. + */ + void enqueue(IOTask const &) override; + /** No-op consistent with the IOHandler interface to enable library use + * without IO. + */ + std::future flush(internal::FlushParams const &) override; +}; // DummyIOHandler } // namespace openPMD diff --git a/include/openPMD/IO/Format.hpp b/include/openPMD/IO/Format.hpp index 7b277b72b1..993f7bae90 100644 --- a/include/openPMD/IO/Format.hpp +++ b/include/openPMD/IO/Format.hpp @@ -22,33 +22,32 @@ #include - namespace openPMD { - /** File format to use during IO. - */ - enum class Format - { - HDF5, - ADIOS1, - ADIOS2, - ADIOS2_SST, - ADIOS2_SSC, - JSON, - DUMMY - }; +/** File format to use during IO. + */ +enum class Format +{ + HDF5, + ADIOS1, + ADIOS2, + ADIOS2_SST, + ADIOS2_SSC, + JSON, + DUMMY +}; - /** Determine the storage format of a Series from the used filename extension. - * - * @param filename string containing the filename. 
- * @return Format that best fits the filename extension. - */ - Format determineFormat(std::string const& filename); +/** Determine the storage format of a Series from the used filename extension. + * + * @param filename string containing the filename. + * @return Format that best fits the filename extension. + */ +Format determineFormat(std::string const &filename); - /** Determine the default filename suffix for a given storage format. - * - * @param f File format to determine suffix for. - * @return String containing the default filename suffix - */ - std::string suffix(Format f); -} // openPMD +/** Determine the default filename suffix for a given storage format. + * + * @param f File format to determine suffix for. + * @return String containing the default filename suffix + */ +std::string suffix(Format f); +} // namespace openPMD diff --git a/include/openPMD/IO/HDF5/HDF5Auxiliary.hpp b/include/openPMD/IO/HDF5/HDF5Auxiliary.hpp index 4785cf4f66..da7ff2f68f 100644 --- a/include/openPMD/IO/HDF5/HDF5Auxiliary.hpp +++ b/include/openPMD/IO/HDF5/HDF5Auxiliary.hpp @@ -20,9 +20,9 @@ */ #pragma once -#include "openPMD/config.hpp" #include "openPMD/backend/Attribute.hpp" #include "openPMD/backend/Writable.hpp" +#include "openPMD/config.hpp" #include @@ -31,42 +31,36 @@ #include #include - namespace openPMD { - struct GetH5DataType - { - std::unordered_map< std::string, hid_t > m_userTypes; +struct GetH5DataType +{ + std::unordered_map m_userTypes; - GetH5DataType( std::unordered_map< std::string, hid_t > userTypes ) - : m_userTypes{ std::move(userTypes) } - { - } + GetH5DataType(std::unordered_map userTypes) + : m_userTypes{std::move(userTypes)} + {} - hid_t - operator()(Attribute const &att); - }; + hid_t operator()(Attribute const &att); +}; - hid_t - getH5DataSpace(Attribute const& att); +hid_t getH5DataSpace(Attribute const &att); - std::string - concrete_h5_file_position(Writable* w); +std::string concrete_h5_file_position(Writable *w); - /** Computes the chunk dimensions for a dataset. - * - * Chunk dimensions are selected to create chunks sizes between - * 64KByte and 4MB. Smaller chunk sizes are inefficient due to overhead, - * larger chunks do not map well to file system blocks and striding. - * - * Chunk dimensions are less or equal to dataset dimensions and do - * not need to be a factor of the respective dataset dimension. - * - * @param[in] dims dimensions of dataset to get chunk dims for - * @param[in] typeSize size of each element in bytes - * @return array for resulting chunk dimensions - */ - std::vector< hsize_t > - getOptimalChunkDims( std::vector< hsize_t > const dims, - size_t const typeSize ); +/** Computes the chunk dimensions for a dataset. + * + * Chunk dimensions are selected to create chunks sizes between + * 64KByte and 4MB. Smaller chunk sizes are inefficient due to overhead, + * larger chunks do not map well to file system blocks and striding. + * + * Chunk dimensions are less or equal to dataset dimensions and do + * not need to be a factor of the respective dataset dimension. 
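The chunking helper described here (its reformatted declaration follows just below) selects chunk dimensions aiming for chunk sizes between 64 KiB and 4 MB. A hypothetical call is sketched next, only to show the expected arguments: the dataset dimensions as std::vector<hsize_t> plus the element size in bytes; the concrete result is left to the implementation and is not asserted here.

```cpp
// Hypothetical usage (not part of this patch): ask for chunk dimensions of a
// 2048 x 2048 dataset of double-precision values (8-byte elements).
#include "openPMD/IO/HDF5/HDF5Auxiliary.hpp"

#include <hdf5.h>

#include <vector>

std::vector<hsize_t> exampleChunkDims()
{
    std::vector<hsize_t> const dims{2048, 2048};
    return openPMD::getOptimalChunkDims(dims, sizeof(double));
}
```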
+ * + * @param[in] dims dimensions of dataset to get chunk dims for + * @param[in] typeSize size of each element in bytes + * @return array for resulting chunk dimensions + */ +std::vector +getOptimalChunkDims(std::vector const dims, size_t const typeSize); } // namespace openPMD diff --git a/include/openPMD/IO/HDF5/HDF5FilePosition.hpp b/include/openPMD/IO/HDF5/HDF5FilePosition.hpp index 3537e3252e..eb615907b7 100644 --- a/include/openPMD/IO/HDF5/HDF5FilePosition.hpp +++ b/include/openPMD/IO/HDF5/HDF5FilePosition.hpp @@ -22,15 +22,13 @@ #include "openPMD/IO/AbstractFilePosition.hpp" - namespace openPMD { struct HDF5FilePosition : public AbstractFilePosition { - HDF5FilePosition(std::string const& s) - : location{s} - { } + HDF5FilePosition(std::string const &s) : location{s} + {} std::string location; -}; //HDF5FilePosition -} // openPMD +}; // HDF5FilePosition +} // namespace openPMD diff --git a/include/openPMD/IO/HDF5/HDF5IOHandler.hpp b/include/openPMD/IO/HDF5/HDF5IOHandler.hpp index 3dfc1f6e1b..da1f1ff130 100644 --- a/include/openPMD/IO/HDF5/HDF5IOHandler.hpp +++ b/include/openPMD/IO/HDF5/HDF5IOHandler.hpp @@ -28,7 +28,6 @@ #include #include - namespace openPMD { class HDF5IOHandlerImpl; @@ -39,11 +38,14 @@ class HDF5IOHandler : public AbstractIOHandler HDF5IOHandler(std::string path, Access, nlohmann::json config); ~HDF5IOHandler() override; - std::string backendName() const override { return "HDF5"; } + std::string backendName() const override + { + return "HDF5"; + } - std::future< void > flush() override; + std::future flush(internal::FlushParams const &) override; private: - std::unique_ptr< HDF5IOHandlerImpl > m_impl; + std::unique_ptr m_impl; }; // HDF5IOHandler -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp b/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp index 815c57516e..0391cc386f 100644 --- a/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp +++ b/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp @@ -22,78 +22,89 @@ #include "openPMD/config.hpp" #if openPMD_HAVE_HDF5 -# include "openPMD/IO/AbstractIOHandlerImpl.hpp" +#include "openPMD/IO/AbstractIOHandlerImpl.hpp" -# include "openPMD/auxiliary/JSON.hpp" -# include "openPMD/auxiliary/Option.hpp" +#include "openPMD/auxiliary/JSON.hpp" +#include "openPMD/auxiliary/Option.hpp" -# include -# include -# include +#include +#include +#include #endif - namespace openPMD { #if openPMD_HAVE_HDF5 - class HDF5IOHandlerImpl : public AbstractIOHandlerImpl - { - public: - HDF5IOHandlerImpl(AbstractIOHandler*, nlohmann::json config); - ~HDF5IOHandlerImpl() override; +class HDF5IOHandlerImpl : public AbstractIOHandlerImpl +{ +public: + HDF5IOHandlerImpl(AbstractIOHandler *, nlohmann::json config); + ~HDF5IOHandlerImpl() override; - void createFile(Writable*, Parameter< Operation::CREATE_FILE > const&) override; - void createPath(Writable*, Parameter< Operation::CREATE_PATH > const&) override; - void createDataset(Writable*, Parameter< Operation::CREATE_DATASET > const&) override; - void extendDataset(Writable*, Parameter< Operation::EXTEND_DATASET > const&) override; - void availableChunks(Writable *, Parameter< Operation::AVAILABLE_CHUNKS > &) override; - void openFile(Writable*, Parameter< Operation::OPEN_FILE > const&) override; - void closeFile(Writable*, Parameter< Operation::CLOSE_FILE > const&) override; - void openPath(Writable*, Parameter< Operation::OPEN_PATH > const&) override; - void openDataset(Writable*, Parameter< Operation::OPEN_DATASET > &) override; - void deleteFile(Writable*, 
Parameter< Operation::DELETE_FILE > const&) override; - void deletePath(Writable*, Parameter< Operation::DELETE_PATH > const&) override; - void deleteDataset(Writable*, Parameter< Operation::DELETE_DATASET > const&) override; - void deleteAttribute(Writable*, Parameter< Operation::DELETE_ATT > const&) override; - void writeDataset(Writable*, Parameter< Operation::WRITE_DATASET > const&) override; - void writeAttribute(Writable*, Parameter< Operation::WRITE_ATT > const&) override; - void readDataset(Writable*, Parameter< Operation::READ_DATASET > &) override; - void readAttribute(Writable*, Parameter< Operation::READ_ATT > &) override; - void listPaths(Writable*, Parameter< Operation::LIST_PATHS > &) override; - void listDatasets(Writable*, Parameter< Operation::LIST_DATASETS > &) override; - void listAttributes(Writable*, Parameter< Operation::LIST_ATTS > &) override; + void + createFile(Writable *, Parameter const &) override; + void + createPath(Writable *, Parameter const &) override; + void createDataset( + Writable *, Parameter const &) override; + void extendDataset( + Writable *, Parameter const &) override; + void availableChunks( + Writable *, Parameter &) override; + void openFile(Writable *, Parameter const &) override; + void + closeFile(Writable *, Parameter const &) override; + void openPath(Writable *, Parameter const &) override; + void openDataset(Writable *, Parameter &) override; + void + deleteFile(Writable *, Parameter const &) override; + void + deletePath(Writable *, Parameter const &) override; + void deleteDataset( + Writable *, Parameter const &) override; + void deleteAttribute( + Writable *, Parameter const &) override; + void writeDataset( + Writable *, Parameter const &) override; + void writeAttribute( + Writable *, Parameter const &) override; + void readDataset(Writable *, Parameter &) override; + void readAttribute(Writable *, Parameter &) override; + void listPaths(Writable *, Parameter &) override; + void + listDatasets(Writable *, Parameter &) override; + void listAttributes(Writable *, Parameter &) override; - std::unordered_map< Writable*, std::string > m_fileNames; - std::unordered_map< std::string, hid_t > m_fileNamesWithID; + std::unordered_map m_fileNames; + std::unordered_map m_fileNamesWithID; - std::unordered_set< hid_t > m_openFileIDs; + std::unordered_set m_openFileIDs; - hid_t m_datasetTransferProperty; - hid_t m_fileAccessProperty; - hid_t m_fileCreateProperty; + hid_t m_datasetTransferProperty; + hid_t m_fileAccessProperty; + hid_t m_fileCreateProperty; - hbool_t m_hdf5_collective_metadata = 1; + hbool_t m_hdf5_collective_metadata = 1; - // h5py compatible types for bool and complex - hid_t m_H5T_BOOL_ENUM; - hid_t m_H5T_CFLOAT; - hid_t m_H5T_CDOUBLE; - hid_t m_H5T_CLONG_DOUBLE; + // h5py compatible types for bool and complex + hid_t m_H5T_BOOL_ENUM; + hid_t m_H5T_CFLOAT; + hid_t m_H5T_CDOUBLE; + hid_t m_H5T_CLONG_DOUBLE; - private: - auxiliary::TracingJSON m_config; - std::string m_chunks = "auto"; - struct File - { - std::string name; - hid_t id; - }; - auxiliary::Option< File > getFile( Writable * ); - }; // HDF5IOHandlerImpl -#else - class HDF5IOHandlerImpl +private: + auxiliary::TracingJSON m_config; + std::string m_chunks = "auto"; + struct File { - }; // HDF5IOHandlerImpl + std::string name; + hid_t id; + }; + auxiliary::Option getFile(Writable *); +}; // HDF5IOHandlerImpl +#else +class HDF5IOHandlerImpl +{}; // HDF5IOHandlerImpl #endif -} // openPMD +} // namespace openPMD diff --git 
a/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp b/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp index 70cb681f0d..cc660464fc 100644 --- a/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp +++ b/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp @@ -20,8 +20,8 @@ */ #pragma once -#include "openPMD/config.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/config.hpp" #include @@ -29,27 +29,29 @@ #include #include - namespace openPMD { - class ParallelHDF5IOHandlerImpl; +class ParallelHDF5IOHandlerImpl; - class ParallelHDF5IOHandler : public AbstractIOHandler +class ParallelHDF5IOHandler : public AbstractIOHandler +{ +public: +#if openPMD_HAVE_MPI + ParallelHDF5IOHandler( + std::string path, Access, MPI_Comm, nlohmann::json config); +#else + ParallelHDF5IOHandler(std::string path, Access, nlohmann::json config); +#endif + ~ParallelHDF5IOHandler() override; + + std::string backendName() const override { - public: - #if openPMD_HAVE_MPI - ParallelHDF5IOHandler( - std::string path, Access, MPI_Comm, nlohmann::json config); - #else - ParallelHDF5IOHandler(std::string path, Access, nlohmann::json config); - #endif - ~ParallelHDF5IOHandler() override; - - std::string backendName() const override { return "MPI_HDF5"; } - - std::future< void > flush() override; - - private: - std::unique_ptr< ParallelHDF5IOHandlerImpl > m_impl; - }; // ParallelHDF5IOHandler -} // openPMD + return "MPI_HDF5"; + } + + std::future flush(internal::FlushParams const &) override; + +private: + std::unique_ptr m_impl; +}; // ParallelHDF5IOHandler +} // namespace openPMD diff --git a/include/openPMD/IO/HDF5/ParallelHDF5IOHandlerImpl.hpp b/include/openPMD/IO/HDF5/ParallelHDF5IOHandlerImpl.hpp index d0e18dc85c..115844826b 100644 --- a/include/openPMD/IO/HDF5/ParallelHDF5IOHandlerImpl.hpp +++ b/include/openPMD/IO/HDF5/ParallelHDF5IOHandlerImpl.hpp @@ -20,34 +20,32 @@ */ #pragma once -#include "openPMD/config.hpp" #include "openPMD/IO/AbstractIOHandlerImpl.hpp" +#include "openPMD/config.hpp" #if openPMD_HAVE_MPI -# include -# if openPMD_HAVE_HDF5 -# include "openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp" -# include -# endif +#include +#if openPMD_HAVE_HDF5 +#include "openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp" +#include +#endif #endif - namespace openPMD { #if openPMD_HAVE_HDF5 && openPMD_HAVE_MPI - class ParallelHDF5IOHandlerImpl : public HDF5IOHandlerImpl - { - public: - ParallelHDF5IOHandlerImpl( - AbstractIOHandler*, MPI_Comm, nlohmann::json config); - ~ParallelHDF5IOHandlerImpl() override; +class ParallelHDF5IOHandlerImpl : public HDF5IOHandlerImpl +{ +public: + ParallelHDF5IOHandlerImpl( + AbstractIOHandler *, MPI_Comm, nlohmann::json config); + ~ParallelHDF5IOHandlerImpl() override; - MPI_Comm m_mpiComm; - MPI_Info m_mpiInfo; - }; // ParallelHDF5IOHandlerImpl + MPI_Comm m_mpiComm; + MPI_Info m_mpiInfo; +}; // ParallelHDF5IOHandlerImpl #else - class ParallelHDF5IOHandlerImpl - { - }; // ParallelHDF5IOHandlerImpl +class ParallelHDF5IOHandlerImpl +{}; // ParallelHDF5IOHandlerImpl #endif -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/IOTask.hpp b/include/openPMD/IO/IOTask.hpp index d455556e69..a31a92d065 100644 --- a/include/openPMD/IO/IOTask.hpp +++ b/include/openPMD/IO/IOTask.hpp @@ -20,61 +20,44 @@ */ #pragma once -#include "openPMD/auxiliary/Export.hpp" -#include "openPMD/auxiliary/Variant.hpp" -#include "openPMD/backend/Attribute.hpp" #include "openPMD/ChunkInfo.hpp" #include "openPMD/Dataset.hpp" #include "openPMD/IterationEncoding.hpp" #include "openPMD/Streaming.hpp" +#include 
"openPMD/auxiliary/Export.hpp" +#include "openPMD/auxiliary/Variant.hpp" +#include "openPMD/backend/Attribute.hpp" -#include #include +#include #include #include #include - namespace openPMD { class AttributableInterface; class Writable; -Writable* -getWritable(AttributableInterface*); +Writable *getWritable(AttributableInterface *); /** Type of IO operation between logical and persistent data. */ -OPENPMDAPI_EXPORT_ENUM_CLASS(Operation) -{ - CREATE_FILE, - OPEN_FILE, - CLOSE_FILE, - DELETE_FILE, - - CREATE_PATH, - CLOSE_PATH, - OPEN_PATH, - DELETE_PATH, +OPENPMDAPI_EXPORT_ENUM_CLASS(Operation){ + CREATE_FILE, OPEN_FILE, CLOSE_FILE, DELETE_FILE, + + CREATE_PATH, CLOSE_PATH, OPEN_PATH, DELETE_PATH, LIST_PATHS, - CREATE_DATASET, - EXTEND_DATASET, - OPEN_DATASET, - DELETE_DATASET, - WRITE_DATASET, - READ_DATASET, - LIST_DATASETS, - GET_BUFFER_VIEW, + CREATE_DATASET, EXTEND_DATASET, OPEN_DATASET, DELETE_DATASET, + WRITE_DATASET, READ_DATASET, LIST_DATASETS, GET_BUFFER_VIEW, - DELETE_ATT, - WRITE_ATT, - READ_ATT, - LIST_ATTS, + DELETE_ATT, WRITE_ATT, READ_ATT, LIST_ATTS, ADVANCE, AVAILABLE_CHUNKS //!< Query chunks that can be loaded in a dataset -}; // note: if you change the enum members here, please update docs/source/dev/design.rst +}; // note: if you change the enum members here, please update + // docs/source/dev/design.rst namespace internal { @@ -82,22 +65,23 @@ namespace internal * The returned strings are compile-time constants, so no worries about * pointer validity. */ - std::string operationAsString( Operation ); -} + std::string operationAsString(Operation); +} // namespace internal struct OPENPMDAPI_EXPORT AbstractParameter { virtual ~AbstractParameter() = default; AbstractParameter() = default; - //AbstractParameter(AbstractParameter&&) = default; + // AbstractParameter(AbstractParameter&&) = default; // avoid object slicing - AbstractParameter(const AbstractParameter&) = delete; - AbstractParameter& operator=(const AbstractParameter&) = delete; - virtual std::unique_ptr< AbstractParameter > clone() const = 0; + AbstractParameter(const AbstractParameter &) = delete; + AbstractParameter &operator=(const AbstractParameter &) = delete; + virtual std::unique_ptr clone() const = 0; }; -/** @brief Typesafe description of all required arguments for a specified Operation. +/** @brief Typesafe description of all required arguments for a specified + * Operation. * * @note Input operations (i.e. ones that transfer data from persistent files * to logical representations in openPMD-api) use shared pointers to @@ -105,7 +89,7 @@ struct OPENPMDAPI_EXPORT AbstractParameter * valid after the Operation has completed. * @tparam Operation Type of Operation to be executed. 
*/ -template< Operation > +template struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = delete; @@ -113,36 +97,38 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter Parameter(Parameter &&) = delete; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::CREATE_FILE > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : - AbstractParameter(), name(p.name), encoding(p.encoding) {} + Parameter(Parameter const &p) + : AbstractParameter(), name(p.name), encoding(p.encoding) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::CREATE_FILE >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; IterationEncoding encoding = IterationEncoding::groupBased; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::OPEN_FILE > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : - AbstractParameter(), name(p.name), encoding(p.encoding) {} + Parameter(Parameter const &p) + : AbstractParameter(), name(p.name), encoding(p.encoding) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::OPEN_FILE >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; @@ -154,137 +140,147 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::OPEN_FILE > : public AbstractPara IterationEncoding encoding = IterationEncoding::groupBased; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::CLOSE_FILE > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter( Parameter const & ) : AbstractParameter() {} + Parameter(Parameter const &) : AbstractParameter() + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::CLOSE_FILE >( *this ) ); + return std::unique_ptr( + new Parameter(*this)); } }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::DELETE_FILE > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), name(p.name) {} + Parameter(Parameter const &p) : AbstractParameter(), name(p.name) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::DELETE_FILE >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::CREATE_PATH > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), path(p.path) {} + Parameter(Parameter const &p) : AbstractParameter(), path(p.path) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new 
Parameter< Operation::CREATE_PATH >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string path = ""; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::CLOSE_PATH > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter( Parameter const & ) : AbstractParameter() - { - } + Parameter(Parameter const &) : AbstractParameter() + {} - Parameter & - operator=( Parameter const & ) + Parameter &operator=(Parameter const &) { return *this; } - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::CLOSE_PATH >( *this ) ); + return std::unique_ptr( + new Parameter(*this)); } }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::OPEN_PATH > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), path(p.path) {} + Parameter(Parameter const &p) : AbstractParameter(), path(p.path) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::OPEN_PATH >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string path = ""; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::DELETE_PATH > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), path(p.path) {} + Parameter(Parameter const &p) : AbstractParameter(), path(p.path) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::DELETE_PATH >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string path = ""; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::LIST_PATHS > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), paths(p.paths) {} + Parameter(Parameter const &p) : AbstractParameter(), paths(p.paths) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::LIST_PATHS >(*this)); + return std::unique_ptr( + new Parameter(*this)); } - std::shared_ptr< std::vector< std::string > > paths - = std::make_shared< std::vector< std::string > >(); + std::shared_ptr > paths = + std::make_shared >(); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::CREATE_DATASET > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - name(p.name), extent(p.extent), dtype(p.dtype), - chunkSize(p.chunkSize), compression(p.compression), - transform(p.transform), options(p.options) {} + Parameter(Parameter const &p) + : AbstractParameter() + , name(p.name) + , extent(p.extent) + , dtype(p.dtype) + , chunkSize(p.chunkSize) + , compression(p.compression) + , transform(p.transform) + , options(p.options) + {} - std::unique_ptr< AbstractParameter > - clone() const 
override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::CREATE_DATASET >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; @@ -296,68 +292,75 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::CREATE_DATASET > : public Abstrac std::string options = "{}"; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::EXTEND_DATASET > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), extent(p.extent) {} + Parameter(Parameter const &p) : AbstractParameter(), extent(p.extent) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::EXTEND_DATASET >(*this)); + return std::unique_ptr( + new Parameter(*this)); } Extent extent = {}; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::OPEN_DATASET > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - name(p.name), dtype(p.dtype), extent(p.extent) {} + Parameter(Parameter const &p) + : AbstractParameter(), name(p.name), dtype(p.dtype), extent(p.extent) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::OPEN_DATASET >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; - std::shared_ptr< Datatype > dtype - = std::make_shared< Datatype >(); - std::shared_ptr< Extent > extent - = std::make_shared< Extent >(); + std::shared_ptr dtype = std::make_shared(); + std::shared_ptr extent = std::make_shared(); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::DELETE_DATASET > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), name(p.name) {} + Parameter(Parameter const &p) : AbstractParameter(), name(p.name) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::DELETE_DATASET >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::WRITE_DATASET > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - extent(p.extent), offset(p.offset), dtype(p.dtype), - data(p.data) {} + Parameter(Parameter const &p) + : AbstractParameter() + , extent(p.extent) + , offset(p.offset) + , dtype(p.dtype) + , data(p.data) + {} - Parameter& operator=(const Parameter& p) { + Parameter &operator=(const Parameter &p) + { this->extent = p.extent; this->offset = p.offset; this->dtype = p.dtype; @@ -365,28 +368,33 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::WRITE_DATASET > : public Abstract return *this; } - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::WRITE_DATASET >(*this)); + 
return std::unique_ptr( + new Parameter(*this)); } Extent extent = {}; Offset offset = {}; Datatype dtype = Datatype::UNDEFINED; - std::shared_ptr< void const > data = nullptr; + std::shared_ptr data = nullptr; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::READ_DATASET > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - extent(p.extent), offset(p.offset), dtype(p.dtype), - data(p.data) {} + Parameter(Parameter const &p) + : AbstractParameter() + , extent(p.extent) + , offset(p.offset) + , dtype(p.dtype) + , data(p.data) + {} - Parameter& operator=(const Parameter &p) { + Parameter &operator=(const Parameter &p) + { this->extent = p.extent; this->offset = p.offset; this->dtype = p.dtype; @@ -394,51 +402,54 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::READ_DATASET > : public AbstractP return *this; } - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::READ_DATASET >(*this)); + return std::unique_ptr( + new Parameter(*this)); } Extent extent = {}; Offset offset = {}; Datatype dtype = Datatype::UNDEFINED; - std::shared_ptr< void > data = nullptr; + std::shared_ptr data = nullptr; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::LIST_DATASETS > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - datasets(p.datasets) {} + Parameter(Parameter const &p) : AbstractParameter(), datasets(p.datasets) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::LIST_DATASETS >(*this)); + return std::unique_ptr( + new Parameter(*this)); } - std::shared_ptr< std::vector< std::string > > datasets - = std::make_shared< std::vector< std::string > >(); + std::shared_ptr > datasets = + std::make_shared >(); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::GET_BUFFER_VIEW > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - offset(p.offset), extent(p.extent), dtype(p.dtype), update(p.update), - out(p.out) + Parameter(Parameter const &p) + : AbstractParameter() + , offset(p.offset) + , extent(p.extent) + , dtype(p.dtype) + , update(p.update) + , out(p.out) {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::GET_BUFFER_VIEW >(*this)); + return std::unique_ptr( + new Parameter(*this)); } // in parameters @@ -453,37 +464,42 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::GET_BUFFER_VIEW > : public Abstra unsigned viewIndex = 0; void *ptr = nullptr; }; - std::shared_ptr< OutParameters > out = std::make_shared< OutParameters >(); + std::shared_ptr out = std::make_shared(); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::DELETE_ATT > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), name(p.name) {} + Parameter(Parameter const &p) : 
AbstractParameter(), name(p.name) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::DELETE_ATT >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::WRITE_ATT > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - name(p.name), dtype(p.dtype), resource(p.resource) {} + Parameter(Parameter const &p) + : AbstractParameter() + , name(p.name) + , dtype(p.dtype) + , resource(p.resource) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::WRITE_ATT >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; @@ -491,100 +507,101 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::WRITE_ATT > : public AbstractPara Attribute::resource resource; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::READ_ATT > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - name(p.name), dtype(p.dtype), resource(p.resource) {} + Parameter(Parameter const &p) + : AbstractParameter() + , name(p.name) + , dtype(p.dtype) + , resource(p.resource) + {} - Parameter& operator=(const Parameter &p) { + Parameter &operator=(const Parameter &p) + { this->name = p.name; this->dtype = p.dtype; this->resource = p.resource; return *this; } - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::READ_ATT >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; - std::shared_ptr< Datatype > dtype - = std::make_shared< Datatype >(); - std::shared_ptr< Attribute::resource > resource - = std::make_shared< Attribute::resource >(); + std::shared_ptr dtype = std::make_shared(); + std::shared_ptr resource = + std::make_shared(); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::LIST_ATTS > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - attributes(p.attributes) {} + Parameter(Parameter const &p) + : AbstractParameter(), attributes(p.attributes) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::LIST_ATTS >(*this)); + return std::unique_ptr( + new Parameter(*this)); } - std::shared_ptr< std::vector< std::string > > attributes - = std::make_shared< std::vector< std::string > >(); + std::shared_ptr > attributes = + std::make_shared >(); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::ADVANCE > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter( Parameter const & p ) - : AbstractParameter(), mode( p.mode ), status( p.status ) - { - } + Parameter(Parameter const &p) + : AbstractParameter(), mode(p.mode), status(p.status) + {} - 
std::unique_ptr< AbstractParameter >
-    clone() const override
+    std::unique_ptr<AbstractParameter> clone() const override
     {
-        return std::unique_ptr< AbstractParameter >(
-            new Parameter< Operation::ADVANCE >( *this ) );
+        return std::unique_ptr<AbstractParameter>(
+            new Parameter<Operation::ADVANCE>(*this));
     }

     //! input parameter
     AdvanceMode mode;
     //! output parameter
-    std::shared_ptr< AdvanceStatus > status =
-        std::make_shared< AdvanceStatus >( AdvanceStatus::OK );
+    std::shared_ptr<AdvanceStatus> status =
+        std::make_shared<AdvanceStatus>(AdvanceStatus::OK);
 };

-template<>
-struct OPENPMDAPI_EXPORT Parameter< Operation::AVAILABLE_CHUNKS >
+template <>
+struct OPENPMDAPI_EXPORT Parameter<Operation::AVAILABLE_CHUNKS>
     : public AbstractParameter
 {
     Parameter() = default;
-    Parameter( Parameter const & p ) : AbstractParameter(), chunks( p.chunks )
-    {
-    }
+    Parameter(Parameter const &p) : AbstractParameter(), chunks(p.chunks)
+    {}

-    Parameter &
-    operator=( Parameter const & p )
+    Parameter &operator=(Parameter const &p)
     {
         chunks = p.chunks;
         return *this;
     }

-    std::unique_ptr< AbstractParameter >
-    clone() const override
+    std::unique_ptr<AbstractParameter> clone() const override
     {
-        return std::unique_ptr< AbstractParameter >(
-            new Parameter< Operation::AVAILABLE_CHUNKS >( *this ) );
+        return std::unique_ptr<AbstractParameter>(
+            new Parameter<Operation::AVAILABLE_CHUNKS>(*this));
     }

     // output parameter
-    std::shared_ptr< ChunkTable > chunks = std::make_shared< ChunkTable >();
+    std::shared_ptr<ChunkTable> chunks = std::make_shared<ChunkTable>();
 };

 /** @brief Self-contained description of a single IO operation.
@@ -601,32 +618,28 @@ class OPENPMDAPI_EXPORT IOTask
     /** Constructor for self-contained description of single IO operation.
      *
      * @tparam op Type of Operation to be executed.
-     * @param w Writable indicating the location of the object being operated on.
-     * @param p Parameter object supplying all required input and/or output parameters to the operation.
+     * @param w Writable indicating the location of the object being
+     * operated on.
+     * @param p Parameter object supplying all required input and/or output
+     * parameters to the operation.
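The constructor documented above pairs a `Writable` with a clone of a `Parameter<op>` struct. A minimal sketch of how frontend code typically queues such a task, assuming the in-repo internal headers; the helper name `enqueueCreatePath`, the path string, and the `node`/`handler` arguments are illustrative and not part of this diff:

```cpp
#include "openPMD/IO/AbstractIOHandler.hpp"
#include "openPMD/IO/IOTask.hpp"

namespace openPMD
{
// Hypothetical helper: queue a CREATE_PATH operation for a given node.
void enqueueCreatePath(Writable *node, AbstractIOHandler &handler)
{
    Parameter<Operation::CREATE_PATH> pCreate;
    pCreate.path = "meshes"; // illustrative path
    // IOTask clones the parameter, so pCreate may safely go out of scope
    // before the backend processes the queued task.
    handler.enqueue(IOTask(node, pCreate));
}
} // namespace openPMD
```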
*/ - template< Operation op > - explicit IOTask(Writable* w, - Parameter< op > const & p) - : writable{w}, - operation{op}, - parameter{p.clone()} - { } - - template< Operation op > - explicit IOTask(AttributableInterface* a, - Parameter< op > const & p) - : writable{getWritable(a)}, - operation{op}, - parameter{p.clone()} - { } - - explicit IOTask(IOTask const & other) : - writable{other.writable}, - operation{other.operation}, - parameter{other.parameter} + template + explicit IOTask(Writable *w, Parameter const &p) + : writable{w}, operation{op}, parameter{p.clone()} + {} + + template + explicit IOTask(AttributableInterface *a, Parameter const &p) + : writable{getWritable(a)}, operation{op}, parameter{p.clone()} + {} + + explicit IOTask(IOTask const &other) + : writable{other.writable} + , operation{other.operation} + , parameter{other.parameter} {} - IOTask& operator=(IOTask const & other) + IOTask &operator=(IOTask const &other) { writable = other.writable; operation = other.operation; @@ -634,8 +647,8 @@ class OPENPMDAPI_EXPORT IOTask return *this; } - Writable* writable; + Writable *writable; Operation operation; - std::shared_ptr< AbstractParameter > parameter; -}; // IOTask + std::shared_ptr parameter; +}; // IOTask } // namespace openPMD diff --git a/include/openPMD/IO/InvalidatableFile.hpp b/include/openPMD/IO/InvalidatableFile.hpp index 006c3829e5..31c9fd3fcc 100644 --- a/include/openPMD/IO/InvalidatableFile.hpp +++ b/include/openPMD/IO/InvalidatableFile.hpp @@ -20,77 +20,66 @@ */ #pragma once - -#include #include - +#include namespace openPMD { - /** - * Wrapper around a shared pointer to: - * * a filename - * * and a boolean indicating whether the file still exists - * The wrapper adds no extra information, but some commodity functions. - * Invariant for any context within which this class shall be used: - * For any valid filename, there is at any time at most one - * such shared pointer (wrapper) known in said context's data structures - * (counting by pointer equality) - * This means, that a file can be invalidated (i.e. deleted or overwritten) - * by simply searching for one instance of the file among all known files and - * invalidating this instance - * A new instance may hence only be created after making sure that there are - * no valid instances in the data structures. - */ - struct InvalidatableFile - { - explicit InvalidatableFile( std::string s ); - - - InvalidatableFile( ) = default; - - - struct FileState - { - explicit FileState( std::string s ); - - std::string name; - bool valid = true; - }; - - std::shared_ptr< FileState > fileState; - - - void invalidate( ); - +/** + * Wrapper around a shared pointer to: + * * a filename + * * and a boolean indicating whether the file still exists + * The wrapper adds no extra information, but some commodity functions. + * Invariant for any context within which this class shall be used: + * For any valid filename, there is at any time at most one + * such shared pointer (wrapper) known in said context's data structures + * (counting by pointer equality) + * This means, that a file can be invalidated (i.e. deleted or overwritten) + * by simply searching for one instance of the file among all known files and + * invalidating this instance + * A new instance may hence only be created after making sure that there are + * no valid instances in the data structures. 
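A standalone illustration of the single-instance invariant spelled out in the comment above, assuming only the header it documents; the registry `knownFiles` and the helper name are hypothetical:

```cpp
#include "openPMD/IO/InvalidatableFile.hpp"

#include <string>
#include <unordered_set>

// Hypothetical registry walk: because every data structure holds the *same*
// wrapper per filename, flipping the flag on the one known instance marks the
// file as invalid for all holders at once.
void markOverwritten(
    std::unordered_set<openPMD::InvalidatableFile> const &knownFiles,
    std::string const &name)
{
    for (auto const &file : knownFiles)
    {
        if (file.valid() && *file == name)
        {
            auto handle = file; // copies share the same FileState
            handle.invalidate(); // no holder sees this file as valid anymore
            break;
        }
    }
}
```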
+ */ +struct InvalidatableFile +{ + explicit InvalidatableFile(std::string s); - bool valid( ) const; + InvalidatableFile() = default; + struct FileState + { + explicit FileState(std::string s); - InvalidatableFile & operator=( std::string s ); + std::string name; + bool valid = true; + }; + std::shared_ptr fileState; - bool operator==( InvalidatableFile const & f ) const; + void invalidate(); + bool valid() const; - std::string & operator*( ) const; + InvalidatableFile &operator=(std::string s); + bool operator==(InvalidatableFile const &f) const; - std::string * operator->( ) const; + std::string &operator*() const; + std::string *operator->() const; - explicit operator bool( ) const; - }; -} + explicit operator bool() const; +}; +} // namespace openPMD namespace std { - template< > - struct hash< openPMD::InvalidatableFile > - { - using argument_type = openPMD::InvalidatableFile; - using result_type = std::size_t; +template <> +struct hash +{ + using argument_type = openPMD::InvalidatableFile; + using result_type = std::size_t; - result_type operator()( argument_type const & s ) const noexcept; - }; -} + result_type operator()(argument_type const &s) const noexcept; +}; +} // namespace std diff --git a/include/openPMD/IO/JSON/JSONFilePosition.hpp b/include/openPMD/IO/JSON/JSONFilePosition.hpp index 9ae257f626..ca18ddde93 100644 --- a/include/openPMD/IO/JSON/JSONFilePosition.hpp +++ b/include/openPMD/IO/JSON/JSONFilePosition.hpp @@ -21,20 +21,18 @@ #pragma once -#include "openPMD/config.hpp" #include "openPMD/IO/AbstractFilePosition.hpp" +#include "openPMD/config.hpp" #include - namespace openPMD { - struct JSONFilePosition : - public AbstractFilePosition - { - using json = nlohmann::json; - json::json_pointer id; +struct JSONFilePosition : public AbstractFilePosition +{ + using json = nlohmann::json; + json::json_pointer id; - JSONFilePosition( json::json_pointer ptr = json::json_pointer( ) ); - }; -} // openPMD + JSONFilePosition(json::json_pointer ptr = json::json_pointer()); +}; +} // namespace openPMD diff --git a/include/openPMD/IO/JSON/JSONIOHandler.hpp b/include/openPMD/IO/JSON/JSONIOHandler.hpp index 8e017b2eab..37b00fa165 100644 --- a/include/openPMD/IO/JSON/JSONIOHandler.hpp +++ b/include/openPMD/IO/JSON/JSONIOHandler.hpp @@ -21,29 +21,26 @@ #pragma once - #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IO/JSON/JSONIOHandlerImpl.hpp" - namespace openPMD { - class JSONIOHandler : - public AbstractIOHandler - { - public: - JSONIOHandler( - std::string path, - Access at - ); +class JSONIOHandler : public AbstractIOHandler +{ +public: + JSONIOHandler(std::string path, Access at); - ~JSONIOHandler( ) override; + ~JSONIOHandler() override; - std::string backendName() const override { return "JSON"; } + std::string backendName() const override + { + return "JSON"; + } - std::future< void > flush( ) override; + std::future flush(internal::FlushParams const &) override; - private: - JSONIOHandlerImpl m_impl; - }; -} // openPMD +private: + JSONIOHandlerImpl m_impl; +}; +} // namespace openPMD diff --git a/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp b/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp index 3d3b7f9c14..23ac579401 100644 --- a/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp +++ b/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp @@ -21,529 +21,394 @@ #pragma once -#include "openPMD/config.hpp" -#include "openPMD/auxiliary/Filesystem.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IO/AbstractIOHandlerImpl.hpp" #include "openPMD/IO/Access.hpp" 
#include "openPMD/IO/JSON/JSONFilePosition.hpp" +#include "openPMD/auxiliary/Filesystem.hpp" +#include "openPMD/config.hpp" #include #include #include #include +#include #include #include #include #include -#include - namespace openPMD { - // Wrapper around a shared pointer to: - // * a filename - // * and a boolean indicating whether the file still exists - // The wrapper adds no extra information, but some commodity functions. - // Invariant for JSONIOHandlerImpl: - // For any valid filename, there is at any time at most one - // such shared pointer (wrapper) in the HandlerImpl's data structures - // (counting by pointer equality) - // This means, that a file can be invalidated (i.e. deleted or overwritten) - // by simply searching for one instance of the file e.g. in m_files and - // invalidating this instance - // A new instance may hence only be created after making sure that there are - // no valid instances in the data structures. - struct File +// Wrapper around a shared pointer to: +// * a filename +// * and a boolean indicating whether the file still exists +// The wrapper adds no extra information, but some commodity functions. +// Invariant for JSONIOHandlerImpl: +// For any valid filename, there is at any time at most one +// such shared pointer (wrapper) in the HandlerImpl's data structures +// (counting by pointer equality) +// This means, that a file can be invalidated (i.e. deleted or overwritten) +// by simply searching for one instance of the file e.g. in m_files and +// invalidating this instance +// A new instance may hence only be created after making sure that there are +// no valid instances in the data structures. +struct File +{ + explicit File(std::string s) : fileState{std::make_shared(s)} + {} + + File() = default; + + struct FileState { - explicit File( std::string s ) : - fileState { std::make_shared< FileState >( s ) } + explicit FileState(std::string s) : name{std::move(s)} {} + std::string name; + bool valid = true; + }; - File( ) = default; + std::shared_ptr fileState; + void invalidate() + { + fileState->valid = false; + } - struct FileState + bool valid() const + { + return fileState->valid; + } + + File &operator=(std::string s) + { + if (fileState) { - explicit FileState( std::string s ) : - name { std::move( s ) } - {} + fileState->name = s; + } + else + { + fileState = std::make_shared(s); + } + return *this; + } + bool operator==(File const &f) const + { + return this->fileState == f.fileState; + } - std::string name; - bool valid = true; - }; + std::string &operator*() const + { + return fileState->name; + } - std::shared_ptr< FileState > fileState; + std::string *operator->() const + { + return &fileState->name; + } + explicit operator bool() const + { + return fileState.operator bool(); + } +}; +} // namespace openPMD - void invalidate( ) - { - fileState->valid = false; - } +namespace std +{ +template <> +struct hash +{ + typedef openPMD::File argument_type; + typedef std::size_t result_type; + result_type operator()(argument_type const &s) const noexcept + { + return std::hash>{}(s.fileState); + } +}; - bool valid( ) const - { - return fileState->valid; - } +// std::complex handling +template +void to_json(nlohmann::json &j, const std::complex &p) +{ + j = nlohmann::json{p.real(), p.imag()}; +} +template +void from_json(const nlohmann::json &j, std::complex &p) +{ + p.real(j.at(0)); + p.imag(j.at(1)); +} +} // namespace std - File & operator=( std::string s ) - { - if( fileState ) - { - fileState->name = s; - } - else - { - fileState = 
std::make_shared< FileState >( s ); - } - return *this; - } +namespace openPMD +{ +class JSONIOHandlerImpl : public AbstractIOHandlerImpl +{ + using json = nlohmann::json; +public: + explicit JSONIOHandlerImpl(AbstractIOHandler *); - bool operator==( - File const & f - ) const - { - return this->fileState == f.fileState; - } + ~JSONIOHandlerImpl() override; + void + createFile(Writable *, Parameter const &) override; - std::string & operator*( ) const - { - return fileState->name; - } + void + createPath(Writable *, Parameter const &) override; + void createDataset( + Writable *, Parameter const &) override; - std::string * operator->( ) const - { - return &fileState->name; - } + void extendDataset( + Writable *, Parameter const &) override; + void availableChunks( + Writable *, Parameter &) override; - explicit operator bool( ) const - { - return fileState.operator bool( ); - } - }; -} + void openFile(Writable *, Parameter const &) override; -namespace std -{ - template< > - struct hash< openPMD::File > - { - typedef openPMD::File argument_type; - typedef std::size_t result_type; + void + closeFile(Writable *, Parameter const &) override; + void openPath(Writable *, Parameter const &) override; - result_type operator()( argument_type const & s ) const noexcept - { - return std::hash< shared_ptr< openPMD::File::FileState>> {}( s.fileState ); - } - }; + void openDataset(Writable *, Parameter &) override; - // std::complex handling - template< class T > void to_json(nlohmann::json &j, const std::complex< T > &p) { - j = nlohmann::json {p.real(), p.imag()}; - } + void + deleteFile(Writable *, Parameter const &) override; - template< class T > void from_json(const nlohmann::json &j, std::complex< T > &p) { - p.real(j.at(0)); - p.imag(j.at(1)); - } -} + void + deletePath(Writable *, Parameter const &) override; -namespace openPMD -{ - class JSONIOHandlerImpl : - public AbstractIOHandlerImpl + void deleteDataset( + Writable *, Parameter const &) override; + + void deleteAttribute( + Writable *, Parameter const &) override; + + void writeDataset( + Writable *, Parameter const &) override; + + void writeAttribute( + Writable *, Parameter const &) override; + + void readDataset(Writable *, Parameter &) override; + + void readAttribute(Writable *, Parameter &) override; + + void listPaths(Writable *, Parameter &) override; + + void + listDatasets(Writable *, Parameter &) override; + + void listAttributes(Writable *, Parameter &) override; + + std::future flush(); + +private: + using FILEHANDLE = std::fstream; + + // map each Writable to its associated file + // contains only the filename, without the OS path + std::unordered_map m_files; + + std::unordered_map> m_jsonVals; + + // files that have logically, but not physically been written to + std::unordered_set m_dirty; + + // HELPER FUNCTIONS + + // will use the IOHandler to retrieve the correct directory + // shared pointer to circumvent the fact that c++ pre 17 does + // not enforce (only allow) copy elision in return statements + std::shared_ptr getFilehandle( + File, + Access access); //, Access + // m_frontendAccess=this->m_handler->m_frontendAccess); + + // full operating system path of the given file + std::string fullPath(File); + + std::string fullPath(std::string const &); + + // from a path specification /a/b/c, remove the last + // "folder" (i.e. 
modify the string to equal /a/b) + static void parentDir(std::string &); + + // Fileposition is assumed to have already been set, + // get it in string form + static std::string filepositionOf(Writable *w); + + // Execute visitor on each pair of positions in the json value + // and the flattened multidimensional array. + // Used for writing from the data to JSON and for reading back into + // the array from JSON + template + static void syncMultidimensionalJson( + nlohmann::json &j, + Offset const &offset, + Extent const &extent, + Extent const &multiplicator, + Visitor visitor, + T *data, + size_t currentdim = 0); + + // multiplicators: an array [m_0,...,m_n] s.t. + // data[i_0]...[i_n] = data[m_0*i_0+...+m_n*i_n] + // (m_n = 1) + // essentially: m_i = \prod_{j=0}^{i-1} extent_j + static Extent getMultiplicators(Extent const &extent); + + static nlohmann::json initializeNDArray(Extent const &extent); + + static Extent getExtent(nlohmann::json &j); + + // remove single '/' in the beginning and end of a string + static std::string removeSlashes(std::string); + + template + static bool hasKey(nlohmann::json &, KeyT &&key); + + // make sure that the given path exists in proper form in + // the passed json value + static void ensurePath(nlohmann::json *json, std::string path); + + // In order not to insert the same file name into the data structures + // with a new pointer (e.g. when reopening), search for a possibly + // existing old pointer. Construct a new pointer only upon failure. + // The bool is true iff the pointer has been newly-created. + // The iterator is an iterator for m_files + std::tuple::iterator, bool> + getPossiblyExisting(std::string file); + + // get the json value representing the whole file, possibly reading + // from disk + std::shared_ptr obtainJsonContents(File); + + // get the json value at the writable's fileposition + nlohmann::json &obtainJsonContents(Writable *writable); + + // write to disk the json contents associated with the file + // remove from m_dirty if unsetDirty == true + void putJsonContents(File, bool unsetDirty = true); + + // figure out the file position of the writable + // (preferring the parent's file position) and extend it + // by extend. return the modified file position. 
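The "multiplicators" described above are the row-major strides: with m_n = 1 and the flat index m_0*i_0 + ... + m_n*i_n, each m_i is the product of the extents *behind* index i (the trailing ones), which is what the product formula in the comment is meant to express. A self-contained sketch of that computation, not the in-tree implementation; `Extent` is redefined locally to keep the snippet standalone:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

using Extent = std::vector<std::uint64_t>; // local stand-in for openPMD::Extent

// Row-major strides: m_n = 1 and m_i is the product of all extents behind
// index i, so that data[i_0]...[i_n] maps to flat index m_0*i_0 + ... + m_n*i_n.
Extent rowMajorMultiplicators(Extent const &extent)
{
    Extent res(extent.size(), 1);
    for (std::size_t i = extent.size(); i > 1; --i)
        res[i - 2] = res[i - 1] * extent[i - 1];
    return res;
}
// Example: extent {4, 3, 2} yields {6, 2, 1}; element (i, j, k) sits at 6i + 2j + k.
```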
+ std::shared_ptr + setAndGetFilePosition(Writable *, std::string extend); + + // figure out the file position of the writable + // (preferring the parent's file position) + // only modify the writable's fileposition when specified + std::shared_ptr + setAndGetFilePosition(Writable *, bool write = true); + + // get the writable's containing file + // if the parent is associated with another file, + // associate the writable with that file and return it + File refreshFileFromParent(Writable *writable); + + void associateWithFile(Writable *writable, File); + + // need to check the name too in order to exclude "attributes" key + static bool isGroup(nlohmann::json::const_iterator it); + + static bool isDataset(nlohmann::json const &j); + + // check whether the json reference contains a valid dataset + template + void verifyDataset(Param const ¶meters, nlohmann::json &); + + static nlohmann::json platformSpecifics(); + + struct DatasetWriter { - using json = nlohmann::json; - - public: - explicit JSONIOHandlerImpl( AbstractIOHandler * ); - - ~JSONIOHandlerImpl( ) override; - - void createFile( - Writable *, - Parameter< Operation::CREATE_FILE > const & - ) override; - - void createPath( - Writable *, - Parameter< Operation::CREATE_PATH > const & - ) override; - - void createDataset( - Writable *, - Parameter< Operation::CREATE_DATASET > const & - ) override; - - void extendDataset( - Writable *, - Parameter< Operation::EXTEND_DATASET > const & - ) override; - - void - availableChunks( - Writable *, - Parameter< Operation::AVAILABLE_CHUNKS > & - ) override; + template + void operator()( + nlohmann::json &json, + const Parameter ¶meters); - void openFile( - Writable *, - Parameter< Operation::OPEN_FILE > const & - ) override; - - void closeFile( - Writable *, - Parameter< Operation::CLOSE_FILE > const & - ) override; - - void openPath( - Writable *, - Parameter< Operation::OPEN_PATH > const & - ) override; - - void openDataset( - Writable *, - Parameter< Operation::OPEN_DATASET > & - ) override; - - void deleteFile( - Writable *, - Parameter< Operation::DELETE_FILE > const & - ) override; - - void deletePath( - Writable *, - Parameter< Operation::DELETE_PATH > const & - ) override; - - void deleteDataset( - Writable *, - Parameter< Operation::DELETE_DATASET > const & - ) override; - - void deleteAttribute( - Writable *, - Parameter< Operation::DELETE_ATT > const & - ) override; - - void writeDataset( - Writable *, - Parameter< Operation::WRITE_DATASET > const & - ) override; - - void writeAttribute( - Writable *, - Parameter< Operation::WRITE_ATT > const & - ) override; - - void readDataset( - Writable *, - Parameter< Operation::READ_DATASET > & - ) override; - - void readAttribute( - Writable *, - Parameter< Operation::READ_ATT > & - ) override; - - void listPaths( - Writable *, - Parameter< Operation::LIST_PATHS > & - ) override; - - void listDatasets( - Writable *, - Parameter< Operation::LIST_DATASETS > & - ) override; - - void listAttributes( - Writable *, - Parameter< Operation::LIST_ATTS > & - ) override; - - std::future< void > flush( ) override; - - - private: - - using FILEHANDLE = std::fstream; - - // map each Writable to its associated file - // contains only the filename, without the OS path - std::unordered_map< - Writable *, - File - > m_files; - - std::unordered_map< - File, - std::shared_ptr< nlohmann::json >> m_jsonVals; - - // files that have logically, but not physically been written to - std::unordered_set< File > m_dirty; - - - // HELPER FUNCTIONS - - - // will use the 
IOHandler to retrieve the correct directory - // shared pointer to circumvent the fact that c++ pre 17 does - // not enforce (only allow) copy elision in return statements - std::shared_ptr< FILEHANDLE > getFilehandle( - File, - Access access - ); //, Access m_frontendAccess=this->m_handler->m_frontendAccess); - - // full operating system path of the given file - std::string fullPath( File ); - - std::string fullPath( std::string const & ); - - // from a path specification /a/b/c, remove the last - // "folder" (i.e. modify the string to equal /a/b) - static void parentDir( std::string & ); - - // Fileposition is assumed to have already been set, - // get it in string form - static std::string filepositionOf( Writable * w ); - - // Execute visitor on each pair of positions in the json value - // and the flattened multidimensional array. - // Used for writing from the data to JSON and for reading back into - // the array from JSON - template< - typename T, - typename Visitor - > - static void syncMultidimensionalJson( - nlohmann::json & j, - Offset const & offset, - Extent const & extent, - Extent const & multiplicator, - Visitor visitor, - T * data, - size_t currentdim = 0 - ); - - // multiplicators: an array [m_0,...,m_n] s.t. - // data[i_0]...[i_n] = data[m_0*i_0+...+m_n*i_n] - // (m_n = 1) - // essentially: m_i = \prod_{j=0}^{i-1} extent_j - static Extent getMultiplicators( Extent const & extent ); - - static nlohmann::json initializeNDArray( Extent const & extent ); - - static Extent getExtent( nlohmann::json & j ); - - - // remove single '/' in the beginning and end of a string - static std::string removeSlashes( std::string ); - - template< typename KeyT > - static bool hasKey( - nlohmann::json &, - KeyT && key - ); - - // make sure that the given path exists in proper form in - // the passed json value - static void ensurePath( - nlohmann::json * json, - std::string path - ); - - // In order not to insert the same file name into the data structures - // with a new pointer (e.g. when reopening), search for a possibly - // existing old pointer. Construct a new pointer only upon failure. - // The bool is true iff the pointer has been newly-created. - // The iterator is an iterator for m_files - std::tuple< - File, - std::unordered_map< - Writable *, - File - >::iterator, - bool - > getPossiblyExisting( - std::string file - ); - - // get the json value representing the whole file, possibly reading - // from disk - std::shared_ptr< nlohmann::json > obtainJsonContents( File ); - - // get the json value at the writable's fileposition - nlohmann::json & obtainJsonContents( Writable * writable ); - - // write to disk the json contents associated with the file - // remove from m_dirty if unsetDirty == true - void putJsonContents( - File, - bool unsetDirty = true - ); - - // figure out the file position of the writable - // (preferring the parent's file position) and extend it - // by extend. return the modified file position. 
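The `ensurePath()` helper declared above guarantees that a group path exists inside the in-memory JSON document before anything is written into it. A standalone sketch of the same idea using nlohmann::json's `json_pointer`; this is an illustration of the concept, not the in-tree implementation:

```cpp
#include <nlohmann/json.hpp>

#include <iostream>

int main()
{
    nlohmann::json j;
    // Writing through a non-const json_pointer materializes missing parents
    // as objects, which is one way to ensure that a group path exists.
    j[nlohmann::json::json_pointer("/data/meshes/E")] = nlohmann::json::object();
    std::cout << j.dump(2) << '\n'; // {"data": {"meshes": {"E": {}}}}
}
```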
- std::shared_ptr< JSONFilePosition > setAndGetFilePosition( - Writable *, - std::string extend - ); - - // figure out the file position of the writable - // (preferring the parent's file position) - // only modify the writable's fileposition when specified - std::shared_ptr< JSONFilePosition > setAndGetFilePosition( - Writable *, - bool write = true - ); - - // get the writable's containing file - // if the parent is associated with another file, - // associate the writable with that file and return it - File refreshFileFromParent( Writable * writable ); - - void associateWithFile( - Writable * writable, - File - ); - - // need to check the name too in order to exclude "attributes" key - static bool isGroup( nlohmann::json::const_iterator it ); - - static bool isDataset( nlohmann::json const & j ); - - - // check whether the json reference contains a valid dataset - template< typename Param > - void verifyDataset( - Param const & parameters, - nlohmann::json & - ); - - static nlohmann::json platformSpecifics( ); - - struct DatasetWriter - { - template< typename T > - void operator()( - nlohmann::json & json, - const Parameter< Operation::WRITE_DATASET > & parameters - ); + std::string errorMsg = "JSON: writeDataset"; + }; - std::string errorMsg = "JSON: writeDataset"; - }; + struct DatasetReader + { + template + void operator()( + nlohmann::json &json, + Parameter ¶meters); - struct DatasetReader - { - template< typename T > - void operator()( - nlohmann::json & json, - Parameter< Operation::READ_DATASET > & parameters - ); + std::string errorMsg = "JSON: readDataset"; + }; - std::string errorMsg = "JSON: readDataset"; - }; + struct AttributeWriter + { + template + void operator()(nlohmann::json &, Attribute::resource const &); - struct AttributeWriter - { - template< typename T > - void operator()( - nlohmann::json &, - Attribute::resource const & - ); + std::string errorMsg = "JSON: writeAttribute"; + }; - std::string errorMsg = "JSON: writeAttribute"; - }; + struct AttributeReader + { + template + void operator()(nlohmann::json &, Parameter &); - struct AttributeReader - { - template< typename T > - void operator()( - nlohmann::json &, - Parameter< Operation::READ_ATT > & - ); + std::string errorMsg = "JSON: writeAttribute"; + }; - std::string errorMsg = "JSON: writeAttribute"; - }; + template + struct CppToJSON + { + nlohmann::json operator()(T const &); + }; - template< typename T > - struct CppToJSON - { - nlohmann::json operator()( T const & ); - }; + template + struct CppToJSON> + { + nlohmann::json operator()(std::vector const &); + }; - template< typename T > - struct CppToJSON< std::vector< T>> - { - nlohmann::json operator()( std::vector< T > const & ); - }; - - template< typename T, int n > - struct CppToJSON< - std::array< - T, - n>> - { - nlohmann::json operator()( - std::array< - T, - n - > const & - ); - }; - - template< - typename T, - typename Enable = T - > - struct JsonToCpp - { - T operator()( nlohmann::json const & ); - }; + template + struct CppToJSON> + { + nlohmann::json operator()(std::array const &); + }; - template< typename T > - struct JsonToCpp< std::vector< T > > - { - std::vector< T > operator()( nlohmann::json const & ); - }; - - template< typename T, int n > - struct JsonToCpp< - std::array< - T, - n - > - > - { - std::array< - T, - n - > operator()( nlohmann::json const & ); - }; - - template< typename T > - struct JsonToCpp< - T, - typename std::enable_if< - std::is_floating_point< - T - >::value - >::type - > - { - T operator()( nlohmann::json const 
& ); - }; + template + struct JsonToCpp + { + T operator()(nlohmann::json const &); + }; + + template + struct JsonToCpp> + { + std::vector operator()(nlohmann::json const &); + }; + + template + struct JsonToCpp> + { + std::array operator()(nlohmann::json const &); + }; + + template + struct JsonToCpp< + T, + typename std::enable_if::value>::type> + { + T operator()(nlohmann::json const &); }; +}; -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/Iteration.hpp b/include/openPMD/Iteration.hpp index 59204947bb..3dad0772f1 100644 --- a/include/openPMD/Iteration.hpp +++ b/include/openPMD/Iteration.hpp @@ -20,67 +20,68 @@ */ #pragma once -#include "openPMD/auxiliary/Option.hpp" -#include "openPMD/auxiliary/Variant.hpp" -#include "openPMD/backend/Attributable.hpp" -#include "openPMD/backend/Container.hpp" #include "openPMD/IterationEncoding.hpp" #include "openPMD/Mesh.hpp" #include "openPMD/ParticleSpecies.hpp" #include "openPMD/Streaming.hpp" - +#include "openPMD/auxiliary/Option.hpp" +#include "openPMD/auxiliary/Variant.hpp" +#include "openPMD/backend/Attributable.hpp" +#include "openPMD/backend/Container.hpp" namespace openPMD { -/** @brief Logical compilation of data from one snapshot (e.g. a single simulation cycle). +/** @brief Logical compilation of data from one snapshot (e.g. a single + * simulation cycle). * - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#required-attributes-for-the-basepath + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#required-attributes-for-the-basepath */ class Iteration : public LegacyAttributable { - template< - typename T, - typename T_key, - typename T_container - > + template friend class Container; friend class SeriesInterface; friend class WriteIterations; friend class SeriesIterator; public: - Iteration( Iteration const & ) = default; - Iteration & operator=( Iteration const & ) = default; + Iteration(Iteration const &) = default; + Iteration &operator=(Iteration const &) = default; /** - * @tparam T Floating point type of user-selected precision (e.g. float, double). + * @tparam T Floating point type of user-selected precision (e.g. float, + * double). * @return Global reference time for this iteration. */ - template< typename T > + template T time() const; /** Set the global reference time for this iteration. * - * @tparam T Floating point type of user-selected precision (e.g. float, double). + * @tparam T Floating point type of user-selected precision (e.g. + * float, double). * @param newTime Global reference time for this iteration. * @return Reference to modified iteration. */ - template< typename T > - Iteration& setTime(T newTime); + template + Iteration &setTime(T newTime); /** - * @tparam T Floating point type of user-selected precision (e.g. float, double). + * @tparam T Floating point type of user-selected precision (e.g. float, + * double). * @return Time step used to reach this iteration. */ - template< typename T > + template T dt() const; /** Set the time step used to reach this iteration. * - * @tparam T Floating point type of user-selected precision (e.g. float, double). + * @tparam T Floating point type of user-selected precision (e.g. + * float, double). * @param newDt Time step used to reach this iteration. * @return Reference to modified iteration. */ - template< typename T > - Iteration& setDt(T newDt); + template + Iteration &setDt(T newDt); /** * @return Conversion factor to convert time and dt to seconds. 
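Usage sketch for the time metadata accessors documented above, assuming a `Series` that is open for writing; the iteration index and the numeric values are placeholders:

```cpp
#include <openPMD/openPMD.hpp>

void describeTime(openPMD::Series &series)
{
    openPMD::Iteration &it = series.iterations[42];
    // chainable setters; here the simulation's time unit equals one second
    it.setTime(1.5e-14).setDt(5.0e-16).setTimeUnitSI(1.0);
    double t = it.time<double>(); // read back in user-selected precision
    (void)t;
}
```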
@@ -91,7 +92,7 @@ class Iteration : public LegacyAttributable * @param newTimeUnitSI new value for timeUnitSI * @return Reference to modified iteration. */ - Iteration& setTimeUnitSI(double newTimeUnitSI); + Iteration &setTimeUnitSI(double newTimeUnitSI); /** Close an iteration * @@ -108,8 +109,7 @@ class Iteration : public LegacyAttributable * API. Currently, disallowing to reopen closed iterations satisfies * the requirements of the streaming API. */ - Iteration & - close( bool flush = true ); + Iteration &close(bool flush = true); /** Open an iteration * @@ -124,8 +124,7 @@ class Iteration : public LegacyAttributable * * @return Reference to iteration. */ - Iteration & - open(); + Iteration &open(); /** * @brief Has the iteration been closed? @@ -133,8 +132,7 @@ class Iteration : public LegacyAttributable * * @return Whether the iteration has been closed. */ - bool - closed() const; + bool closed() const; /** * @brief Has the iteration been closed by the writer? @@ -147,13 +145,13 @@ class Iteration : public LegacyAttributable * @return Whether the iteration has been explicitly closed (yet) by the * writer. */ - bool - closedByWriter() const; + bool closedByWriter() const; - Container< Mesh > meshes; - Container< ParticleSpecies > particles; //particleSpecies? + Container meshes; + Container particles; // particleSpecies? virtual ~Iteration() = default; + private: Iteration(); @@ -179,35 +177,39 @@ class Iteration : public LegacyAttributable * containing this iteration. */ std::string filename; + bool beginStep = false; }; - void flushFileBased(std::string const&, uint64_t); - void flushGroupBased(uint64_t); - void flushVariableBased(uint64_t); - void flush(); - void deferParseAccess( DeferredParseAccess ); + void flushFileBased( + std::string const &, uint64_t, internal::FlushParams const &); + void flushGroupBased(uint64_t, internal::FlushParams const &); + void flushVariableBased(uint64_t, internal::FlushParams const &); + void flush(internal::FlushParams const &); + void deferParseAccess(DeferredParseAccess); /* - * Control flow for read(), readFileBased(), readGroupBased() and - * read_impl(): - * read() is called as the entry point. File-based and group-based + * Control flow for runDeferredParseAccess(), readFileBased(), + * readGroupBased() and read_impl(): + * runDeferredParseAccess() is called as the entry point. + * File-based and group-based * iteration layouts need to be parsed slightly differently: * In file-based iteration layout, each iteration's file also contains * attributes for the /data group. In group-based layout, those have * already been parsed during opening of the Series. - * Hence, read() will call either readFileBased() or readGroupBased() to + * Hence, runDeferredParseAccess() will call either readFileBased() or + * readGroupBased() to * allow for those different control flows. * Finally, read_impl() is called which contains the common parsing * logic for an iteration. - * + * * reread() reads again an Iteration that has been previously read. * Calling it on an Iteration not yet parsed is an error. 
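Writer-side sketch of the `close()` contract described above: closing flushes the iteration (and, for streaming backends, ends the IO step), after which it must not be modified or reopened. `series`, the mesh names, and the extents are placeholders:

```cpp
#include <openPMD/openPMD.hpp>

#include <vector>

void writeOneIteration(openPMD::Series &series, std::vector<double> &field)
{
    openPMD::Iteration &it = series.iterations[0];
    auto E_x = it.meshes["E"]["x"];
    E_x.resetDataset(
        openPMD::Dataset(openPMD::determineDatatype<double>(), {field.size()}));
    E_x.storeChunk(field, {0}, {field.size()});
    it.close(); // flushes pending chunks; the iteration may not be reopened
}
```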
* */ - void read(); - void reread( std::string const & path ); - void readFileBased( std::string filePath, std::string const & groupPath ); - void readGorVBased( std::string const & groupPath ); - void read_impl( std::string const & groupPath ); + void reread(std::string const &path); + void readFileBased( + std::string filePath, std::string const &groupPath, bool beginStep); + void readGorVBased(std::string const &groupPath, bool beginStep); + void read_impl(std::string const &groupPath); /** * @brief Whether an iteration has been closed yet. @@ -216,7 +218,7 @@ class Iteration : public LegacyAttributable enum class CloseStatus { ParseAccessDeferred, //!< The reader has not yet parsed this iteration - Open, //!< Iteration has not been closed + Open, //!< Iteration has not been closed ClosedInFrontend, /*!< Iteration has been closed, but task has not yet been propagated to the backend */ ClosedInBackend, /*!< Iteration has been closed and task has been @@ -233,8 +235,8 @@ class Iteration : public LegacyAttributable * Once an iteration has been closed, no further flushes shall be performed. * If flushing a closed file, the old file may otherwise be overwritten. */ - std::shared_ptr< CloseStatus > m_closed = - std::make_shared< CloseStatus >( CloseStatus::Open ); + std::shared_ptr m_closed = + std::make_shared(CloseStatus::Open); /** * Whether a step is currently active for this iteration. @@ -243,13 +245,18 @@ class Iteration : public LegacyAttributable * Access via stepStatus() method to automatically select the correct * one among both flags. */ - std::shared_ptr< StepStatus > m_stepStatus = - std::make_shared< StepStatus >( StepStatus::NoStep ); + std::shared_ptr m_stepStatus = + std::make_shared(StepStatus::NoStep); - std::shared_ptr< auxiliary::Option< DeferredParseAccess > > + std::shared_ptr > m_deferredParseAccess = - std::make_shared< auxiliary::Option< DeferredParseAccess > >( - auxiliary::Option< DeferredParseAccess >() ); + std::make_shared >( + auxiliary::Option()); + + std::shared_ptr > + m_overrideFilebasedFilename = + std::make_shared >( + auxiliary::Option()); /** * @brief Begin an IO step on the IO file (or file-like object) @@ -258,8 +265,7 @@ class Iteration : public LegacyAttributable * * @return AdvanceStatus */ - AdvanceStatus - beginStep(); + AdvanceStatus beginStep(bool reread); /** * @brief End an IO step on the IO file (or file-like object) @@ -268,8 +274,7 @@ class Iteration : public LegacyAttributable * * @return AdvanceStatus */ - void - endStep(); + void endStep(); /** * @brief Is a step currently active for this iteration? @@ -279,8 +284,7 @@ class Iteration : public LegacyAttributable * in case of file-based iteration layout, it is local (member of this very * object). */ - StepStatus - getStepStatus(); + StepStatus getStepStatus(); /** * @brief Set step activity status for this iteration. @@ -290,7 +294,7 @@ class Iteration : public LegacyAttributable * in case of file-based iteration layout, it is set locally (member of * this very object). */ - void setStepStatus( StepStatus ); + void setStepStatus(StepStatus); /* * @brief Check recursively whether this Iteration is dirty. @@ -300,56 +304,44 @@ class Iteration : public LegacyAttributable * @return true If dirty. * @return false Otherwise. */ - bool - dirtyRecursive() const; + bool dirtyRecursive() const; /** * @brief Link with parent. - * + * * @param w The Writable representing the parent. 
*/ - virtual void linkHierarchy(Writable& w); + virtual void linkHierarchy(Writable &w); /** * @brief Access an iteration in read mode that has potentially not been * parsed yet. - * + * */ void runDeferredParseAccess(); -}; // Iteration - -extern template -float -Iteration::time< float >() const; +}; // Iteration -extern template -double -Iteration::time< double >() const; +extern template float Iteration::time() const; -extern template -long double -Iteration::time< long double >() const; +extern template double Iteration::time() const; -template< typename T > -inline T -Iteration::time() const -{ return this->readFloatingpoint< T >("time"); } +extern template long double Iteration::time() const; +template +inline T Iteration::time() const +{ + return this->readFloatingpoint("time"); +} -extern template -float -Iteration::dt< float >() const; +extern template float Iteration::dt() const; -extern template -double -Iteration::dt< double >() const; +extern template double Iteration::dt() const; -extern template -long double -Iteration::dt< long double >() const; +extern template long double Iteration::dt() const; -template< typename T > -inline T -Iteration::dt() const -{ return this->readFloatingpoint< T >("dt"); } -} // openPMD +template +inline T Iteration::dt() const +{ + return this->readFloatingpoint("dt"); +} +} // namespace openPMD diff --git a/include/openPMD/IterationEncoding.hpp b/include/openPMD/IterationEncoding.hpp index fa655aa348..81cc191000 100644 --- a/include/openPMD/IterationEncoding.hpp +++ b/include/openPMD/IterationEncoding.hpp @@ -22,19 +22,20 @@ #include - namespace openPMD { /** Encoding scheme of an Iterations Series'. * - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#iterations-and-time-series + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#iterations-and-time-series */ enum class IterationEncoding { - fileBased, groupBased, variableBased + fileBased, + groupBased, + variableBased }; -std::ostream& -operator<<(std::ostream&, openPMD::IterationEncoding const&); +std::ostream &operator<<(std::ostream &, openPMD::IterationEncoding const &); -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/Mesh.hpp b/include/openPMD/Mesh.hpp index bc875d25db..17ce9373de 100644 --- a/include/openPMD/Mesh.hpp +++ b/include/openPMD/Mesh.hpp @@ -30,28 +30,28 @@ #include #include - namespace openPMD { /** @brief Container for N-dimensional, homogeneous Records. * - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#mesh-based-records + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#mesh-based-records */ -class Mesh : public BaseRecord< MeshRecordComponent > +class Mesh : public BaseRecord { - friend class Container< Mesh >; + friend class Container; friend class Iteration; public: - Mesh(Mesh const&) = default; - Mesh& operator=(Mesh const&) = default; + Mesh(Mesh const &) = default; + Mesh &operator=(Mesh const &) = default; ~Mesh() override = default; /** @brief Enumerated datatype for the geometry of the mesh. * - * @note If the default values do not suit your application, you can set arbitrary - * Geometry with MeshRecordComponent::setAttribute("geometry", VALUE). - * Note that this might break openPMD compliance and tool support. + * @note If the default values do not suit your application, you can set + * arbitrary Geometry with MeshRecordComponent::setAttribute("geometry", + * VALUE). Note that this might break openPMD compliance and tool support. 
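Usage sketch for the geometry setters documented above; `mesh` is a placeholder obtained from an iteration's `meshes` container:

```cpp
#include <openPMD/openPMD.hpp>

void describeGeometry(openPMD::Mesh &mesh)
{
    // standardized geometry plus its free-form parameter string
    mesh.setGeometry(openPMD::Mesh::Geometry::thetaMode);
    mesh.setGeometryParameters("m=2;imag=+");

    // a geometry unknown to the API would be stored with an "other:" prefix:
    // mesh.setGeometry("icosahedral");
}
```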
*/ enum class Geometry { @@ -60,7 +60,7 @@ class Mesh : public BaseRecord< MeshRecordComponent > cylindrical, spherical, other - }; //Geometry + }; // Geometry /** @brief Enumerated datatype for the memory layout of N-dimensional data. */ @@ -68,7 +68,7 @@ class Mesh : public BaseRecord< MeshRecordComponent > { C = 'C', F = 'F' - }; //DataOrder + }; // DataOrder /** * @return Enum representing the geometry of the mesh of the mesh record. @@ -83,7 +83,7 @@ class Mesh : public BaseRecord< MeshRecordComponent > * @param g geometry of the mesh of the mesh record. * @return Reference to modified mesh. */ - Mesh& setGeometry(Geometry g); + Mesh &setGeometry(Geometry g); /** Set the geometry of the mesh of the mesh record. * * If the geometry is unknown to the openPMD-api, the string is prefixed @@ -92,20 +92,24 @@ class Mesh : public BaseRecord< MeshRecordComponent > * @param geometry geometry of the mesh of the mesh record, as string * @return Reference to modified mesh. */ - Mesh& setGeometry(std::string geometry); + Mesh &setGeometry(std::string geometry); /** - * @throw no_such_attribute_error If Mesh::geometry is not Mesh::Geometry::thetaMode. - * @return String representing additional parameters for the geometry, separated by a @code ; @endcode. + * @throw no_such_attribute_error If Mesh::geometry is not + * Mesh::Geometry::thetaMode. + * @return String representing additional parameters for the geometry, + * separated by a @code ; @endcode. */ std::string geometryParameters() const; - /** Set additional parameters for the geometry, separated by a @code ; @endcode. + /** Set additional parameters for the geometry, separated by a @code ; + * @endcode. * * @note Separation constraint is not verified by API. - * @param geometryParameters additional parameters for the geometry, separated by a @code ; @endcode. + * @param geometryParameters additional parameters for the geometry, + * separated by a @code ; @endcode. * @return Reference to modified mesh. */ - Mesh& setGeometryParameters(std::string const& geometryParameters); + Mesh &setGeometryParameters(std::string const &geometryParameters); /** * @return Memory layout of N-dimensional data. @@ -116,105 +120,133 @@ class Mesh : public BaseRecord< MeshRecordComponent > * @param dor memory layout of N-dimensional data. * @return Reference to modified mesh. */ - Mesh& setDataOrder(DataOrder dor); + Mesh &setDataOrder(DataOrder dor); /** * @return Ordering of the labels for the Mesh::geometry of the mesh. */ - std::vector< std::string > axisLabels() const; + std::vector axisLabels() const; /** Set the ordering of the labels for the Mesh::geometry of the mesh. * * @note Dimensionality constraint is not verified by API. - * @param axisLabels vector containing N (string) elements, where N is the number of dimensions in the simulation. + * @param axisLabels vector containing N (string) elements, where N is + * the number of dimensions in the simulation. * @return Reference to modified mesh. */ - Mesh& setAxisLabels(std::vector< std::string > const & axisLabels); + Mesh &setAxisLabels(std::vector const &axisLabels); /** - * @tparam T Floating point type of user-selected precision (e.g. float, double). - * @return vector of T representing the spacing of the grid points along each dimension (in the units of the simulation). - */ - template< typename T > - std::vector< T > gridSpacing() const; - /** Set the spacing of the grid points along each dimension (in the units of the simulation). + * @tparam T Floating point type of user-selected precision (e.g. 
float, + * double). + * @return vector of T representing the spacing of the grid points along + * each dimension (in the units of the simulation). + */ + template + std::vector gridSpacing() const; + /** Set the spacing of the grid points along each dimension (in the units of + * the simulation). * * @note Dimensionality constraint is not verified by API. - * @tparam T Floating point type of user-selected precision (e.g. float, double). - * @param gridSpacing vector containing N (T) elements, where N is the number of dimensions in the simulation. + * @tparam T Floating point type of user-selected precision (e.g. float, + * double). + * @param gridSpacing vector containing N (T) elements, where N is the + * number of dimensions in the simulation. * @return Reference to modified mesh. */ - template< typename T, - typename = std::enable_if_t::value> > - Mesh& setGridSpacing(std::vector< T > const & gridSpacing); + template < + typename T, + typename = std::enable_if_t::value> > + Mesh &setGridSpacing(std::vector const &gridSpacing); /** - * @return Vector of (double) representing the start of the current domain of the simulation (position of the beginning of the first cell) in simulation units. + * @return Vector of (double) representing the start of the current domain + * of the simulation (position of the beginning of the first cell) in + * simulation units. */ - std::vector< double > gridGlobalOffset() const; - /** Set the start of the current domain of the simulation (position of the beginning of the first cell) in simulation units. + std::vector gridGlobalOffset() const; + /** Set the start of the current domain of the simulation (position of the + * beginning of the first cell) in simulation units. * * @note Dimensionality constraint is not verified by API. - * @param gridGlobalOffset vector containing N (double) elements, where N is the number of dimensions in the simulation. + * @param gridGlobalOffset vector containing N (double) elements, where + * N is the number of dimensions in the simulation. * @return Reference to modified mesh. */ - Mesh& setGridGlobalOffset(std::vector< double > const & gridGlobalOffset); + Mesh &setGridGlobalOffset(std::vector const &gridGlobalOffset); /** - * @return Unit-conversion factor to multiply each value in Mesh::gridSpacing and Mesh::gridGlobalOffset, in order to convert from simulation units to SI units. + * @return Unit-conversion factor to multiply each value in + * Mesh::gridSpacing and Mesh::gridGlobalOffset, in order to convert from + * simulation units to SI units. */ double gridUnitSI() const; - /** Set the unit-conversion factor to multiply each value in Mesh::gridSpacing and Mesh::gridGlobalOffset, in order to convert from simulation units to SI units. + /** Set the unit-conversion factor to multiply each value in + * Mesh::gridSpacing and Mesh::gridGlobalOffset, in order to convert from + * simulation units to SI units. * - * @param gridUnitSI unit-conversion factor to multiply each value in Mesh::gridSpacing and Mesh::gridGlobalOffset, in order to convert from simulation units to SI units. + * @param gridUnitSI unit-conversion factor to multiply each value in + * Mesh::gridSpacing and Mesh::gridGlobalOffset, in order to convert from + * simulation units to SI units. * @return Reference to modified mesh. */ - Mesh& setGridUnitSI(double gridUnitSI); + Mesh &setGridUnitSI(double gridUnitSI); - /** Set the powers of the 7 base measures characterizing the record's unit in SI. 
+ /** Set the powers of the 7 base measures characterizing the record's unit + * in SI. * - * @param unitDimension map containing pairs of (UnitDimension, double) that represent the power of the particular base. + * @param unitDimension map containing pairs of (UnitDimension, double) + * that represent the power of the particular base. * @return Reference to modified mesh. */ - Mesh& setUnitDimension(std::map< UnitDimension, double > const& unitDimension); + Mesh & + setUnitDimension(std::map const &unitDimension); /** - * @tparam T Floating point type of user-selected precision (e.g. float, double). - * @return Offset between the time at which this record is defined and the Iteration::time attribute of the Series::basePath level. + * @tparam T Floating point type of user-selected precision (e.g. float, + * double). + * @return Offset between the time at which this record is defined and the + * Iteration::time attribute of the Series::basePath level. */ - template< typename T > + template T timeOffset() const; - /** Set the offset between the time at which this record is defined and the Iteration::time attribute of the Series::basePath level. + /** Set the offset between the time at which this record is defined and the + * Iteration::time attribute of the Series::basePath level. * - * @note This should be written in the same unit system as Iteration::time. - * @tparam T Floating point type of user-selected precision (e.g. float, double). - * @param timeOffset Offset between the time at which this record is defined and the Iteration::time attribute of the Series::basePath level. + * @note This should be written in the same unit system as + * Iteration::time. + * @tparam T Floating point type of user-selected precision (e.g. float, + * double). + * @param timeOffset Offset between the time at which this record is + * defined and the Iteration::time attribute of the Series::basePath level. * @return Reference to modified mesh. 
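Sketch for `setUnitDimension()` and `setTimeOffset()` as documented above: an electric field in SI units decomposes into kg·m·s⁻³·A⁻¹. `E` is a placeholder mesh:

```cpp
#include <openPMD/openPMD.hpp>

void describeUnits(openPMD::Mesh &E)
{
    using openPMD::UnitDimension;
    // powers of the 7 SI base measures for V/m = kg * m * s^-3 * A^-1
    E.setUnitDimension(
        {{UnitDimension::M, 1.0},
         {UnitDimension::L, 1.0},
         {UnitDimension::T, -3.0},
         {UnitDimension::I, -1.0}});
    E.setTimeOffset(0.0); // record defined exactly at Iteration::time
}
```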
*/ - template< typename T, - typename = std::enable_if_t::value> > - Mesh& setTimeOffset(T timeOffset); + template < + typename T, + typename = std::enable_if_t::value> > + Mesh &setTimeOffset(T timeOffset); private: Mesh(); - void flush_impl(std::string const&) override; + void + flush_impl(std::string const &, internal::FlushParams const &) override; void read() override; }; // Mesh -template< typename T > -inline std::vector< T > -Mesh::gridSpacing() const -{ return readVectorFloatingpoint< T >("gridSpacing"); } +template +inline std::vector Mesh::gridSpacing() const +{ + return readVectorFloatingpoint("gridSpacing"); +} -template< typename T > -inline T -Mesh::timeOffset() const -{ return readFloatingpoint< T >("timeOffset"); } +template +inline T Mesh::timeOffset() const +{ + return readFloatingpoint("timeOffset"); +} -std::ostream& -operator<<(std::ostream&, openPMD::Mesh::Geometry const&); +std::ostream &operator<<(std::ostream &, openPMD::Mesh::Geometry const &); -std::ostream& -operator<<(std::ostream&, openPMD::Mesh::DataOrder const&); +std::ostream &operator<<(std::ostream &, openPMD::Mesh::DataOrder const &); -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/ParticlePatches.hpp b/include/openPMD/ParticlePatches.hpp index ca0d0ff76e..f3c4c0b943 100644 --- a/include/openPMD/ParticlePatches.hpp +++ b/include/openPMD/ParticlePatches.hpp @@ -23,25 +23,24 @@ #include "openPMD/backend/Container.hpp" #include "openPMD/backend/PatchRecord.hpp" -#include #include - +#include namespace openPMD { - class ParticlePatches : public Container< PatchRecord > - { - friend class ParticleSpecies; - friend class Container< ParticlePatches >; - friend class Container< PatchRecord >; +class ParticlePatches : public Container +{ + friend class ParticleSpecies; + friend class Container; + friend class Container; - public: - size_t numPatches() const; - ~ParticlePatches() override = default; +public: + size_t numPatches() const; + ~ParticlePatches() override = default; - private: - ParticlePatches() = default; - void read(); - }; // ParticlePatches +private: + ParticlePatches() = default; + void read(); +}; // ParticlePatches } // namespace openPMD diff --git a/include/openPMD/ParticleSpecies.hpp b/include/openPMD/ParticleSpecies.hpp index a32fb17001..0257cd474f 100644 --- a/include/openPMD/ParticleSpecies.hpp +++ b/include/openPMD/ParticleSpecies.hpp @@ -20,21 +20,20 @@ */ #pragma once -#include "openPMD/backend/Attributable.hpp" -#include "openPMD/backend/Container.hpp" #include "openPMD/ParticlePatches.hpp" #include "openPMD/Record.hpp" +#include "openPMD/backend/Attributable.hpp" +#include "openPMD/backend/Container.hpp" #include - namespace openPMD { -class ParticleSpecies : public Container< Record > +class ParticleSpecies : public Container { - friend class Container< ParticleSpecies >; - friend class Container< Record >; + friend class Container; + friend class Container; friend class Iteration; public: @@ -44,7 +43,7 @@ class ParticleSpecies : public Container< Record > ParticleSpecies(); void read(); - void flush(std::string const &) override; + void flush(std::string const &, internal::FlushParams const &) override; /** * @brief Check recursively whether this ParticleSpecies is dirty. @@ -54,29 +53,28 @@ class ParticleSpecies : public Container< Record > * @return true If dirty. * @return false Otherwise. 
*/ - bool - dirtyRecursive() const; + bool dirtyRecursive() const; }; namespace traits { - template<> - struct GenerationPolicy< ParticleSpecies > + template <> + struct GenerationPolicy { - template< typename T > - void operator()(T & ret) + template + void operator()(T &ret) { ret.particlePatches.linkHierarchy(ret.writable()); - auto& np = ret.particlePatches["numParticles"]; - auto& npc = np[RecordComponent::SCALAR]; + auto &np = ret.particlePatches["numParticles"]; + auto &npc = np[RecordComponent::SCALAR]; npc.resetDataset(Dataset(determineDatatype(), {1})); npc.parent() = np.parent(); - auto& npo = ret.particlePatches["numParticlesOffset"]; - auto& npoc = npo[RecordComponent::SCALAR]; + auto &npo = ret.particlePatches["numParticlesOffset"]; + auto &npoc = npo[RecordComponent::SCALAR]; npoc.resetDataset(Dataset(determineDatatype(), {1})); npoc.parent() = npo.parent(); } }; -} // traits -} // openPMD +} // namespace traits +} // namespace openPMD diff --git a/include/openPMD/ReadIterations.hpp b/include/openPMD/ReadIterations.hpp index 915b1446ac..7bbdc132bf 100644 --- a/include/openPMD/ReadIterations.hpp +++ b/include/openPMD/ReadIterations.hpp @@ -34,24 +34,22 @@ class IndexedIteration : public Iteration friend class SeriesIterator; public: - using iterations_t = decltype( internal::SeriesData::iterations ); + using iterations_t = decltype(internal::SeriesData::iterations); using index_t = iterations_t::key_type; index_t const iterationIndex; private: - template< typename Iteration_t > - IndexedIteration( Iteration_t && it, index_t index ) - : Iteration( std::forward< Iteration_t >( it ) ) - , iterationIndex( index ) - { - } + template + IndexedIteration(Iteration_t &&it, index_t index) + : Iteration(std::forward(it)), iterationIndex(index) + {} }; class SeriesIterator { using iteration_index_t = IndexedIteration::index_t; - using maybe_series_t = auxiliary::Option< Series >; + using maybe_series_t = auxiliary::Option; maybe_series_t m_series; iteration_index_t m_currentIteration = 0; @@ -60,15 +58,15 @@ class SeriesIterator //! 
construct the end() iterator explicit SeriesIterator(); - SeriesIterator( Series ); + SeriesIterator(Series); - SeriesIterator & operator++(); + SeriesIterator &operator++(); IndexedIteration operator*(); - bool operator==( SeriesIterator const & other ) const; + bool operator==(SeriesIterator const &other) const; - bool operator!=( SeriesIterator const & other ) const; + bool operator!=(SeriesIterator const &other) const; static SeriesIterator end(); }; @@ -94,12 +92,12 @@ class ReadIterations friend class Series; private: - using iterations_t = decltype( internal::SeriesData::iterations ); + using iterations_t = decltype(internal::SeriesData::iterations); using iterator_t = SeriesIterator; Series m_series; - ReadIterations( Series ); + ReadIterations(Series); public: iterator_t begin(); diff --git a/include/openPMD/Record.hpp b/include/openPMD/Record.hpp index b0c3653704..4f7ee51c28 100644 --- a/include/openPMD/Record.hpp +++ b/include/openPMD/Record.hpp @@ -20,54 +20,55 @@ */ #pragma once -#include "openPMD/backend/BaseRecord.hpp" #include "openPMD/RecordComponent.hpp" +#include "openPMD/backend/BaseRecord.hpp" #include -#include #include - +#include namespace openPMD { -class Record : public BaseRecord< RecordComponent > +class Record : public BaseRecord { - friend class Container< Record >; + friend class Container; friend class Iteration; friend class ParticleSpecies; public: - Record(Record const&) = default; - Record& operator=(Record const&) = default; + Record(Record const &) = default; + Record &operator=(Record const &) = default; ~Record() override = default; - Record& setUnitDimension(std::map< UnitDimension, double > const&); + Record &setUnitDimension(std::map const &); - template< typename T > + template T timeOffset() const; - template< typename T > - Record& setTimeOffset(T); + template + Record &setTimeOffset(T); private: Record(); - void flush_impl(std::string const&) override; + void + flush_impl(std::string const &, internal::FlushParams const &) override; void read() override; -}; //Record - +}; // Record -template< typename T > -inline T -Record::timeOffset() const -{ return readFloatingpoint< T >("timeOffset"); } +template +inline T Record::timeOffset() const +{ + return readFloatingpoint("timeOffset"); +} -template< typename T > -inline Record& -Record::setTimeOffset(T to) +template +inline Record &Record::setTimeOffset(T to) { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); setAttribute("timeOffset", to); return *this; } -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/RecordComponent.hpp b/include/openPMD/RecordComponent.hpp index 7ccebcd3e3..f1c8cae451 100644 --- a/include/openPMD/RecordComponent.hpp +++ b/include/openPMD/RecordComponent.hpp @@ -20,79 +20,71 @@ */ #pragma once -#include "openPMD/backend/BaseRecordComponent.hpp" -#include "openPMD/auxiliary/ShareRaw.hpp" #include "openPMD/Dataset.hpp" +#include "openPMD/auxiliary/ShareRaw.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" +#include #include -#include #include +#include #include -#include #include #include +#include #include #include -#include // expose private and protected members for invasive testing #ifndef OPENPMD_protected -# define OPENPMD_protected protected +#define OPENPMD_protected protected #endif - namespace openPMD { namespace traits { -/** Emulate in the C++17 concept ContiguousContainer - * - * Users can 
implement this trait for a type to signal it can be used as - * contiguous container. - * - * See: - * https://en.cppreference.com/w/cpp/named_req/ContiguousContainer - */ -template< typename T > -struct IsContiguousContainer -{ - static constexpr bool value = false; -}; + /** Emulate in the C++17 concept ContiguousContainer + * + * Users can implement this trait for a type to signal it can be used as + * contiguous container. + * + * See: + * https://en.cppreference.com/w/cpp/named_req/ContiguousContainer + */ + template + struct IsContiguousContainer + { + static constexpr bool value = false; + }; -template< typename T_Value > -struct IsContiguousContainer< std::vector< T_Value > > -{ - static constexpr bool value = true; -}; - -template< - typename T_Value, - std::size_t N -> -struct IsContiguousContainer< std::array< T_Value, N > > -{ - static constexpr bool value = true; -}; + template + struct IsContiguousContainer > + { + static constexpr bool value = true; + }; + + template + struct IsContiguousContainer > + { + static constexpr bool value = true; + }; } // namespace traits -template< typename T > +template class DynamicMemoryView; class RecordComponent : public BaseRecordComponent { - template< - typename T, - typename T_key, - typename T_container - > + template friend class Container; friend class Iteration; friend class ParticleSpecies; - template< typename T_elem > + template friend class BaseRecord; friend class Record; friend class Mesh; - template< typename > + template friend class DynamicMemoryView; public: @@ -103,7 +95,7 @@ class RecordComponent : public BaseRecordComponent AUTO }; // Allocation - RecordComponent& setUnitSI(double); + RecordComponent &setUnitSI(double); /** * @brief Declare the dataset's type and extent. @@ -124,7 +116,7 @@ class RecordComponent : public BaseRecordComponent * * @return RecordComponent& */ - RecordComponent & resetDataset( Dataset ); + RecordComponent &resetDataset(Dataset); uint8_t getDimensionality() const; Extent getExtent() const; @@ -137,8 +129,8 @@ class RecordComponent : public BaseRecordComponent * @tparam T type of the stored value * @return A reference to this RecordComponent. */ - template< typename T > - RecordComponent& makeConstant(T); + template + RecordComponent &makeConstant(T); /** Create a dataset with zero extent in each dimension. * @@ -148,8 +140,8 @@ class RecordComponent : public BaseRecordComponent * zero. * @return A reference to this RecordComponent. */ - template< typename T > - RecordComponent& makeEmpty( uint8_t dimensions ); + template + RecordComponent &makeEmpty(uint8_t dimensions); /** * @brief Non-template overload of RecordComponent::makeEmpty(). @@ -159,7 +151,7 @@ class RecordComponent : public BaseRecordComponent * @param dimensions The dimensionality of the dataset. * @return RecordComponent& */ - RecordComponent& makeEmpty( Datatype dt, uint8_t dimensions ); + RecordComponent &makeEmpty(Datatype dt, uint8_t dimensions); /** Returns true if this is an empty record component * @@ -177,33 +169,28 @@ class RecordComponent : public BaseRecordComponent * If offset is non-zero and extent is {-1u} the leftover extent in the * record component will be selected. 
*/ - template< typename T > - std::shared_ptr< T > loadChunk( - Offset = { 0u }, - Extent = { -1u } ); + template + std::shared_ptr loadChunk(Offset = {0u}, Extent = {-1u}); /** Load a chunk of data into pre-allocated memory * - * shared_ptr for data must be pre-allocated, contiguous and large enough for extent + * shared_ptr for data must be pre-allocated, contiguous and large enough + * for extent * * Set offset to {0u} and extent to {-1u} for full selection. * * If offset is non-zero and extent is {-1u} the leftover extent in the * record component will be selected. */ - template< typename T > - void loadChunk( - std::shared_ptr< T >, - Offset, - Extent ); + template + void loadChunk(std::shared_ptr, Offset, Extent); - template< typename T > - void storeChunk(std::shared_ptr< T >, Offset, Extent); + template + void storeChunk(std::shared_ptr, Offset, Extent); - template< typename T_ContiguousContainer > + template typename std::enable_if< - traits::IsContiguousContainer< T_ContiguousContainer >::value - >::type + traits::IsContiguousContainer::value>::type storeChunk(T_ContiguousContainer &, Offset = {0u}, Extent = {-1u}); /** @@ -236,35 +223,34 @@ class RecordComponent : public BaseRecordComponent * * @return View into a buffer that can be filled with data. */ - template< typename T, typename F > - DynamicMemoryView< T > storeChunk( Offset, Extent, F && createBuffer ); + template + DynamicMemoryView storeChunk(Offset, Extent, F &&createBuffer); /** * Overload of span-based storeChunk() that uses operator new() to create * a buffer. */ - template< typename T > - DynamicMemoryView< T > storeChunk( Offset, Extent ); + template + DynamicMemoryView storeChunk(Offset, Extent); - static constexpr char const * const SCALAR = "\vScalar"; + static constexpr char const *const SCALAR = "\vScalar"; virtual ~RecordComponent() = default; -OPENPMD_protected: + OPENPMD_protected: RecordComponent(); void readBase(); - std::shared_ptr< std::queue< IOTask > > m_chunks; - std::shared_ptr< Attribute > m_constantValue; - std::shared_ptr< bool > m_isEmpty = std::make_shared< bool >( false ); + std::shared_ptr > m_chunks; + std::shared_ptr m_constantValue; + std::shared_ptr m_isEmpty = std::make_shared(false); // User has extended the dataset, but the EXTEND task must yet be flushed // to the backend - std::shared_ptr< bool > m_hasBeenExtended = - std::make_shared< bool >( false ); + std::shared_ptr m_hasBeenExtended = std::make_shared(false); private: - void flush(std::string const&); + void flush(std::string const &, internal::FlushParams const &); virtual void read(); /** @@ -273,7 +259,7 @@ class RecordComponent : public BaseRecordComponent * @param d The dataset description. Must have nonzero dimensions. * @return Reference to this RecordComponent instance. */ - RecordComponent& makeEmpty( Dataset d ); + RecordComponent &makeEmpty(Dataset d); /** * @brief Check recursively whether this RecordComponent is dirty. @@ -286,7 +272,6 @@ class RecordComponent : public BaseRecordComponent bool dirtyRecursive() const; protected: - /** * The same std::string that the parent class would pass as parameter to * RecordComponent::flush(). @@ -295,7 +280,7 @@ class RecordComponent : public BaseRecordComponent * (for use by the Span-based overload of RecordComponent::storeChunk()). * @todo Merge functionality with ownKeyInParent? 
*/ - std::shared_ptr< std::string > m_name = std::make_shared< std::string >(); + std::shared_ptr m_name = std::make_shared(); }; // RecordComponent } // namespace openPMD diff --git a/include/openPMD/RecordComponent.tpp b/include/openPMD/RecordComponent.tpp index 92ec2beb38..46ef527311 100644 --- a/include/openPMD/RecordComponent.tpp +++ b/include/openPMD/RecordComponent.tpp @@ -276,7 +276,7 @@ RecordComponent::storeChunk( Offset o, Extent e, F && createBuffer ) * Flush the openPMD hierarchy to the backend without flushing any actual * data yet. */ - seriesFlush( FlushLevel::SkeletonOnly ); + seriesFlush({FlushLevel::SkeletonOnly}); size_t size = 1; for( auto ext : e ) @@ -303,16 +303,16 @@ RecordComponent::storeChunk( Offset o, Extent e, F && createBuffer ) getBufferView.offset = o; getBufferView.extent = e; getBufferView.dtype = getDatatype(); - IOHandler()->enqueue( IOTask( this, getBufferView ) ); - IOHandler()->flush(); + IOHandler()->enqueue(IOTask(this, getBufferView)); + IOHandler()->flush(internal::defaultFlushParams); auto &out = *getBufferView.out; - if( !out.backendManagedBuffer ) + if (!out.backendManagedBuffer) { - auto data = std::forward< F >( createBuffer )( size ); - out.ptr = static_cast< void * >( data.get() ); - storeChunk( std::move( data ), std::move( o ), std::move( e ) ); + auto data = std::forward(createBuffer)(size); + out.ptr = static_cast(data.get()); + storeChunk(std::move(data), std::move(o), std::move(e)); } - return DynamicMemoryView< T >{ std::move( getBufferView ), size, *this }; + return DynamicMemoryView{std::move(getBufferView), size, *this}; } template< typename T > diff --git a/include/openPMD/Series.hpp b/include/openPMD/Series.hpp index dac26922f4..8cce096c9d 100644 --- a/include/openPMD/Series.hpp +++ b/include/openPMD/Series.hpp @@ -20,9 +20,6 @@ */ #pragma once -#include "openPMD/config.hpp" -#include "openPMD/backend/Attributable.hpp" -#include "openPMD/backend/Container.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IO/Access.hpp" #include "openPMD/IO/Format.hpp" @@ -38,7 +35,7 @@ #include "openPMD/version.hpp" #if openPMD_HAVE_MPI -# include +#include #endif #include @@ -46,10 +43,9 @@ // expose private and protected members for invasive testing #ifndef OPENPMD_private -# define OPENPMD_private private +#define OPENPMD_private private #endif - namespace openPMD { class ReadIterations; @@ -58,56 +54,58 @@ class SeriesInterface; namespace internal { -/** - * @brief Data members for Series. Pinned at one memory location. - * - * (Not movable or copyable) - * - */ -class SeriesData : public AttributableData -{ -public: - explicit SeriesData() = default; - - SeriesData( SeriesData const & ) = delete; - SeriesData( SeriesData && ) = delete; - - SeriesData & operator=( SeriesData const & ) = delete; - SeriesData & operator=( SeriesData && ) = delete; - - virtual ~SeriesData() = default; - - Container< Iteration, uint64_t > iterations{}; - - auxiliary::Option< WriteIterations > m_writeIterations; - auxiliary::Option< std::string > m_overrideFilebasedFilename; - std::string m_name; - std::string m_filenamePrefix; - std::string m_filenamePostfix; - int m_filenamePadding; - IterationEncoding m_iterationEncoding{}; - Format m_format; /** - * Whether a step is currently active for this iteration. - * Used for group-based iteration layout, see SeriesData.hpp for - * iteration-based layout. - * Access via stepStatus() method to automatically select the correct - * one among both flags. 
- */ - StepStatus m_stepStatus = StepStatus::NoStep; - bool m_parseLazily = false; - bool m_lastFlushSuccessful = true; -}; // SeriesData - -class SeriesInternal; + * @brief Data members for Series. Pinned at one memory location. + * + * (Not movable or copyable) + * + */ + class SeriesData : public AttributableData + { + public: + explicit SeriesData() = default; + + SeriesData(SeriesData const &) = delete; + SeriesData(SeriesData &&) = delete; + + SeriesData &operator=(SeriesData const &) = delete; + SeriesData &operator=(SeriesData &&) = delete; + + virtual ~SeriesData() = default; + + Container iterations{}; + + auxiliary::Option m_writeIterations; + auxiliary::Option m_overrideFilebasedFilename; + std::string m_name; + std::string m_filenamePrefix; + std::string m_filenamePostfix; + int m_filenamePadding; + IterationEncoding m_iterationEncoding{}; + Format m_format; + /** + * Whether a step is currently active for this iteration. + * Used for group-based iteration layout, see SeriesData.hpp for + * iteration-based layout. + * Access via stepStatus() method to automatically select the correct + * one among both flags. + */ + StepStatus m_stepStatus = StepStatus::NoStep; + bool m_parseLazily = false; + bool m_lastFlushSuccessful = true; + }; // SeriesData + + class SeriesInternal; } // namespace internal /** @brief Implementation for the root level of the openPMD hierarchy. * * Entry point and common link between all iterations of particle and mesh data. * - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#hierarchy-of-the-data-file - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#iterations-and-time-series + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#hierarchy-of-the-data-file + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#iterations-and-time-series */ class SeriesInterface : public AttributableInterface { @@ -121,105 +119,140 @@ class SeriesInterface : public AttributableInterface protected: // Should not be called publicly, only by implementing classes - SeriesInterface( internal::SeriesData *, internal::AttributableData * ); + SeriesInterface(internal::SeriesData *, internal::AttributableData *); public: /** - * @return String representing the current enforced version of the openPMD standard. + * @return String representing the current enforced version of the openPMD + * standard. */ std::string openPMD() const; - /** Set the version of the enforced openPMD standard. + /** Set the version of the enforced openPMD + * standard. * - * @param openPMD String MAJOR.MINOR.REVISION of the desired version of the openPMD standard. + * @param openPMD String MAJOR.MINOR.REVISION of the + * desired version of the openPMD standard. * @return Reference to modified series. */ - SeriesInterface& setOpenPMD(std::string const& openPMD); + SeriesInterface &setOpenPMD(std::string const &openPMD); /** - * @return 32-bit mask of applied extensions to the openPMD standard. + * @return 32-bit mask of applied extensions to the openPMD + * standard. */ uint32_t openPMDextension() const; - /** Set a 32-bit mask of applied extensions to the openPMD standard. + /** Set a 32-bit mask of applied extensions to the openPMD + * standard. * - * @param openPMDextension Unsigned 32-bit integer used as a bit-mask of applied extensions. + * @param openPMDextension Unsigned 32-bit integer used as a bit-mask of + * applied extensions. * @return Reference to modified series. 
*/ - SeriesInterface& setOpenPMDextension(uint32_t openPMDextension); + SeriesInterface &setOpenPMDextension(uint32_t openPMDextension); /** - * @return String representing the common prefix for all data sets and sub-groups of a specific iteration. + * @return String representing the common prefix for all data sets and + * sub-groups of a specific iteration. */ std::string basePath() const; - /** Set the common prefix for all data sets and sub-groups of a specific iteration. + /** Set the common prefix for all data sets and sub-groups of a specific + * iteration. * - * @param basePath String of the common prefix for all data sets and sub-groups of a specific iteration. + * @param basePath String of the common prefix for all data sets and + * sub-groups of a specific iteration. * @return Reference to modified series. */ - SeriesInterface& setBasePath(std::string const& basePath); + SeriesInterface &setBasePath(std::string const &basePath); /** * @throw no_such_attribute_error If optional attribute is not present. - * @return String representing the path to mesh records, relative(!) to basePath. + * @return String representing the path to mesh records, relative(!) to + * basePath. */ std::string meshesPath() const; - /** Set the path to mesh records, relative(!) to basePath. + /** Set the path to mesh + * records, relative(!) to basePath. * - * @param meshesPath String of the path to mesh records, relative(!) to basePath. + * @param meshesPath String of the path to mesh + * records, relative(!) to basePath. * @return Reference to modified series. */ - SeriesInterface& setMeshesPath(std::string const& meshesPath); + SeriesInterface &setMeshesPath(std::string const &meshesPath); /** * @throw no_such_attribute_error If optional attribute is not present. - * @return String representing the path to particle species, relative(!) to basePath. + * @return String representing the path to particle species, relative(!) to + * basePath. */ std::string particlesPath() const; - /** Set the path to groups for each particle species, relative(!) to basePath. + /** Set the path to groups for each particle + * species, relative(!) to basePath. * - * @param particlesPath String of the path to groups for each particle species, relative(!) to basePath. + * @param particlesPath String of the path to groups for each particle + * species, relative(!) to basePath. * @return Reference to modified series. */ - SeriesInterface& setParticlesPath(std::string const& particlesPath); + SeriesInterface &setParticlesPath(std::string const &particlesPath); /** * @throw no_such_attribute_error If optional attribute is not present. - * @return String indicating author and contact for the information in the file. + * @return String indicating author and contact for the information in the + * file. */ std::string author() const; /** Indicate the author and contact for the information in the file. * - * @param author String indicating author and contact for the information in the file. + * @param author String indicating author and contact for the information + * in the file. * @return Reference to modified series. */ - SeriesInterface& setAuthor(std::string const& author); + SeriesInterface &setAuthor(std::string const &author); /** * @throw no_such_attribute_error If optional attribute is not present. 
- * @return String indicating the software/code/simulation that created the file; + * @return String indicating the software/code/simulation that created the + * file; */ std::string software() const; /** Indicate the software/code/simulation that created the file. * - * @param newName String indicating the software/code/simulation that created the file. - * @param newVersion String indicating the version of the software/code/simulation that created the file. + * @param newName String indicating the software/code/simulation that + * created the file. + * @param newVersion String indicating the version of the + * software/code/simulation that created the file. * @return Reference to modified series. */ - SeriesInterface& setSoftware(std::string const& newName, std::string const& newVersion = std::string("unspecified")); + SeriesInterface &setSoftware( + std::string const &newName, + std::string const &newVersion = std::string("unspecified")); /** * @throw no_such_attribute_error If optional attribute is not present. - * @return String indicating the version of the software/code/simulation that created the file. + * @return String indicating the version of the software/code/simulation + * that created the file. */ std::string softwareVersion() const; - /** Indicate the version of the software/code/simulation that created the file. + /** Indicate the version of the software/code/simulation that created the + * file. * * @deprecated Set the version with the second argument of setSoftware() * - * @param softwareVersion String indicating the version of the software/code/simulation that created the file. + * @param softwareVersion String indicating the version of the + * software/code/simulation that created the file. * @return Reference to modified series. */ - [[deprecated("Set the version with the second argument of setSoftware()")]] - SeriesInterface& setSoftwareVersion(std::string const& softwareVersion); + [[deprecated( + "Set the version with the second argument of " + "setSoftware()")]] SeriesInterface & + setSoftwareVersion(std::string const &softwareVersion); /** * @throw no_such_attribute_error If optional attribute is not present. @@ -231,61 +264,79 @@ class SeriesInterface : public AttributableInterface * @param date String indicating the date of creation. * @return Reference to modified series. */ - SeriesInterface& setDate(std::string const& date); + SeriesInterface &setDate(std::string const &date); /** * @throw no_such_attribute_error If optional attribute is not present. - * @return String indicating dependencies of software that were used to create the file. + * @return String indicating dependencies of software that were used to + * create the file. */ std::string softwareDependencies() const; /** Indicate dependencies of software that were used to create the file. * - * @param newSoftwareDependencies String indicating dependencies of software that were used to create the file (semicolon-separated list if needed). + * @param newSoftwareDependencies String indicating dependencies of + * software that were used to create the file (semicolon-separated list if + * needed). * @return Reference to modified series. */ - SeriesInterface& setSoftwareDependencies(std::string const& newSoftwareDependencies); + SeriesInterface & + setSoftwareDependencies(std::string const &newSoftwareDependencies); /** * @throw no_such_attribute_error If optional attribute is not present. - * @return String indicating the machine or relevant hardware that created the file. 
+ * @return String indicating the machine or relevant hardware that created + * the file. */ std::string machine() const; /** Indicate the machine or relevant hardware that created the file. * - * @param newMachine String indicating the machine or relevant hardware that created the file (semicolon-separated list if needed).. + * @param newMachine String indicating the machine or relevant hardware + * that created the file (semicolon-separated list if needed).. * @return Reference to modified series. */ - SeriesInterface& setMachine(std::string const& newMachine); + SeriesInterface &setMachine(std::string const &newMachine); /** * @return Current encoding style for multiple iterations in this series. */ IterationEncoding iterationEncoding() const; - /** Set the encoding style for multiple iterations in this series. - * A preview on the openPMD 2.0 variable-based iteration encoding can be activated with this call. - * Making full use of the variable-based iteration encoding requires (1) explicit support by the backend (available only in ADIOS2) and (2) use of the openPMD streaming API. - * In other backends and without the streaming API, only one iteration/snapshot may be written in the variable-based encoding, making this encoding a good choice for single-snapshot data dumps. + /** Set the encoding + * style for multiple iterations in this series. A preview on the openPMD 2.0 + * variable-based iteration encoding can be activated with this call. + * Making full use of the variable-based iteration encoding requires (1) + * explicit support by the backend (available only in ADIOS2) and (2) use of + * the openPMD streaming API. In other backends and without the streaming + * API, only one iteration/snapshot may be written in the variable-based + * encoding, making this encoding a good choice for single-snapshot data + * dumps. * - * @param iterationEncoding Desired encoding style for multiple iterations in this series. + * @param iterationEncoding Desired encoding + * style for multiple iterations in this series. * @return Reference to modified series. */ - SeriesInterface& setIterationEncoding(IterationEncoding iterationEncoding); + SeriesInterface &setIterationEncoding(IterationEncoding iterationEncoding); /** - * @return String describing a pattern describing how to access single iterations in the raw file. + * @return String describing a pattern + * describing how to access single iterations in the raw file. */ std::string iterationFormat() const; - /** Set a pattern describing how to access single iterations in the raw file. + /** Set a pattern + * describing how to access single iterations in the raw file. * - * @param iterationFormat String with the iteration regex \%T defining either - * the series of files (fileBased) - * or the series of groups within a single file (groupBased) - * that allows to extract the iteration from it. - * For fileBased formats the iteration must be included in the file name. - * The format depends on the selected iterationEncoding method. + * @param iterationFormat String with the iteration regex \%T + * defining either the series of files (fileBased) or the series of groups + * within a single file (groupBased) that allows to extract the iteration + * from it. For fileBased formats the iteration must be included in the file + * name. The format depends on the selected iterationEncoding method. * @return Reference to modified series. 
*/ - SeriesInterface& setIterationFormat(std::string const& iterationFormat); + SeriesInterface &setIterationFormat(std::string const &iterationFormat); /** * @return String of a pattern for file names. @@ -294,10 +345,11 @@ class SeriesInterface : public AttributableInterface /** Set the pattern for file names. * - * @param name String of the pattern for file names. Must include iteration regex \%T for fileBased data. + * @param name String of the pattern for file names. Must include + * iteration regex \%T for fileBased data. * @return Reference to modified series. */ - SeriesInterface& setName(std::string const& name); + SeriesInterface &setName(std::string const &name); /** The currently used backend * @@ -311,60 +363,64 @@ class SeriesInterface : public AttributableInterface */ void flush(); -OPENPMD_private: + OPENPMD_private: static constexpr char const * const BASEPATH = "/data/%T/"; struct ParsedInput; using iterations_t = decltype(internal::SeriesData::iterations); using iterations_iterator = iterations_t::iterator; - internal::SeriesData * m_series = nullptr; + internal::SeriesData *m_series = nullptr; - inline internal::SeriesData & get() + inline internal::SeriesData &get() { - if( m_series ) + if (m_series) { return *m_series; } else { throw std::runtime_error( - "[Series] Cannot use default-constructed Series." ); + "[Series] Cannot use default-constructed Series."); } } - inline internal::SeriesData const & get() const + inline internal::SeriesData const &get() const { - if( m_series ) + if (m_series) { return *m_series; } else { throw std::runtime_error( - "[Series] Cannot use default-constructed Series." ); - } } + "[Series] Cannot use default-constructed Series."); + } + } - std::unique_ptr< ParsedInput > parseInput(std::string); - void init(std::shared_ptr< AbstractIOHandler >, std::unique_ptr< ParsedInput >); - void initDefaults( IterationEncoding ); + std::unique_ptr parseInput(std::string); + void init(std::shared_ptr, std::unique_ptr); + void initDefaults(IterationEncoding); /** * @brief Internal call for flushing a Series. * * Any flushing of the Series will pass through this call. - * + * * @param begin Start of the range of iterations to flush. * @param end End of the range of iterations to flush. - * @param level Flush level, as documented in AbstractIOHandler.hpp. + * @param flushParams Flush params, as documented in AbstractIOHandler.hpp. * @param flushIOHandler Tasks will always be enqueued to the backend. * If this flag is true, tasks will be flushed to the backend. 
*/ - std::future< void > flush_impl( + std::future flush_impl( iterations_iterator begin, iterations_iterator end, - FlushLevel level, - bool flushIOHandler = true ); - void flushFileBased( iterations_iterator begin, iterations_iterator end ); + internal::FlushParams flushParams, + bool flushIOHandler = true); + void flushFileBased( + iterations_iterator begin, + iterations_iterator end, + internal::FlushParams flushParams); /* * Group-based and variable-based iteration layouts share a lot of logic * (realistically, the variable-based iteration layout only throws out @@ -372,19 +428,22 @@ class SeriesInterface : public AttributableInterface * As a convention, methods that deal with both layouts are called * .*GorVBased, short for .*GroupOrVariableBased */ - void flushGorVBased( iterations_iterator begin, iterations_iterator end ); + void flushGorVBased( + iterations_iterator begin, + iterations_iterator end, + internal::FlushParams flushParams); void flushMeshesPath(); void flushParticlesPath(); - void readFileBased( ); - void readOneIterationFileBased( std::string const & filePath ); + void readFileBased(); + void readOneIterationFileBased(std::string const &filePath); /** * Note on re-parsing of a Series: * If init == false, the parsing process will seek for new * Iterations/Records/Record Components etc. */ - void readGorVBased( bool init = true ); + void readGorVBased(bool init = true); void readBase(); - std::string iterationFilename( uint64_t i ); + std::string iterationFilename(uint64_t i); enum class IterationOpened : bool { @@ -397,21 +456,20 @@ class SeriesInterface : public AttributableInterface * Only open if the iteration is dirty and if it is not in deferred * parse state. */ - IterationOpened openIterationIfDirty( uint64_t index, Iteration iteration ); + IterationOpened openIterationIfDirty(uint64_t index, Iteration iteration); /* * Open an iteration. Ensures that the iteration's m_closed status * is set properly and that any files pertaining to the iteration * is opened. * Does not create files when called in CREATE mode. */ - void openIteration( uint64_t index, Iteration iteration ); + void openIteration(uint64_t index, Iteration iteration); /** * Find the given iteration in Series::iterations and return an iterator * into Series::iterations at that place. */ - iterations_iterator - indexOf( Iteration const & ); + iterations_iterator indexOf(Iteration const &); /** * @brief In step-based IO mode, begin or end an IO step for the given @@ -428,39 +486,40 @@ class SeriesInterface : public AttributableInterface * @param iteration The actual Iteration object. 
* @return AdvanceStatus */ - AdvanceStatus - advance( + AdvanceStatus advance( AdvanceMode mode, - internal::AttributableData & file, + internal::AttributableData &file, iterations_iterator it, - Iteration & iteration ); + Iteration &iteration); }; // SeriesInterface namespace internal { -class SeriesInternal : public SeriesData, public SeriesInterface -{ - friend struct SeriesShared; - friend class openPMD::Iteration; - friend class openPMD::Series; - friend class openPMD::Writable; + class SeriesInternal + : public SeriesData + , public SeriesInterface + { + friend struct SeriesShared; + friend class openPMD::Iteration; + friend class openPMD::Series; + friend class openPMD::Writable; -public: + public: #if openPMD_HAVE_MPI - SeriesInternal( - std::string const & filepath, - Access at, - MPI_Comm comm, - std::string const & options = "{}" ); + SeriesInternal( + std::string const &filepath, + Access at, + MPI_Comm comm, + std::string const &options = "{}"); #endif - SeriesInternal( - std::string const & filepath, - Access at, - std::string const & options = "{}" ); - // @todo make AttributableInterface<>::linkHierarchy non-virtual - virtual ~SeriesInternal(); -}; + SeriesInternal( + std::string const &filepath, + Access at, + std::string const &options = "{}"); + // @todo make AttributableInterface<>::linkHierarchy non-virtual + virtual ~SeriesInternal(); + }; } // namespace internal /** @brief Root level of the openPMD hierarchy. @@ -470,26 +529,28 @@ class SeriesInternal : public SeriesData, public SeriesInterface * An instance can be created either directly via the given constructors or via * the SeriesBuilder class. * - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#hierarchy-of-the-data-file - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#iterations-and-time-series + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#hierarchy-of-the-data-file + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#iterations-and-time-series */ class Series : public SeriesInterface { private: - std::shared_ptr< internal::SeriesInternal > m_series; + std::shared_ptr m_series; // constructor from private parts - Series( std::shared_ptr< internal::SeriesInternal > ); + Series(std::shared_ptr); public: explicit Series(); #if openPMD_HAVE_MPI Series( - std::string const & filepath, + std::string const &filepath, Access at, MPI_Comm comm, - std::string const & options = "{}" ); + std::string const &options = "{}"); #endif /** @@ -502,13 +563,13 @@ class Series : public SeriesInterface * to a JSON textfile, prepended by an at sign '@'. */ Series( - std::string const & filepath, + std::string const &filepath, Access at, - std::string const & options = "{}" ); + std::string const &options = "{}"); virtual ~Series() = default; - Container< Iteration, uint64_t > iterations; + Container iterations; /** * @brief Is this a usable Series object? 
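For orientation while reading the reformatted Series and RecordComponent declarations above, here is a minimal usage sketch of how they fit together from the user side, written against the span-based storeChunk() overload declared in RecordComponent.hpp. The file name, the "E"/"x" record names, the extent, and the choice of backend are illustrative assumptions for this sketch only and are not part of this patch.

```cpp
// Minimal sketch: write one iteration using the span-based storeChunk().
// Assumptions (not part of this patch): file name, mesh/component names,
// dataset extent, and the use of a file-based %T iteration encoding.
#include <openPMD/openPMD.hpp>

#include <cstddef>

int main()
{
    using namespace openPMD;

    Series series("data_%T.bp", Access::CREATE);
    auto iteration = series.writeIterations()[0];
    auto E_x = iteration.meshes["E"]["x"];

    Extent extent = {1000};
    E_x.resetDataset({Datatype::DOUBLE, extent});

    // Request a backend-managed buffer instead of allocating one ourselves;
    // acquire the span right before filling it (see DynamicMemoryView below).
    auto view = E_x.storeChunk<double>(Offset{0}, extent);
    auto span = view.currentBuffer();
    for (std::size_t i = 0; i < span.size(); ++i)
        span[i] = static_cast<double>(i);

    iteration.close(); // flushes the iteration's data to the backend
    return 0;
}
```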
diff --git a/include/openPMD/Span.hpp b/include/openPMD/Span.hpp
index e4d0689afe..11325aea52 100644
--- a/include/openPMD/Span.hpp
+++ b/include/openPMD/Span.hpp
@@ -33,37 +33,36 @@ namespace openPMD
  * Any existing member behaves equivalently to those documented here:
  * https://en.cppreference.com/w/cpp/container/span
  */
-template< typename T >
+template <typename T>
 class Span
 {
-    template< typename >
+    template <typename>
     friend class DynamicMemoryView;
 
 private:
-    T * m_ptr;
+    T *m_ptr;
     size_t m_size;
 
-    Span( T * ptr, size_t size ) : m_ptr( ptr ), m_size( size )
-    {
-    }
+    Span(T *ptr, size_t size) : m_ptr(ptr), m_size(size)
+    {}
 
 public:
     using iterator = T *;
-    using reverse_iterator = std::reverse_iterator< iterator >;
+    using reverse_iterator = std::reverse_iterator<iterator>;
 
     size_t size() const
     {
         return m_size;
     }
 
-    inline T * data() const
+    inline T *data() const
     {
         return m_ptr;
     }
 
-    inline T & operator[]( size_t i ) const
+    inline T &operator[](size_t i) const
     {
-        return data()[ i ];
+        return data()[i];
     }
 
     inline iterator begin() const
@@ -77,11 +76,11 @@ class Span
     inline reverse_iterator rbegin() const
     {
         // std::reverse_iterator does the -1 thing automatically
-        return reverse_iterator{ data() + size() };
+        return reverse_iterator{data() + size()};
     }
     inline reverse_iterator rend() const
     {
-        return reverse_iterator{ data() };
+        return reverse_iterator{data()};
     }
 };
 
@@ -93,22 +92,22 @@ class Span
  * Hence, the concrete pointer needs to be acquired right before writing
  * to it. Otherwise, a use after free might occur.
  */
-template< typename T >
+template <typename T>
 class DynamicMemoryView
 {
     friend class RecordComponent;
 
 private:
-    using param_t = Parameter< Operation::GET_BUFFER_VIEW >;
+    using param_t = Parameter<Operation::GET_BUFFER_VIEW>;
     param_t m_param;
     size_t m_size;
     RecordComponent m_recordComponent;
 
     DynamicMemoryView(
-        param_t param, size_t size, RecordComponent recordComponent )
-        : m_param( std::move( param ) )
-        , m_size( size )
-        , m_recordComponent( std::move( recordComponent ) )
+        param_t param, size_t size, RecordComponent recordComponent)
+        : m_param(std::move(param))
+        , m_size(size)
+        , m_recordComponent(std::move(recordComponent))
     {
         m_param.update = true;
     }
@@ -117,16 +116,16 @@ class DynamicMemoryView
     /**
      * @brief Acquire the underlying buffer at its current position in memory.
*/ - Span< T > currentBuffer() + Span currentBuffer() { - if( m_param.out->backendManagedBuffer ) + if (m_param.out->backendManagedBuffer) { // might need to update m_recordComponent.IOHandler()->enqueue( - IOTask( &m_recordComponent, m_param ) ); - m_recordComponent.IOHandler()->flush(); + IOTask(&m_recordComponent, m_param)); + m_recordComponent.IOHandler()->flush(internal::defaultFlushParams); } - return Span< T >{ static_cast< T * >( m_param.out->ptr ), m_size }; + return Span{static_cast(m_param.out->ptr), m_size}; } }; -} +} // namespace openPMD diff --git a/include/openPMD/Streaming.hpp b/include/openPMD/Streaming.hpp index 19c6bdd88c..7bc84341aa 100644 --- a/include/openPMD/Streaming.hpp +++ b/include/openPMD/Streaming.hpp @@ -19,7 +19,7 @@ namespace openPMD */ enum class AdvanceStatus : unsigned char { - OK, /* stream goes on */ + OK, /* stream goes on */ OVER /* stream is over */ }; @@ -44,6 +44,6 @@ enum class AdvanceMode : unsigned char enum class StepStatus : unsigned char { DuringStep, /* step is currently active */ - NoStep /* no step is currently active */ + NoStep /* no step is currently active */ }; } // namespace openPMD diff --git a/include/openPMD/UnitDimension.hpp b/include/openPMD/UnitDimension.hpp index 11ad683d86..232a6ee1f7 100644 --- a/include/openPMD/UnitDimension.hpp +++ b/include/openPMD/UnitDimension.hpp @@ -22,21 +22,20 @@ #include - namespace openPMD { - /** Physical dimension of a record - * - * Dimensional base quantities of the international system of quantities - */ - enum class UnitDimension : uint8_t - { - L = 0, //!< length - M, //!< mass - T, //!< time - I, //!< electric current - theta, //!< thermodynamic temperature - N, //!< amount of substance - J //!< luminous intensity - }; +/** Physical dimension of a record + * + * Dimensional base quantities of the international system of quantities + */ +enum class UnitDimension : uint8_t +{ + L = 0, //!< length + M, //!< mass + T, //!< time + I, //!< electric current + theta, //!< thermodynamic temperature + N, //!< amount of substance + J //!< luminous intensity +}; } // namespace openPMD diff --git a/include/openPMD/WriteIterations.hpp b/include/openPMD/WriteIterations.hpp index 3cb304aa01..71b9b3ad9b 100644 --- a/include/openPMD/WriteIterations.hpp +++ b/include/openPMD/WriteIterations.hpp @@ -24,7 +24,6 @@ #include "openPMD/auxiliary/Option.hpp" #include "openPMD/backend/Container.hpp" - namespace openPMD { class Series; @@ -44,13 +43,13 @@ class Series; * not possible once it has been closed. * */ -class WriteIterations : private Container< Iteration, uint64_t > +class WriteIterations : private Container { friend class Series; private: - using iterations_t = Container< Iteration, uint64_t >; - + using iterations_t = Container; + public: using key_type = typename iterations_t::key_type; using mapped_type = typename iterations_t::mapped_type; @@ -61,19 +60,19 @@ class WriteIterations : private Container< Iteration, uint64_t > struct SharedResources { iterations_t iterations; - auxiliary::Option< uint64_t > currentlyOpen; + auxiliary::Option currentlyOpen; - SharedResources( iterations_t ); + SharedResources(iterations_t); ~SharedResources(); }; - WriteIterations( iterations_t ); + WriteIterations(iterations_t); explicit WriteIterations() = default; //! 
Index of the last opened iteration - std::shared_ptr< SharedResources > shared; + std::shared_ptr shared; public: - mapped_type & operator[]( key_type const & key ) override; - mapped_type & operator[]( key_type && key ) override; + mapped_type &operator[](key_type const &key) override; + mapped_type &operator[](key_type &&key) override; }; } // namespace openPMD diff --git a/include/openPMD/auxiliary/Date.hpp b/include/openPMD/auxiliary/Date.hpp index 0377716c9f..c17a9aca2c 100644 --- a/include/openPMD/auxiliary/Date.hpp +++ b/include/openPMD/auxiliary/Date.hpp @@ -22,16 +22,17 @@ #include - namespace openPMD { namespace auxiliary { /** Return the current datetime as string * - * @param format time format string, @see http://www.cplusplus.com/reference/ctime/strftime/ + * @param format time format string, @see + * http://www.cplusplus.com/reference/ctime/strftime/ * @return std::string with formatted date */ - std::string getDateString( std::string const & format = std::string( "%F %T %z" ) ); + std::string + getDateString(std::string const &format = std::string("%F %T %z")); } // namespace auxiliary } // namespace openPMD diff --git a/include/openPMD/auxiliary/DerefDynamicCast.hpp b/include/openPMD/auxiliary/DerefDynamicCast.hpp index 9a483e147c..1620334ea0 100644 --- a/include/openPMD/auxiliary/DerefDynamicCast.hpp +++ b/include/openPMD/auxiliary/DerefDynamicCast.hpp @@ -22,10 +22,10 @@ #include - namespace openPMD { -namespace auxiliary { +namespace auxiliary +{ /** Returns a value reference stored in a dynamically casted pointer * * Safe version of *dynamic_cast< New_Type* >( some_ptr ); This function @@ -35,12 +35,13 @@ namespace auxiliary { * @tparam New_Type new type to cast to * @tparam Old_Type old type to cast from * @param[in] ptr and input pointer type - * @return value reference of a dereferenced, dynamically casted ptr to New_Type* + * @return value reference of a dereferenced, dynamically casted ptr to + * New_Type* */ - template - inline New_Type & - deref_dynamic_cast(Old_Type *ptr) { - auto const tmp_ptr = dynamic_cast< New_Type * >( ptr ); + template + inline New_Type &deref_dynamic_cast(Old_Type *ptr) + { + auto const tmp_ptr = dynamic_cast(ptr); if (tmp_ptr == nullptr) throw std::runtime_error("Dynamic cast returned a nullptr!"); return *tmp_ptr; diff --git a/include/openPMD/auxiliary/Environment.hpp b/include/openPMD/auxiliary/Environment.hpp index 7971141b7a..2995d2d673 100644 --- a/include/openPMD/auxiliary/Environment.hpp +++ b/include/openPMD/auxiliary/Environment.hpp @@ -24,34 +24,34 @@ #include #include #include -#include #include - +#include namespace openPMD { namespace auxiliary { - inline std::string getEnvString( std::string const & key, std::string const defaultValue ) + inline std::string + getEnvString(std::string const &key, std::string const defaultValue) { - char const * env = std::getenv( key.c_str( ) ); - if ( env != nullptr ) + char const *env = std::getenv(key.c_str()); + if (env != nullptr) return std::string{env}; else return defaultValue; } - inline int getEnvNum( std::string const & key, int defaultValue ) + inline int getEnvNum(std::string const &key, int defaultValue) { - char const * env = std::getenv( key.c_str( ) ); - if ( env != nullptr ) + char const *env = std::getenv(key.c_str()); + if (env != nullptr) { std::string env_string{env}; try { - return std::stoi( env_string ); + return std::stoi(env_string); } - catch ( std::invalid_argument const & ) + catch (std::invalid_argument const &) { return defaultValue; } diff --git 
a/include/openPMD/auxiliary/Export.hpp b/include/openPMD/auxiliary/Export.hpp index 4ad10a571e..394397a092 100644 --- a/include/openPMD/auxiliary/Export.hpp +++ b/include/openPMD/auxiliary/Export.hpp @@ -21,20 +21,22 @@ #pragma once #ifndef OPENPMDAPI_EXPORT -# ifdef _MSC_VER -# define OPENPMDAPI_EXPORT __declspec( dllexport ) -# elif defined(__NVCC__) -# define OPENPMDAPI_EXPORT -# else -# define OPENPMDAPI_EXPORT __attribute__((visibility("default"))) -# endif +#ifdef _MSC_VER +#define OPENPMDAPI_EXPORT __declspec(dllexport) +#elif defined(__NVCC__) +#define OPENPMDAPI_EXPORT +#else +#define OPENPMDAPI_EXPORT __attribute__((visibility("default"))) +#endif #endif #ifndef OPENPMDAPI_EXPORT_ENUM_CLASS -# if defined(__GNUC__) && (__GNUC__ < 6) && !defined(__clang__) && !defined(__INTEL_COMPILER) - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43407 -# define OPENPMDAPI_EXPORT_ENUM_CLASS(ECNAME) enum class ECNAME : OPENPMDAPI_EXPORT unsigned int -# else -# define OPENPMDAPI_EXPORT_ENUM_CLASS(ECNAME) enum class OPENPMDAPI_EXPORT ECNAME -# endif +#if defined(__GNUC__) && (__GNUC__ < 6) && !defined(__clang__) && \ + !defined(__INTEL_COMPILER) +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43407 +#define OPENPMDAPI_EXPORT_ENUM_CLASS(ECNAME) \ + enum class ECNAME : OPENPMDAPI_EXPORT unsigned int +#else +#define OPENPMDAPI_EXPORT_ENUM_CLASS(ECNAME) enum class OPENPMDAPI_EXPORT ECNAME +#endif #endif diff --git a/include/openPMD/auxiliary/Filesystem.hpp b/include/openPMD/auxiliary/Filesystem.hpp index 686cbf1384..ab728b05d0 100644 --- a/include/openPMD/auxiliary/Filesystem.hpp +++ b/include/openPMD/auxiliary/Filesystem.hpp @@ -26,7 +26,7 @@ #include "openPMD/config.hpp" #if openPMD_HAVE_MPI -# include +#include #endif namespace openPMD @@ -34,70 +34,70 @@ namespace openPMD namespace auxiliary { #ifdef _WIN32 -static constexpr char const directory_separator = '\\'; + static constexpr char const directory_separator = '\\'; #else -static constexpr char const directory_separator = '/'; + static constexpr char const directory_separator = '/'; #endif -/** Check if a directory exists at a give absolute or relative path. - * - * @param path Absolute or relative path to examine. - * @return true if the given path or file status corresponds to an existing directory, false otherwise. - */ -bool -directory_exists(std::string const& path); + /** Check if a directory exists at a give absolute or relative path. + * + * @param path Absolute or relative path to examine. + * @return true if the given path or file status corresponds to an existing + * directory, false otherwise. + */ + bool directory_exists(std::string const &path); -/** Check if a file exists at a given absolute or relative path. - * - * @param path Absolute or relative path to examine. - * @return true if the given path or file status corresponds to an existing file, false otherwise. - */ -bool -file_exists(std::string const& path); + /** Check if a file exists at a given absolute or relative path. + * + * @param path Absolute or relative path to examine. + * @return true if the given path or file status corresponds to an existing + * file, false otherwise. + */ + bool file_exists(std::string const &path); -/** List all contents of a directory at a given absolute or relative path. - * - * @note The equivalent of `ls path` - * @note Both contained files and directories are listed. - * `.` and `..` are not returned. - * @throw std::system_error when the given path is not a valid directory. 
- * @param path Absolute or relative path of directory to examine. - * @return Vector of all contained files and directories. - */ -std::vector< std::string > -list_directory(std::string const& path ); + /** List all contents of a directory at a given absolute or relative path. + * + * @note The equivalent of `ls path` + * @note Both contained files and directories are listed. + * `.` and `..` are not returned. + * @throw std::system_error when the given path is not a valid directory. + * @param path Absolute or relative path of directory to examine. + * @return Vector of all contained files and directories. + */ + std::vector list_directory(std::string const &path); -/** Create all required directories to have a reachable given absolute or relative path. - * - * @note The equivalent of `mkdir -p path` - * @param path Absolute or relative path to the new directory to create. - * @return true if a directory was created for the directory p resolves to, false otherwise. - */ -bool -create_directories(std::string const& path); + /** Create all required directories to have a reachable given absolute or + * relative path. + * + * @note The equivalent of `mkdir -p path` + * @param path Absolute or relative path to the new directory to + * create. + * @return true if a directory was created for the directory p resolves to, + * false otherwise. + */ + bool create_directories(std::string const &path); -/** Remove the directory identified by the given path. - * - * @note The equivalent of `rm -r path`. - * @param path Absolute or relative path to the directory to delete. - * @return true if the directory was deleted, false otherwise and if it did not exist. - */ -bool -remove_directory(std::string const& path); + /** Remove the directory identified by the given path. + * + * @note The equivalent of `rm -r path`. + * @param path Absolute or relative path to the directory to delete. + * @return true if the directory was deleted, false otherwise and if it did + * not exist. + */ + bool remove_directory(std::string const &path); -/** Remove the file identified by the given path. - * - * @note The equivalent of `rm path`. - * @param path Absolute or relative path to the file to delete. - * @return true if the file was deleted, false otherwise and if it did not exist. - */ -bool -remove_file(std::string const& path); + /** Remove the file identified by the given path. + * + * @note The equivalent of `rm path`. + * @param path Absolute or relative path to the file to delete. + * @return true if the file was deleted, false otherwise and if it did not + * exist. 
+ */ + bool remove_file(std::string const &path); #if openPMD_HAVE_MPI -std::string -collective_file_read( std::string const & path, MPI_Comm ); + std::string collective_file_read(std::string const &path, MPI_Comm); #endif } // namespace auxiliary diff --git a/include/openPMD/auxiliary/JSON.hpp b/include/openPMD/auxiliary/JSON.hpp index 0cbdad175e..a051709d95 100644 --- a/include/openPMD/auxiliary/JSON.hpp +++ b/include/openPMD/auxiliary/JSON.hpp @@ -26,10 +26,10 @@ #include #if openPMD_HAVE_MPI -# include +#include #endif -#include // std::shared_ptr +#include // std::shared_ptr #include // std::forward namespace openPMD @@ -53,21 +53,20 @@ namespace auxiliary { public: TracingJSON(); - TracingJSON( nlohmann::json ); + TracingJSON(nlohmann::json); /** * @brief Access the underlying JSON value * * @return nlohmann::json& */ - inline nlohmann::json & - json() + inline nlohmann::json &json() { return *m_positionInOriginal; } - template< typename Key > - TracingJSON operator[]( Key && key ); + template + TracingJSON operator[](Key &&key); /** * @brief Get the "shadow", i.e. a copy of the original JSON value @@ -75,8 +74,7 @@ namespace auxiliary * * @return nlohmann::json const& */ - nlohmann::json const & - getShadow(); + nlohmann::json const &getShadow(); /** * @brief Invert the "shadow", i.e. a copy of the original JSON value @@ -84,15 +82,13 @@ namespace auxiliary * * @return nlohmann::json */ - nlohmann::json - invertShadow(); + nlohmann::json invertShadow(); /** * @brief Declare all keys of the current object read. * */ - void - declareFullyRead(); + void declareFullyRead(); private: /** @@ -101,7 +97,7 @@ namespace auxiliary * operator[]() in order to avoid use-after-free situations. * */ - std::shared_ptr< nlohmann::json > m_originalJSON; + std::shared_ptr m_originalJSON; /** * @brief A JSON object keeping track of all accessed indices within the * original JSON object. Initially an empty JSON object, @@ -111,44 +107,43 @@ namespace auxiliary * operator[]() in order to avoid use-after-free situations. * */ - std::shared_ptr< nlohmann::json > m_shadow; + std::shared_ptr m_shadow; /** * @brief The sub-expression within m_originalJSON corresponding with * the current instance. * */ - nlohmann::json * m_positionInOriginal; + nlohmann::json *m_positionInOriginal; /** * @brief The sub-expression within m_positionInOriginal corresponding * with the current instance. * */ - nlohmann::json * m_positionInShadow; + nlohmann::json *m_positionInShadow; bool m_trace = true; - void - invertShadow( nlohmann::json & result, nlohmann::json const & shadow ); + void invertShadow(nlohmann::json &result, nlohmann::json const &shadow); TracingJSON( - std::shared_ptr< nlohmann::json > originalJSON, - std::shared_ptr< nlohmann::json > shadow, - nlohmann::json * positionInOriginal, - nlohmann::json * positionInShadow, - bool trace ); + std::shared_ptr originalJSON, + std::shared_ptr shadow, + nlohmann::json *positionInOriginal, + nlohmann::json *positionInShadow, + bool trace); }; - template< typename Key > - TracingJSON TracingJSON::operator[]( Key && key ) + template + TracingJSON TracingJSON::operator[](Key &&key) { - nlohmann::json * newPositionInOriginal = - &m_positionInOriginal->operator[]( key ); + nlohmann::json *newPositionInOriginal = + &m_positionInOriginal->operator[](key); // If accessing a leaf in the JSON tree from an object (not an array!) 
// erase the corresponding key static nlohmann::json nullvalue; - nlohmann::json * newPositionInShadow = &nullvalue; - if( m_trace && m_positionInOriginal->is_object() ) + nlohmann::json *newPositionInShadow = &nullvalue; + if (m_trace && m_positionInOriginal->is_object()) { - newPositionInShadow = &m_positionInShadow->operator[]( key ); + newPositionInShadow = &m_positionInShadow->operator[](key); } bool traceFurther = newPositionInOriginal->is_object(); return TracingJSON( @@ -156,7 +151,7 @@ namespace auxiliary m_shadow, newPositionInOriginal, newPositionInShadow, - traceFurther ); + traceFurther); } /** @@ -166,14 +161,14 @@ namespace auxiliary * * @param options as a parsed JSON object. */ - nlohmann::json parseOptions( std::string const & options ); + nlohmann::json parseOptions(std::string const &options); #if openPMD_HAVE_MPI /** * Parallel version of parseOptions(). MPI-collective. */ - nlohmann::json parseOptions( std::string const & options, MPI_Comm comm ); + nlohmann::json parseOptions(std::string const &options, MPI_Comm comm); #endif diff --git a/include/openPMD/auxiliary/Memory.hpp b/include/openPMD/auxiliary/Memory.hpp index 130398d4d1..8752f0a4da 100644 --- a/include/openPMD/auxiliary/Memory.hpp +++ b/include/openPMD/auxiliary/Memory.hpp @@ -28,127 +28,135 @@ #include #include - namespace openPMD { namespace auxiliary { -inline std::unique_ptr< void, std::function< void(void*) > > -allocatePtr(Datatype dtype, uint64_t numPoints) -{ - void* data = nullptr; - std::function< void(void*) > del = [](void*){}; - switch( dtype ) + inline std::unique_ptr > + allocatePtr(Datatype dtype, uint64_t numPoints) { - using DT = Datatype; + void *data = nullptr; + std::function del = [](void *) {}; + switch (dtype) + { + using DT = Datatype; case DT::VEC_STRING: - data = new char*[numPoints]; - del = [](void* p){ delete[] static_cast< char** >(p); }; + data = new char *[numPoints]; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_LONG_DOUBLE: case DT::LONG_DOUBLE: data = new long double[numPoints]; - del = [](void* p){ delete[] static_cast< long double* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::ARR_DBL_7: case DT::VEC_DOUBLE: case DT::DOUBLE: data = new double[numPoints]; - del = [](void* p){ delete[] static_cast< double* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_FLOAT: case DT::FLOAT: data = new float[numPoints]; - del = [](void* p){ delete[] static_cast< float* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_CLONG_DOUBLE: case DT::CLONG_DOUBLE: data = new std::complex[numPoints]; - del = [](void* p){ delete[] static_cast< std::complex* >(p); }; + del = [](void *p) { + delete[] static_cast *>(p); + }; break; case DT::VEC_CDOUBLE: case DT::CDOUBLE: data = new std::complex[numPoints]; - del = [](void* p){ delete[] static_cast< std::complex* >(p); }; + del = [](void *p) { + delete[] static_cast *>(p); + }; break; case DT::VEC_CFLOAT: case DT::CFLOAT: data = new std::complex[numPoints]; - del = [](void* p){ delete[] static_cast< std::complex* >(p); }; + del = [](void *p) { + delete[] static_cast *>(p); + }; break; case DT::VEC_SHORT: case DT::SHORT: data = new short[numPoints]; - del = [](void* p){ delete[] static_cast< short* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_INT: case DT::INT: data = new int[numPoints]; - del = [](void* p){ delete[] static_cast< int* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case 
DT::VEC_LONG: case DT::LONG: data = new long[numPoints]; - del = [](void* p){ delete[] static_cast< long* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_LONGLONG: case DT::LONGLONG: data = new long long[numPoints]; - del = [](void* p){ delete[] static_cast< long long* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_USHORT: case DT::USHORT: data = new unsigned short[numPoints]; - del = [](void* p){ delete[] static_cast< unsigned short* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_UINT: case DT::UINT: data = new unsigned int[numPoints]; - del = [](void* p){ delete[] static_cast< unsigned int* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_ULONG: case DT::ULONG: data = new unsigned long[numPoints]; - del = [](void* p){ delete[] static_cast< unsigned long* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_ULONGLONG: case DT::ULONGLONG: data = new unsigned long long[numPoints]; - del = [](void* p){ delete[] static_cast< unsigned long long* >(p); }; + del = [](void *p) { + delete[] static_cast(p); + }; break; case DT::VEC_CHAR: case DT::CHAR: data = new char[numPoints]; - del = [](void* p){ delete[] static_cast< char* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_UCHAR: case DT::UCHAR: data = new unsigned char[numPoints]; - del = [](void* p){ delete[] static_cast< unsigned char* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::BOOL: data = new bool[numPoints]; - del = [](void* p){ delete[] static_cast< bool* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::STRING: /* user assigns c_str pointer */ break; case DT::UNDEFINED: default: - throw std::runtime_error("Unknown Attribute datatype (Pointer allocation)"); - } + throw std::runtime_error( + "Unknown Attribute datatype (Pointer allocation)"); + } - return std::unique_ptr< void, std::function< void(void*) > >(data, del); -} + return std::unique_ptr >(data, del); + } -inline std::unique_ptr< void, std::function< void(void*) > > -allocatePtr(Datatype dtype, Extent const& e) -{ - uint64_t numPoints = 1u; - for( auto const& dimensionSize : e ) - numPoints *= dimensionSize; - return allocatePtr(dtype, numPoints); -} + inline std::unique_ptr > + allocatePtr(Datatype dtype, Extent const &e) + { + uint64_t numPoints = 1u; + for (auto const &dimensionSize : e) + numPoints *= dimensionSize; + return allocatePtr(dtype, numPoints); + } -} // auxiliary -} // openPMD +} // namespace auxiliary +} // namespace openPMD diff --git a/include/openPMD/auxiliary/Option.hpp b/include/openPMD/auxiliary/Option.hpp index 1140a5c33f..0205dccd22 100644 --- a/include/openPMD/auxiliary/Option.hpp +++ b/include/openPMD/auxiliary/Option.hpp @@ -23,9 +23,8 @@ #include "VariantSrc.hpp" -#include // std::forward, std::move #include - +#include // std::forward, std::move namespace openPMD { @@ -34,8 +33,7 @@ namespace auxiliary namespace detail { struct Empty - { - }; + {}; } // namespace detail /** @@ -43,67 +41,62 @@ namespace auxiliary * * @tparam T Type that can be optionally stored in an Optional object. */ - template< typename T > + template class Option { - using data_t = variantSrc::variant< T, detail::Empty >; + using data_t = variantSrc::variant; data_t m_data; public: /** Create an empty Option. */ - explicit Option() : m_data( detail::Empty() ) - { - } + explicit Option() : m_data(detail::Empty()) + {} /** Create a full Option. 
* * @param data The object to emplace in the Option. */ - Option( T data ) : m_data( std::move( data ) ) - { - } + Option(T data) : m_data(std::move(data)) + {} - Option( Option const & other ) = default; + Option(Option const &other) = default; - Option & - operator=( Option && other ) + Option &operator=(Option &&other) { - if( other.has_value() ) + if (other.has_value()) { - m_data.template emplace< 0 >( std::move( other.get() ) ); + m_data.template emplace<0>(std::move(other.get())); } else { - m_data.template emplace< 1 >( detail::Empty() ); + m_data.template emplace<1>(detail::Empty()); } return *this; } - Option & - operator=( Option const & other ) + Option &operator=(Option const &other) { - if( other.has_value() ) + if (other.has_value()) { - m_data.template emplace< 0 >( other.get() ); + m_data.template emplace<0>(other.get()); } else { - m_data.template emplace< 1 >( detail::Empty() ); + m_data.template emplace<1>(detail::Empty()); } return *this; } - bool - operator==( Option const & other ) const + bool operator==(Option const &other) const { - if( has_value() ) + if (has_value()) { return !other.has_value(); } else { - if( !other.has_value() ) + if (!other.has_value()) { return false; } @@ -114,17 +107,15 @@ namespace auxiliary } } - bool - operator!=( Option const & other ) const + bool operator!=(Option const &other) const { - return !( *this == other ); + return !(*this == other); } /** * @return Is an object constantly stored in this? */ - bool - has_value() const + bool has_value() const { return m_data.index() == 0; } @@ -143,10 +134,9 @@ namespace auxiliary * @throw std::bad_variant_access if no object is present. * @return The emplaced object. */ - T const & - get() const + T const &get() const { - return variantSrc::template get< T >( m_data ); + return variantSrc::template get(m_data); } /** @@ -155,19 +145,16 @@ namespace auxiliary * @throw std::bad_variant_access if no object is present. * @return The emplaced object. */ - T & - get() + T &get() { - return variantSrc::template get< T >( m_data ); + return variantSrc::template get(m_data); } }; - template< typename T > - Option< typename std::decay< T >::type > - makeOption( T && val ) + template + Option::type> makeOption(T &&val) { - return Option< typename std::decay< T >::type >( - std::forward< T >( val ) ); + return Option::type>(std::forward(val)); } } // namespace auxiliary } // namespace openPMD diff --git a/include/openPMD/auxiliary/OutOfRangeMsg.hpp b/include/openPMD/auxiliary/OutOfRangeMsg.hpp index bc0d96593d..30d6cf0763 100644 --- a/include/openPMD/auxiliary/OutOfRangeMsg.hpp +++ b/include/openPMD/auxiliary/OutOfRangeMsg.hpp @@ -23,7 +23,6 @@ #include #include - namespace openPMD { namespace auxiliary @@ -40,40 +39,35 @@ namespace auxiliary std::string m_description; public: - OutOfRangeMsg() : - m_name( "Key" ), - m_description( "does not exist (read-only)." 
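// Editor's sketch (not part of the diff): auxiliary::Option<T> is the
// library's pre-C++17 stand-in for std::optional, built on the variant
// backport; makeOption() deduces the decayed value type. Names below are
// illustrative.
#include "openPMD/auxiliary/Option.hpp"
#include <iostream>
#include <string>

void sketch_option()
{
    using openPMD::auxiliary::Option;
    using openPMD::auxiliary::makeOption;

    Option<int> empty;                            // holds detail::Empty
    auto filled = makeOption(std::string("bp4")); // Option<std::string>

    if (!empty.has_value() && filled.has_value())
        std::cout << filled.get() << std::endl;   // prints "bp4"
}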
) + OutOfRangeMsg() + : m_name("Key"), m_description("does not exist (read-only).") + {} + OutOfRangeMsg(std::string const name, std::string const description) + : m_name(name), m_description(description) {} - OutOfRangeMsg( - std::string const name, - std::string const description - ) : - m_name(name), m_description( description ) {} - template< + template < typename T_Key, typename = typename std::enable_if< - std::is_integral< T_Key >::value || - std::is_floating_point< T_Key >::value - >::type - > - std::string operator()( T_Key const key ) const + std::is_integral::value || + std::is_floating_point::value>::type> + std::string operator()(T_Key const key) const { - return m_name + std::string(" '") + std::to_string( key ) + - std::string( "' " ) + m_description; + return m_name + std::string(" '") + std::to_string(key) + + std::string("' ") + m_description; } - std::string operator()( std::string const key ) const + std::string operator()(std::string const key) const { - return m_name + std::string(" '") + std::string( key ) + - std::string( "' " ) + m_description; + return m_name + std::string(" '") + std::string(key) + + std::string("' ") + m_description; } - std::string operator()( ... ) const + std::string operator()(...) const { - return m_name + std::string( " " ) + m_description; + return m_name + std::string(" ") + m_description; } }; -} // auxiliary -} // openPMD +} // namespace auxiliary +} // namespace openPMD diff --git a/include/openPMD/auxiliary/ShareRaw.hpp b/include/openPMD/auxiliary/ShareRaw.hpp index 1e1dc636ef..e3d2d1efb7 100644 --- a/include/openPMD/auxiliary/ShareRaw.hpp +++ b/include/openPMD/auxiliary/ShareRaw.hpp @@ -20,56 +20,53 @@ */ #pragma once -#include -#include #include +#include #include - +#include namespace openPMD { - //! @{ - /** Share ownership with a raw pointer - * - * Helper function to share load/store data ownership - * unprotected and without reference counting with a - * raw pointer or stdlib container (that implements a - * contiguous data storage). - * - * @warning this is a helper function to bypass the shared-pointer - * API for storing data behind raw pointers. Using it puts - * the responsibility of buffer-consistency between stores - * and flushes to the users side without an indication via - * reference counting. - */ - template< typename T > - std::shared_ptr< T > - shareRaw( T * x ) - { - return std::shared_ptr< T >( x, [](T *){} ); - } +//! @{ +/** Share ownership with a raw pointer + * + * Helper function to share load/store data ownership + * unprotected and without reference counting with a + * raw pointer or stdlib container (that implements a + * contiguous data storage). + * + * @warning this is a helper function to bypass the shared-pointer + * API for storing data behind raw pointers. Using it puts + * the responsibility of buffer-consistency between stores + * and flushes to the users side without an indication via + * reference counting. 
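// Editor's sketch (not part of the diff): auxiliary::OutOfRangeMsg is a small
// functor that formats "<name> '<key>' <description>" error strings; the
// overloads above cover numeric keys, string keys, and everything else via
// the ellipsis fallback. The name/description pair below is illustrative.
#include "openPMD/auxiliary/OutOfRangeMsg.hpp"
#include <iostream>

void sketch_outOfRangeMsg()
{
    openPMD::auxiliary::OutOfRangeMsg const msg(
        "Iteration", "does not exist (read-only).");
    // -> Iteration '100' does not exist (read-only).
    std::cout << msg(100) << std::endl;
    // -> Iteration 'E' does not exist (read-only).
    std::cout << msg("E") << std::endl;
}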
+ */ +template +std::shared_ptr shareRaw(T *x) +{ + return std::shared_ptr(x, [](T *) {}); +} - template< typename T > - std::shared_ptr< T const > - shareRaw( T const * x ) - { - return std::shared_ptr< T const >( x, [](T const *){} ); - } +template +std::shared_ptr shareRaw(T const *x) +{ + return std::shared_ptr(x, [](T const *) {}); +} - template< typename T > - auto - shareRaw( T & c ) -> std::shared_ptr< typename std::remove_pointer< decltype( c.data() ) >::type > - { - using value_type = typename std::remove_pointer< decltype( c.data() ) >::type; - return std::shared_ptr< value_type >( c.data(), [](value_type *){} ); - } +template +auto shareRaw(T &c) + -> std::shared_ptr::type> +{ + using value_type = typename std::remove_pointer::type; + return std::shared_ptr(c.data(), [](value_type *) {}); +} - template< typename T > - auto - shareRaw( T const & c ) -> std::shared_ptr< typename std::remove_pointer< decltype( c.data() ) >::type > - { - using value_type = typename std::remove_pointer< decltype( c.data() ) >::type; - return std::shared_ptr< value_type >( c.data(), [](value_type *){} ); - } - //! @} -} // openPMD +template +auto shareRaw(T const &c) + -> std::shared_ptr::type> +{ + using value_type = typename std::remove_pointer::type; + return std::shared_ptr(c.data(), [](value_type *) {}); +} +//! @} +} // namespace openPMD diff --git a/include/openPMD/auxiliary/StringManip.hpp b/include/openPMD/auxiliary/StringManip.hpp index c7a00eb634..cd4a8b9381 100644 --- a/include/openPMD/auxiliary/StringManip.hpp +++ b/include/openPMD/auxiliary/StringManip.hpp @@ -21,245 +21,217 @@ #pragma once #include +#include #include #include #include #include -#include - namespace openPMD { namespace auxiliary { -inline bool -contains(std::string const &s, - std::string const &infix) -{ - return s.find(infix) != std::string::npos; -} - -inline bool -contains(std::string const &s, - char const infix) -{ - return s.find(infix) != std::string::npos; -} + inline bool contains(std::string const &s, std::string const &infix) + { + return s.find(infix) != std::string::npos; + } -inline bool -starts_with(std::string const &s, - std::string const &prefix) -{ - return (s.size() >= prefix.size()) && - (0 == s.compare(0, prefix.size(), prefix)); -} + inline bool contains(std::string const &s, char const infix) + { + return s.find(infix) != std::string::npos; + } -inline bool -starts_with(std::string const &s, - char const prefix) -{ - return !s.empty() && - s[0] == prefix; -} + inline bool starts_with(std::string const &s, std::string const &prefix) + { + return (s.size() >= prefix.size()) && + (0 == s.compare(0, prefix.size(), prefix)); + } -inline bool -ends_with(std::string const &s, - std::string const &suffix) -{ - return (s.size() >= suffix.size()) && - (0 == s.compare(s.size() - suffix.size(), suffix.size(), suffix)); -} + inline bool starts_with(std::string const &s, char const prefix) + { + return !s.empty() && s[0] == prefix; + } -inline bool -ends_with(std::string const &s, - char const suffix) -{ - return !s.empty() && s.back() == suffix; -} + inline bool ends_with(std::string const &s, std::string const &suffix) + { + return (s.size() >= suffix.size()) && + (0 == s.compare(s.size() - suffix.size(), suffix.size(), suffix)); + } -inline std::string -replace_first(std::string s, - std::string const& target, - std::string const& replacement) -{ - std::string::size_type pos = s.find(target); - if( pos == std::string::npos ) - return s; - s.replace(pos, target.size(), replacement); - s.shrink_to_fit(); + 
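// Editor's sketch (not part of the diff): shareRaw() wraps user-owned memory
// in a shared_ptr with a no-op deleter so it can be handed to storeChunk()
// without transferring ownership; the caller must keep the buffer alive until
// the next flush. The file name and dataset layout are illustrative.
#include <openPMD/openPMD.hpp>
#include <vector>

void sketch_shareRaw()
{
    using namespace openPMD;
    Series series("sketch.json", Access::CREATE);
    auto E_x = series.iterations[0].meshes["E"]["x"];
    E_x.resetDataset(Dataset(Datatype::DOUBLE, {100}));

    std::vector<double> data(100, 0.0); // stays owned by this scope
    E_x.storeChunk(shareRaw(data), {0}, {100});
    series.flush(); // data must still be alive here
}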
inline bool ends_with(std::string const &s, char const suffix) + { + return !s.empty() && s.back() == suffix; + } - return s; -} + inline std::string replace_first( + std::string s, + std::string const &target, + std::string const &replacement) + { + std::string::size_type pos = s.find(target); + if (pos == std::string::npos) + return s; + s.replace(pos, target.size(), replacement); + s.shrink_to_fit(); -inline std::string -replace_last(std::string s, - std::string const& target, - std::string const& replacement) -{ - std::string::size_type pos = s.rfind(target); - if( pos == std::string::npos ) return s; - s.replace(pos, target.size(), replacement); - s.shrink_to_fit(); - - return s; -} + } -inline std::string -replace_all_nonrecursively(std::string s, - std::string const& target, - std::string const& replacement) -{ - std::string::size_type pos = 0; - auto tsize = target.size(); - auto rsize = replacement.size(); - while (true) + inline std::string replace_last( + std::string s, + std::string const &target, + std::string const &replacement) { - pos = s.find(target, pos); + std::string::size_type pos = s.rfind(target); if (pos == std::string::npos) - break; - s.replace(pos, tsize, replacement); - pos += rsize; + return s; + s.replace(pos, target.size(), replacement); + s.shrink_to_fit(); + + return s; } - s.shrink_to_fit(); - return s; -} -inline std::string -replace_all(std::string s, - std::string const& target, - std::string const& replacement) -{ - std::string::size_type pos = 0; - auto tsize = target.size(); - assert(tsize > 0); - auto rsize = replacement.size(); - while (true) + inline std::string replace_all_nonrecursively( + std::string s, + std::string const &target, + std::string const &replacement) { - pos = s.find(target, pos); - if (pos == std::string::npos) - break; - s.replace(pos, tsize, replacement); - // Allow replacing recursively, but only if - // the next replaced substring overlaps with - // some parts of the original word. - // This avoids loops. - pos += rsize - std::min(tsize - 1, rsize); + std::string::size_type pos = 0; + auto tsize = target.size(); + auto rsize = replacement.size(); + while (true) + { + pos = s.find(target, pos); + if (pos == std::string::npos) + break; + s.replace(pos, tsize, replacement); + pos += rsize; + } + s.shrink_to_fit(); + return s; } - s.shrink_to_fit(); - return s; -} -inline std::vector< std::string > -split(std::string const &s, - std::string const &delimiter, - bool includeDelimiter = false) -{ - std::vector< std::string > ret; - std::string::size_type pos, lastPos = 0, length = s.size(); - while( lastPos < length + 1 ) + inline std::string replace_all( + std::string s, + std::string const &target, + std::string const &replacement) { - pos = s.find_first_of(delimiter, lastPos); - if( pos == std::string::npos ) + std::string::size_type pos = 0; + auto tsize = target.size(); + assert(tsize > 0); + auto rsize = replacement.size(); + while (true) { - pos = length; - includeDelimiter = false; + pos = s.find(target, pos); + if (pos == std::string::npos) + break; + s.replace(pos, tsize, replacement); + // Allow replacing recursively, but only if + // the next replaced substring overlaps with + // some parts of the original word. + // This avoids loops. + pos += rsize - std::min(tsize - 1, rsize); } + s.shrink_to_fit(); + return s; + } - if( pos != lastPos ) - ret.push_back(s.substr(lastPos, pos + (includeDelimiter ? 
delimiter.size() : 0) - lastPos)); + inline std::vector split( + std::string const &s, + std::string const &delimiter, + bool includeDelimiter = false) + { + std::vector ret; + std::string::size_type pos, lastPos = 0, length = s.size(); + while (lastPos < length + 1) + { + pos = s.find_first_of(delimiter, lastPos); + if (pos == std::string::npos) + { + pos = length; + includeDelimiter = false; + } + + if (pos != lastPos) + ret.push_back(s.substr( + lastPos, + pos + (includeDelimiter ? delimiter.size() : 0) - lastPos)); + + lastPos = pos + 1; + } - lastPos = pos + 1; + return ret; } - return ret; -} - -inline std::string -strip(std::string s, std::vector< char > to_remove) -{ - for( auto const& c : to_remove ) - s.erase(std::remove(s.begin(), s.end(), c), s.end()); - s.shrink_to_fit(); + inline std::string strip(std::string s, std::vector to_remove) + { + for (auto const &c : to_remove) + s.erase(std::remove(s.begin(), s.end(), c), s.end()); + s.shrink_to_fit(); - return s; -} + return s; + } -template< typename F > -std::string -trim( std::string const & s, F && to_remove ) -{ - auto begin = s.begin(); - for( ; begin != s.end(); ++begin ) + template + std::string trim(std::string const &s, F &&to_remove) { - if( !to_remove( *begin ) ) + auto begin = s.begin(); + for (; begin != s.end(); ++begin) { - break; + if (!to_remove(*begin)) + { + break; + } } - } - auto end = s.rbegin(); - for( ; end != s.rend(); ++end ) - { - if( !to_remove( *end ) ) + auto end = s.rbegin(); + for (; end != s.rend(); ++end) { - break; + if (!to_remove(*end)) + { + break; + } } + return s.substr(begin - s.begin(), end.base() - begin); } - return s.substr( begin - s.begin(), end.base() - begin ); -} -inline std::string -join(std::vector< std::string > const& vs, std::string const& delimiter) -{ - switch( vs.size() ) + inline std::string + join(std::vector const &vs, std::string const &delimiter) { + switch (vs.size()) + { case 0: return ""; case 1: return vs[0]; default: std::ostringstream ss; - std::copy(vs.begin(), - vs.end() - 1, - std::ostream_iterator< std::string >(ss, delimiter.c_str())); + std::copy( + vs.begin(), + vs.end() - 1, + std::ostream_iterator(ss, delimiter.c_str())); ss << *(vs.end() - 1); return ss.str(); + } } -} -/** - * @brief Remove surrounding slashes from a string. - * - * @param s A string, possibly with a slash as first and/or last letter. - * @return std::string The same string without those slashes. - */ -inline std::string -removeSlashes( std::string s ) -{ - if( auxiliary::starts_with( - s, - '/' - ) ) - { - s = auxiliary::replace_first( - s, - "/", - "" - ); - } - if( auxiliary::ends_with( - s, - '/' - ) ) + /** + * @brief Remove surrounding slashes from a string. + * + * @param s A string, possibly with a slash as first and/or last letter. + * @return std::string The same string without those slashes. 
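// Editor's sketch (not part of the diff): the StringManip helpers above are
// plain value-returning string utilities; a few illustrative calls and their
// expected results:
#include "openPMD/auxiliary/StringManip.hpp"
#include <cassert>

void sketch_stringManip()
{
    using namespace openPMD::auxiliary;
    assert(replace_first("data_%T.h5", "%T", "100") == "data_100.h5");
    assert(join(split("a/b/c", "/"), "-") == "a-b-c");
    assert(trim("  x  ", [](char c) { return c == ' '; }) == "x");
    assert(removeSlashes("/meshes/") == "meshes");
}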
+ */ + inline std::string removeSlashes(std::string s) { - s = auxiliary::replace_last( - s, - "/", - "" - ); + if (auxiliary::starts_with(s, '/')) + { + s = auxiliary::replace_first(s, "/", ""); + } + if (auxiliary::ends_with(s, '/')) + { + s = auxiliary::replace_last(s, "/", ""); + } + return s; } - return s; -} -} // auxiliary -} // openPMD +} // namespace auxiliary +} // namespace openPMD diff --git a/include/openPMD/auxiliary/Unused.hpp b/include/openPMD/auxiliary/Unused.hpp index 971760151e..35d3344195 100644 --- a/include/openPMD/auxiliary/Unused.hpp +++ b/include/openPMD/auxiliary/Unused.hpp @@ -21,15 +21,14 @@ */ #pragma once - #if __cplusplus >= 201703L -# define OPENPMDAPI_UNUSED [[maybe_unused]] +#define OPENPMDAPI_UNUSED [[maybe_unused]] +#else +#ifdef __clang__ +#define OPENPMDAPI_UNUSED __attribute__((unused)) +#elif defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__) +#define OPENPMDAPI_UNUSED [[gnu::unused]] #else -# ifdef __clang__ -# define OPENPMDAPI_UNUSED __attribute__((unused)) -# elif defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__) -# define OPENPMDAPI_UNUSED [[gnu::unused]] -# else -# define OPENPMDAPI_UNUSED -# endif +#define OPENPMDAPI_UNUSED +#endif #endif diff --git a/include/openPMD/auxiliary/Variant.hpp b/include/openPMD/auxiliary/Variant.hpp index e1eab20388..b6a6538723 100644 --- a/include/openPMD/auxiliary/Variant.hpp +++ b/include/openPMD/auxiliary/Variant.hpp @@ -25,69 +25,71 @@ #include #include - namespace openPMD { namespace auxiliary { -/** Generic object to store a set of datatypes in without losing type safety. - * - * @tparam T_DTYPES Enumeration of datatypes to be stored and identified. - * @tparam T Varaidic template argument list of datatypes to be stored. - */ -template< class T_DTYPES, typename ... T > -class Variant -{ - static_assert(std::is_enum< T_DTYPES >::value, "Datatypes to Variant must be supplied as enum."); - -public: - using resource = variantSrc::variant< T ... >; - /** Construct a lightweight wrapper around a generic object that indicates - * the concrete datatype of the specific object stored. - * - * @note Gerneric objects can only generated implicitly if their datatype - * is contained in T_DTYPES. - * @param r Generic object to be stored. - */ - Variant(resource r) - : dtype{static_cast(r.index())}, - m_data{r} - { } - - /** Retrieve a stored specific object of known datatype with ensured type-safety. + /** Generic object to store a set of datatypes in without losing type + * safety. * - * @throw std::bad_variant_access if stored object is not of type U. - * @tparam U Type of the object to be retrieved. - * @return Copy of the retrieved object of type U. + * @tparam T_DTYPES Enumeration of datatypes to be stored and identified. + * @tparam T Varaidic template argument list of datatypes to be + * stored. */ - template< typename U > - U get() const + template + class Variant { - return variantSrc::get< U >(m_data); - } + static_assert( + std::is_enum::value, + "Datatypes to Variant must be supplied as enum."); - /** Retrieve the stored generic object. - * - * @return Copy of the stored generic object. - */ - resource getResource() const - { - return m_data; - } + public: + using resource = variantSrc::variant; + /** Construct a lightweight wrapper around a generic object that + * indicates the concrete datatype of the specific object stored. + * + * @note Gerneric objects can only generated implicitly if their + * datatype is contained in T_DTYPES. + * @param r Generic object to be stored. 
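// Editor's sketch (not part of the diff): OPENPMDAPI_UNUSED expands to
// [[maybe_unused]] under C++17, to a compiler-specific attribute otherwise,
// or to nothing, and silences unused-entity warnings, e.g. for variables that
// only matter in some build configurations (the variable name is illustrative):
#include "openPMD/auxiliary/Unused.hpp"

void sketch_unused()
{
    OPENPMDAPI_UNUSED int onlyUsedWithMPI = 0;
}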
+ */ + Variant(resource r) : dtype{static_cast(r.index())}, m_data{r} + {} - /** Retrieve the index of the alternative that is currently been held - * - * @return zero-based index - */ - constexpr size_t index() const noexcept - { - return m_data.index(); - } + /** Retrieve a stored specific object of known datatype with ensured + * type-safety. + * + * @throw std::bad_variant_access if stored object is not of type U. + * @tparam U Type of the object to be retrieved. + * @return Copy of the retrieved object of type U. + */ + template + U get() const + { + return variantSrc::get(m_data); + } + + /** Retrieve the stored generic object. + * + * @return Copy of the stored generic object. + */ + resource getResource() const + { + return m_data; + } + + /** Retrieve the index of the alternative that is currently been held + * + * @return zero-based index + */ + constexpr size_t index() const noexcept + { + return m_data.index(); + } - T_DTYPES dtype; + T_DTYPES dtype; -private: - resource m_data; -}; -} // auxiliary -} // openPMD + private: + resource m_data; + }; +} // namespace auxiliary +} // namespace openPMD diff --git a/include/openPMD/auxiliary/VariantSrc.hpp b/include/openPMD/auxiliary/VariantSrc.hpp index 5b6eda1dfc..d5c64f96e6 100644 --- a/include/openPMD/auxiliary/VariantSrc.hpp +++ b/include/openPMD/auxiliary/VariantSrc.hpp @@ -23,13 +23,13 @@ #include "openPMD/config.hpp" #if openPMD_HAS_CXX17 -# include // IWYU pragma: export +#include // IWYU pragma: export namespace variantSrc = std; #else - // see: https://github.com/mpark/variant/pull/76 -# if defined(__EXCEPTIONS) -# define MPARK_EXCEPTIONS -# endif -# include // IWYU pragma: export +// see: https://github.com/mpark/variant/pull/76 +#if defined(__EXCEPTIONS) +#define MPARK_EXCEPTIONS +#endif +#include // IWYU pragma: export namespace variantSrc = mpark; #endif diff --git a/include/openPMD/backend/Attributable.hpp b/include/openPMD/backend/Attributable.hpp index 157d732345..f6e9800ed1 100644 --- a/include/openPMD/backend/Attributable.hpp +++ b/include/openPMD/backend/Attributable.hpp @@ -21,31 +21,30 @@ #pragma once #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/auxiliary/OutOfRangeMsg.hpp" #include "openPMD/backend/Attribute.hpp" #include "openPMD/backend/Writable.hpp" -#include "openPMD/auxiliary/OutOfRangeMsg.hpp" +#include #include #include #include -#include #include -#include #include +#include // expose private and protected members for invasive testing #ifndef OPENPMD_protected -# define OPENPMD_protected protected +#define OPENPMD_protected protected #endif - namespace openPMD { namespace traits { - template< typename T > + template struct GenerationPolicy; -} // traits +} // namespace traits class AbstractFilePosition; class AttributableInterface; class Iteration; @@ -57,53 +56,67 @@ namespace internal class no_such_attribute_error : public std::runtime_error { public: - no_such_attribute_error(std::string const& what_arg) - : std::runtime_error(what_arg) - { } - virtual ~no_such_attribute_error() { } + no_such_attribute_error(std::string const &what_arg) + : std::runtime_error(what_arg) + {} + virtual ~no_such_attribute_error() + {} }; namespace internal { -class AttributableData -{ - friend class openPMD::AttributableInterface; + class AttributableData + { + friend class openPMD::AttributableInterface; -public: - AttributableData(); - AttributableData( AttributableData const & ) = delete; - AttributableData( AttributableData && ) = delete; - virtual ~AttributableData() = default; + public: + 
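// Editor's sketch (not part of the diff): auxiliary::Variant couples a variant
// payload with an enum tag derived from the alternative's index, which is how
// Attribute pairs each value with its Datatype. The toy enum below is
// illustrative and must mirror the order of the alternatives.
#include "openPMD/auxiliary/Variant.hpp"
#include <iostream>
#include <string>

enum class Tag : int
{
    INT = 0,
    STRING = 1
};

void sketch_variant()
{
    using V = openPMD::auxiliary::Variant<Tag, int, std::string>;
    V v(std::string("hello")); // resource implicitly constructed from string
    if (v.dtype == Tag::STRING)
        std::cout << v.get<std::string>() << std::endl;
}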
AttributableData(); + AttributableData(AttributableData const &) = delete; + AttributableData(AttributableData &&) = delete; + virtual ~AttributableData() = default; - AttributableData & operator=( AttributableData const & ) = delete; - AttributableData & operator=( AttributableData && ) = delete; + AttributableData &operator=(AttributableData const &) = delete; + AttributableData &operator=(AttributableData &&) = delete; - using A_MAP = std::map< std::string, Attribute >; - Writable m_writable; + using A_MAP = std::map; + Writable m_writable; -private: - A_MAP m_attributes; -}; + private: + A_MAP m_attributes; + }; -/** Verify values of attributes in the frontend - * - * verify string attributes are not empty (backend restriction, e.g., HDF5) - */ -template< typename T > -inline void -attr_value_check( std::string const /* key */, T /* value */ ) -{ -} + enum class SetAttributeMode : char + { + WhileReadingAttributes, + FromPublicAPICall + }; -template< > -inline void -attr_value_check( std::string const key, std::string const value ) -{ - if( value.empty() ) - throw std::runtime_error( - "[setAttribute] Value for string attribute '" + key + - "' must not be empty!" ); -} + /** Verify values of attributes in the frontend + * + * verify string attributes are not empty (backend restriction, e.g., HDF5) + */ + template + inline void attr_value_check( + std::string const /* key */, T /* value */, SetAttributeMode) + {} + + template <> + inline void attr_value_check( + std::string const key, std::string const value, SetAttributeMode mode) + { + switch (mode) + { + case SetAttributeMode::FromPublicAPICall: + if (value.empty()) + throw std::runtime_error( + "[setAttribute] Value for string attribute '" + key + + "' must not be empty!"); + break; + case SetAttributeMode::WhileReadingAttributes: + // no checks while reading + break; + } + } } // namespace internal @@ -115,17 +128,13 @@ attr_value_check( std::string const key, std::string const value ) class AttributableInterface { // @todo remove unnecessary friend (wew that sounds bitter) - using A_MAP = std::map< std::string, Attribute >; - friend Writable* getWritable(AttributableInterface*); - template< typename T_elem > + using A_MAP = std::map; + friend Writable *getWritable(AttributableInterface *); + template friend class BaseRecord; - template< - typename T, - typename T_key, - typename T_container - > + template friend class Container; - template< typename T > + template friend struct traits::GenerationPolicy; friend class Iteration; friend class Series; @@ -134,16 +143,15 @@ class AttributableInterface friend class WriteIterations; protected: - internal::AttributableData * m_attri = nullptr; + internal::AttributableData *m_attri = nullptr; // Should not be called publicly, only by implementing classes - AttributableInterface( internal::AttributableData * ); - template< typename T > - AttributableInterface( T * attri ) + AttributableInterface(internal::AttributableData *); + template + AttributableInterface(T *attri) : AttributableInterface{ - static_cast< internal::AttributableData * >( attri ) } - { - } + static_cast(attri)} + {} public: virtual ~AttributableInterface() = default; @@ -162,32 +170,34 @@ class AttributableInterface * * @{ */ - template< typename T > - bool setAttribute(std::string const& key, T value); - bool setAttribute(std::string const& key, char const value[]); + template + bool setAttribute(std::string const &key, T value); + bool setAttribute(std::string const &key, char const value[]); /** @} */ /** Retrieve 
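// Editor's sketch (not part of the diff): the SetAttributeMode switch above
// means empty string attributes are rejected only when they originate from a
// public setAttribute() call; attributes read back from a file skip the check.
// From user code the rejection surfaces as an exception (the key "comment" is
// just an example):
#include <openPMD/openPMD.hpp>
#include <iostream>
#include <string>

void sketch_emptyStringAttribute(openPMD::Series &series)
{
    try
    {
        series.setAttribute("comment", std::string()); // empty value
    }
    catch (std::runtime_error const &e)
    {
        // [setAttribute] Value for string attribute 'comment' must not be empty!
        std::cout << e.what() << std::endl;
    }
}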
value of Attribute stored with provided key. * - * @throw no_such_attribute_error If no Attribute is currently stored with the provided key. + * @throw no_such_attribute_error If no Attribute is currently stored with + * the provided key. * @param key Key (i.e. name) of the Attribute to retrieve value for. * @return Stored Attribute in Variant form. */ - Attribute getAttribute(std::string const& key) const; + Attribute getAttribute(std::string const &key) const; /** Remove Attribute of provided value both logically and physically. * * @param key Key (i.e. name) of the Attribute to remove. - * @return true if provided key was present and removal succeeded, false otherwise. + * @return true if provided key was present and removal succeeded, false + * otherwise. */ - bool deleteAttribute(std::string const& key); + bool deleteAttribute(std::string const &key); /** List all currently stored Attributes' keys. * * @return Vector of keys (i.e. names) of all currently stored Attributes. */ - std::vector< std::string > attributes() const; + std::vector attributes() const; /** Count all currently stored Attributes. * * @return Number of currently stored Attributes. @@ -198,7 +208,7 @@ class AttributableInterface * @param key Key (i.e. name) of the Attribute to find. * @return true if provided key was present, false otherwise. */ - bool containsAttribute(std::string const& key) const; + bool containsAttribute(std::string const &key) const; /** Retrieve a user-supplied comment associated with the object. * @@ -206,12 +216,13 @@ class AttributableInterface * @return String containing the user-supplied comment. */ std::string comment() const; - /** Populate Attribute corresponding to a comment with the user-supplied comment. + /** Populate Attribute corresponding to a comment with the user-supplied + * comment. * * @param comment String value to be stored as a comment. * @return Reference to modified Attributable. */ - AttributableInterface& setComment(std::string const& comment); + AttributableInterface &setComment(std::string const &comment); /** Flush the corresponding Series object * @@ -230,8 +241,8 @@ class AttributableInterface */ struct MyPath { - std::string directory; //! e.g., samples/git-samples/ - std::string seriesName; //! e.g., data%T + std::string directory; //! e.g., samples/git-samples/ + std::string seriesName; //! e.g., data%T std::string seriesExtension; //! e.g., .bp, .h5, .json, ... /** A vector of openPMD object names * @@ -240,7 +251,7 @@ class AttributableInterface * "iterations", "100", "meshes", "E", "x" * Notice that RecordComponent::SCALAR is included in this list, too. */ - std::vector< std::string > group; + std::vector group; /** Reconstructs a path that can be passed to a Series constructor */ std::string filePath() const; @@ -253,10 +264,10 @@ class AttributableInterface */ MyPath myPath() const; -OPENPMD_protected: + OPENPMD_protected: internal::SeriesInternal const & retrieveSeries() const; - internal::SeriesInternal & retrieveSeries(); + internal::SeriesInternal &retrieveSeries(); /** Returns the corresponding Iteration * @@ -265,14 +276,22 @@ class AttributableInterface * Throws an error otherwise, e.g., for Series objects. 
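// Editor's sketch (not part of the diff): the attribute interface declared
// above is what user code sees on every openPMD object (Series, Iteration,
// Mesh, ...). Attribute names and values below are illustrative.
#include <openPMD/openPMD.hpp>
#include <iostream>

void sketch_attributeRoundTrip(openPMD::Series &series)
{
    series.setAttribute("mass", 9.109e-31);
    series.setComment("produced by the editor's sketch");

    if (series.containsAttribute("mass"))
    {
        double mass = series.getAttribute("mass").get<double>();
        std::cout << "mass = " << mass << std::endl;
    }
    for (auto const &key : series.attributes())
        std::cout << key << std::endl;

    series.deleteAttribute("mass");
    std::cout << series.numAttributes() << " attributes left" << std::endl;
}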
* @{ */ - Iteration const & containingIteration() const; - Iteration & containingIteration(); + Iteration const &containingIteration() const; + Iteration &containingIteration(); /** @} */ - void seriesFlush( FlushLevel ); + void seriesFlush(internal::FlushParams); + + void flushAttributes(internal::FlushParams const &); - void flushAttributes(); - enum ReadMode { + template + bool setAttributeImpl( + std::string const &key, T value, internal::SetAttributeMode); + bool setAttributeImpl( + std::string const &key, char const value[], internal::SetAttributeMode); + + enum ReadMode + { /** * Don't read an attribute from the backend if it has been previously * read. @@ -289,72 +308,85 @@ class AttributableInterface */ FullyReread }; - void readAttributes( ReadMode ); + void readAttributes(ReadMode); - /** Retrieve the value of a floating point Attribute of user-defined precision with ensured type-safety. + /** Retrieve the value of a floating point Attribute of user-defined + * precision with ensured type-safety. * * @note Since the precision of certain Attributes is intentionally left - * unspecified in the openPMD standard, this provides a mechanism to - * retrieve those values without giving up type-safety. - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#conventions-throughout-these-documents + * unspecified in the openPMD standard, this provides a mechanism + * to retrieve those values without giving up type-safety. + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#conventions-throughout-these-documents * @note If the supplied and stored floating point precision are not the - * same, the value is cast to the desired precision unconditionally. + * same, the value is cast to the desired precision + * unconditionally. * - * @throw no_such_attribute_error If no Attribute is currently stored with the provided key. - * @tparam T Floating point type of user-defined precision to retrieve the value as. - * @param key Key (i.e. name) of the floating-point Attribute to retrieve value for. + * @throw no_such_attribute_error If no Attribute is currently stored with + * the provided key. + * @tparam T Floating point type of user-defined precision to retrieve + * the value as. + * @param key Key (i.e. name) of the floating-point Attribute to retrieve + * value for. * @return Value of stored Attribute as supplied floating point type. */ - template< typename T > - T readFloatingpoint(std::string const& key) const; - /** Retrieve a vector of values of a floating point Attributes of user-defined precision with ensured type-safety. + template + T readFloatingpoint(std::string const &key) const; + /** Retrieve a vector of values of a floating point Attributes of + * user-defined precision with ensured type-safety. * * @note Since the precision of certain Attributes is intentionally left - * unspecified in the openPMD standard, this provides a mechanism to - * retrieve those values without giving up type-safety. - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#conventions-throughout-these-documents + * unspecified in the openPMD standard, this provides a mechanism + * to retrieve those values without giving up type-safety. + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#conventions-throughout-these-documents * @note If the supplied and stored floating point precision are not the - * same, the values are cast to the desired precision unconditionally. 
+ * same, the values are cast to the desired precision + * unconditionally. * - * @throw no_such_attribute_error If no Attribute is currently stored with the provided key. - * @tparam T Floating point type of user-defined precision to retrieve the values as. - * @param key Key (i.e. name) of the floating-point Attribute to retrieve values for. - * @return Vector of values of stored Attribute as supplied floating point type. + * @throw no_such_attribute_error If no Attribute is currently stored with + * the provided key. + * @tparam T Floating point type of user-defined precision to retrieve + * the values as. + * @param key Key (i.e. name) of the floating-point Attribute to retrieve + * values for. + * @return Vector of values of stored Attribute as supplied floating point + * type. */ - template< typename T > - std::vector< T > readVectorFloatingpoint(std::string const& key) const; + template + std::vector readVectorFloatingpoint(std::string const &key) const; /* views into the resources held by m_writable - * purely for convenience so code that uses these does not have to go through m_writable-> */ - AbstractIOHandler * IOHandler() + * purely for convenience so code that uses these does not have to go + * through m_writable-> */ + AbstractIOHandler *IOHandler() { return m_attri->m_writable.IOHandler.get(); } - AbstractIOHandler const * IOHandler() const + AbstractIOHandler const *IOHandler() const { return m_attri->m_writable.IOHandler.get(); } - Writable *& parent() + Writable *&parent() { return m_attri->m_writable.parent; } - Writable const * parent() const + Writable const *parent() const { return m_attri->m_writable.parent; } - Writable & writable() + Writable &writable() { return m_attri->m_writable; } - Writable const & writable() const + Writable const &writable() const { return m_attri->m_writable; } - inline - internal::AttributableData & get() + inline internal::AttributableData &get() { - if( m_attri ) + if (m_attri) { return *m_attri; } @@ -362,13 +394,12 @@ class AttributableInterface { throw std::runtime_error( "[AttributableInterface] " - "Cannot use default-constructed Attributable." ); + "Cannot use default-constructed Attributable."); } } - inline - internal::AttributableData const & get() const + inline internal::AttributableData const &get() const { - if( m_attri ) + if (m_attri) { return *m_attri; } @@ -376,14 +407,26 @@ class AttributableInterface { throw std::runtime_error( "[AttributableInterface] " - "Cannot use default-constructed Attributable." ); + "Cannot use default-constructed Attributable."); } } - bool dirty() const { return writable().dirty; } - bool& dirty() { return writable().dirty; } - bool written() const { return writable().written; } - bool& written() { return writable().written; } + bool dirty() const + { + return writable().dirty; + } + bool &dirty() + { + return writable().dirty; + } + bool written() const + { + return writable().written; + } + bool &written() + { + return writable().written; + } private: /** @@ -391,7 +434,7 @@ class AttributableInterface * * @param w The Writable representing the parent. 
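// Editor's sketch (not part of the diff): readFloatingpoint() and
// readVectorFloatingpoint() are protected helpers used when parsing standard
// attributes whose floating-point precision the openPMD standard leaves open;
// they funnel into getAttribute(...).get<T>(), which performs the cast. From
// the outside the same effect looks like this (the attribute name is one of
// the standard Mesh attributes and is used here only as an example):
#include <openPMD/openPMD.hpp>
#include <vector>

std::vector<double> sketch_readGridSpacing(openPMD::Mesh &mesh)
{
    // stored as float, double or long double in the file; always returned as
    // double here because the cast converts element-wise
    return mesh.getAttribute("gridSpacing").get<std::vector<double>>();
}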
*/ - virtual void linkHierarchy(Writable& w); + virtual void linkHierarchy(Writable &w); }; // AttributableInterface // Alias this as Attributable since this is a public abstract parent class @@ -401,47 +444,63 @@ using Attributable = AttributableInterface; class LegacyAttributable : public AttributableInterface { protected: - std::shared_ptr< internal::AttributableData > m_attributableData = - std::make_shared< internal::AttributableData >(); + std::shared_ptr m_attributableData = + std::make_shared(); public: - LegacyAttributable() : AttributableInterface{ nullptr } + LegacyAttributable() : AttributableInterface{nullptr} { AttributableInterface::m_attri = m_attributableData.get(); } }; -//TODO explicitly instantiate Attributable::setAttribute for all T in Datatype -template< typename T > +template +inline bool AttributableInterface::setAttribute(std::string const &key, T value) +{ + return setAttributeImpl( + key, std::move(value), internal::SetAttributeMode::FromPublicAPICall); +} + inline bool -AttributableInterface::setAttribute( std::string const & key, T value ) +Attributable::setAttribute(std::string const &key, char const value[]) +{ + return setAttributeImpl( + key, value, internal::SetAttributeMode::FromPublicAPICall); +} + +// note: we explicitly instantiate Attributable::setAttributeImpl for all T in +// Datatype in Attributable.cpp +template +inline bool Attributable::setAttributeImpl( + std::string const &key, + T value, + internal::SetAttributeMode setAttributeMode) { - internal::attr_value_check( key, value ); + internal::attr_value_check(key, value, setAttributeMode); - auto & attri = get(); - if(IOHandler() && Access::READ_ONLY == IOHandler()->m_frontendAccess ) + auto &attri = get(); + if (IOHandler() && Access::READ_ONLY == IOHandler()->m_frontendAccess) { auxiliary::OutOfRangeMsg const out_of_range_msg( - "Attribute", - "can not be set (read-only)." 
- ); + "Attribute", "can not be set (read-only)."); throw no_such_attribute_error(out_of_range_msg(key)); } dirty() = true; auto it = attri.m_attributes.lower_bound(key); - if( it != attri.m_attributes.end() - && !attri.m_attributes.key_comp()(key, it->first) ) + if (it != attri.m_attributes.end() && + !attri.m_attributes.key_comp()(key, it->first)) { // key already exists in map, just replace the value // note: due to a C++17 issue with NVCC 11.0.2 we write the // T value to variant conversion explicitly // https://github.com/openPMD/openPMD-api/pull/1103 - //it->second = Attribute(std::move(value)); + // it->second = Attribute(std::move(value)); it->second = Attribute(Attribute::resource(std::move(value))); return true; - } else + } + else { // emplace a new map element for an unknown key attri.m_attributes.emplace_hint( @@ -450,26 +509,32 @@ AttributableInterface::setAttribute( std::string const & key, T value ) } } -inline bool -AttributableInterface::setAttribute( std::string const & key, char const value[] ) +inline bool Attributable::setAttributeImpl( + std::string const &key, + char const value[], + internal::SetAttributeMode setAttributeMode) { - return this->setAttribute(key, std::string(value)); + return this->setAttributeImpl(key, std::string(value), setAttributeMode); } -template< typename T > -inline T AttributableInterface::readFloatingpoint( std::string const & key ) const +template +inline T AttributableInterface::readFloatingpoint(std::string const &key) const { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); - return getAttribute(key).get< T >(); + return getAttribute(key).get(); } -template< typename T > -inline std::vector< T > -AttributableInterface::readVectorFloatingpoint( std::string const & key ) const +template +inline std::vector +AttributableInterface::readVectorFloatingpoint(std::string const &key) const { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); - return getAttribute(key).get< std::vector< T > >(); + return getAttribute(key).get >(); } } // namespace openPMD diff --git a/include/openPMD/backend/Attribute.hpp b/include/openPMD/backend/Attribute.hpp index 5471283662..ef7004a346 100644 --- a/include/openPMD/backend/Attribute.hpp +++ b/include/openPMD/backend/Attribute.hpp @@ -20,8 +20,8 @@ */ #pragma once -#include "openPMD/auxiliary/Variant.hpp" #include "openPMD/Datatype.hpp" +#include "openPMD/auxiliary/Variant.hpp" #include #include @@ -34,50 +34,63 @@ #include #include - namespace openPMD { -//TODO This might have to be a Writable -//Reasoning - Flushes are expected to be done often. -//Attributes should not be written unless dirty. -//At the moment the dirty check is done at Attributable level, -//resulting in all of an Attributables Attributes being written to disk even if only one changes -/** Varidic datatype supporting at least all formats for attributes specified in the openPMD standard. +// TODO This might have to be a Writable +// Reasoning - Flushes are expected to be done often. +// Attributes should not be written unless dirty. 
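// Editor's sketch (not part of the diff): setAttributeImpl() above uses the
// classic lower_bound/emplace_hint idiom to get "insert or assign" with a
// single map lookup (std::map::insert_or_assign is C++17 only, and this code
// still supports pre-C++17 builds, cf. the variant backport above). The same
// pattern in isolation, with illustrative names:
#include <map>
#include <string>
#include <utility>

template <typename T>
bool insertOrAssign(
    std::map<std::string, T> &m, std::string const &key, T value)
{
    auto it = m.lower_bound(key);
    if (it != m.end() && !m.key_comp()(key, it->first))
    {
        it->second = std::move(value);             // key exists: overwrite
        return true;
    }
    m.emplace_hint(it, key, std::move(value));     // key missing: insert at hint
    return false;
}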
+// At the moment the dirty check is done at Attributable level, +// resulting in all of an Attributables Attributes being written to disk even if +// only one changes +/** Varidic datatype supporting at least all formats for attributes specified in + * the openPMD standard. * * @note Extending and/or modifying the available formats requires identical * modifications to Datatype. */ -class Attribute : - public auxiliary::Variant< Datatype, - char, unsigned char, // signed char, - short, int, long, long long, - unsigned short, unsigned int, unsigned long, unsigned long long, - float, double, long double, - std::complex< float >, std::complex< double >, std::complex< long double >, - std::string, - std::vector< char >, - std::vector< short >, - std::vector< int >, - std::vector< long >, - std::vector< long long >, - std::vector< unsigned char >, - std::vector< unsigned short >, - std::vector< unsigned int >, - std::vector< unsigned long >, - std::vector< unsigned long long >, - std::vector< float >, - std::vector< double >, - std::vector< long double >, - std::vector< std::complex< float > >, - std::vector< std::complex< double > >, - std::vector< std::complex< long double > >, - std::vector< std::string >, - std::array< double, 7 >, - bool > +class Attribute + : public auxiliary::Variant< + Datatype, + char, + unsigned char, // signed char, + short, + int, + long, + long long, + unsigned short, + unsigned int, + unsigned long, + unsigned long long, + float, + double, + long double, + std::complex, + std::complex, + std::complex, + std::string, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector >, + std::vector >, + std::vector >, + std::vector, + std::array, + bool> { public: Attribute(resource r) : Variant(std::move(r)) - { } + {} /** Retrieve a stored specific Attribute and cast if convertible. * @@ -88,73 +101,76 @@ class Attribute : * @tparam U Type of the object to be casted to. * @return Copy of the retrieved object, casted to type U. 
*/ - template< typename U > + template U get() const; }; -template< typename T, typename U, bool isConvertible = std::is_convertible::value > +template < + typename T, + typename U, + bool isConvertible = std::is_convertible::value> struct DoConvert; -template< typename T, typename U > +template struct DoConvert { - U operator()( T * ) + U operator()(T *) { throw std::runtime_error("getCast: no cast possible."); } }; -template< typename T, typename U > +template struct DoConvert { - U operator()( T * pv ) + U operator()(T *pv) { - return static_cast< U >( *pv ); + return static_cast(*pv); } }; -template< typename T, typename U > -struct DoConvert, std::vector< U >, false> +template +struct DoConvert, std::vector, false> { static constexpr bool convertible = std::is_convertible::value; - template< typename UU = U > - auto operator()( std::vector< T > const * pv ) - -> typename std::enable_if< convertible, std::vector< UU > >::type + template + auto operator()(std::vector const *pv) -> + typename std::enable_if >::type { - std::vector< U > u; - u.reserve( pv->size() ); - std::copy( pv->begin(), pv->end(), std::back_inserter(u) ); + std::vector u; + u.reserve(pv->size()); + std::copy(pv->begin(), pv->end(), std::back_inserter(u)); return u; } - template< typename UU = U > - auto operator()( std::vector< T > const * ) - -> typename std::enable_if< !convertible, std::vector< UU > >::type + template + auto operator()(std::vector const *) -> + typename std::enable_if >::type { throw std::runtime_error("getCast: no vector cast possible."); } }; // conversion cast: turn a single value into a 1-element vector -template< typename T, typename U > -struct DoConvert, false> +template +struct DoConvert, false> { static constexpr bool convertible = std::is_convertible::value; - template< typename UU = U > - auto operator()( T const * pv ) - -> typename std::enable_if< convertible, std::vector< UU > >::type + template + auto operator()(T const *pv) -> + typename std::enable_if >::type { - std::vector< U > u; - u.reserve( 1 ); - u.push_back( static_cast< U >( *pv ) ); + std::vector u; + u.reserve(1); + u.push_back(static_cast(*pv)); return u; } - template< typename UU = U > - auto operator()( T const * ) - -> typename std::enable_if< !convertible, std::vector< UU > >::type + template + auto operator()(T const *) -> + typename std::enable_if >::type { throw std::runtime_error( "getCast: no scalar to vector conversion possible."); @@ -164,24 +180,24 @@ struct DoConvert, false> // conversion cast: array to vector // if a backend reports a std::array<> for something where the frontend expects // a vector -template< typename T, typename U, size_t n > -struct DoConvert, std::vector< U >, false> +template +struct DoConvert, std::vector, false> { static constexpr bool convertible = std::is_convertible::value; - template< typename UU = U > - auto operator()( std::array< T, n > const * pv ) - -> typename std::enable_if< convertible, std::vector< UU > >::type + template + auto operator()(std::array const *pv) -> + typename std::enable_if >::type { - std::vector< U > u; - u.reserve( n ); - std::copy( pv->begin(), pv->end(), std::back_inserter(u) ); + std::vector u; + u.reserve(n); + std::copy(pv->begin(), pv->end(), std::back_inserter(u)); return u; } - template< typename UU = U > - auto operator()( std::array< T, n > const * ) - -> typename std::enable_if< !convertible, std::vector< UU > >::type + template + auto operator()(std::array const *) -> + typename std::enable_if >::type { throw std::runtime_error( "getCast: 
no array to vector conversion possible."); @@ -191,32 +207,32 @@ struct DoConvert, std::vector< U >, false> // conversion cast: vector to array // if a backend reports a std::vector<> for something where the frontend expects // an array -template< typename T, typename U, size_t n > -struct DoConvert, std::array< U, n >, false> +template +struct DoConvert, std::array, false> { static constexpr bool convertible = std::is_convertible::value; - template< typename UU = U > - auto operator()( std::vector< T > const * pv ) - -> typename std::enable_if< convertible, std::array< UU, n > >::type + template + auto operator()(std::vector const *pv) -> + typename std::enable_if >::type { - std::array< U, n > u; - if( n != pv->size() ) + std::array u; + if (n != pv->size()) { throw std::runtime_error( "getCast: no vector to array conversion possible " "(wrong requested array size)."); } - for( size_t i = 0; i < n; ++i ) + for (size_t i = 0; i < n; ++i) { - u[ i ] = static_cast< U >( ( *pv )[ i ] ); + u[i] = static_cast((*pv)[i]); } return u; } - template< typename UU = U > - auto operator()( std::vector< T > const * ) - -> typename std::enable_if< !convertible, std::array< UU, n > >::type + template + auto operator()(std::vector const *) -> + typename std::enable_if >::type { throw std::runtime_error( "getCast: no vector to array conversion possible."); @@ -229,9 +245,8 @@ struct DoConvert, std::array< U, n >, false> * @tparam U Type of the object to be casted to. * @return Copy of the retrieved object, casted to type U. */ -template< typename U > -inline U -getCast( Attribute const & a ) +template +inline U getCast(Attribute const &a) { auto v = a.getResource(); @@ -241,97 +256,113 @@ getCast( Attribute const & a ) // also, once we switch to C++17, we might throw this out in // favor of a hopefully working std::visit #if defined(__ICC) || defined(__INTEL_COMPILER) - if(auto pvalue_c = variantSrc::get_if< char >( &v ) ) + if (auto pvalue_c = variantSrc::get_if(&v)) return DoConvert{}(pvalue_c); - else if(auto pvalue_uc = variantSrc::get_if< unsigned char >( &v ) ) + else if (auto pvalue_uc = variantSrc::get_if(&v)) return DoConvert{}(pvalue_uc); - else if(auto pvalue_s = variantSrc::get_if< short >( &v ) ) + else if (auto pvalue_s = variantSrc::get_if(&v)) return DoConvert{}(pvalue_s); - else if(auto pvalue_i = variantSrc::get_if< int >( &v ) ) + else if (auto pvalue_i = variantSrc::get_if(&v)) return DoConvert{}(pvalue_i); - else if(auto pvalue_l = variantSrc::get_if< long >( &v ) ) + else if (auto pvalue_l = variantSrc::get_if(&v)) return DoConvert{}(pvalue_l); - else if(auto pvalue_ll = variantSrc::get_if< long long >( &v ) ) + else if (auto pvalue_ll = variantSrc::get_if(&v)) return DoConvert{}(pvalue_ll); - else if(auto pvalue_us = variantSrc::get_if< unsigned short >( &v ) ) + else if (auto pvalue_us = variantSrc::get_if(&v)) return DoConvert{}(pvalue_us); - else if(auto pvalue_ui = variantSrc::get_if< unsigned int >( &v ) ) + else if (auto pvalue_ui = variantSrc::get_if(&v)) return DoConvert{}(pvalue_ui); - else if(auto pvalue_ul = variantSrc::get_if< unsigned long >( &v ) ) + else if (auto pvalue_ul = variantSrc::get_if(&v)) return DoConvert{}(pvalue_ul); - else if(auto pvalue_ull = variantSrc::get_if< unsigned long long >( &v ) ) + else if (auto pvalue_ull = variantSrc::get_if(&v)) return DoConvert{}(pvalue_ull); - else if(auto pvalue_f = variantSrc::get_if< float >( &v ) ) + else if (auto pvalue_f = variantSrc::get_if(&v)) return DoConvert{}(pvalue_f); - else if(auto pvalue_d = 
variantSrc::get_if< double >( &v ) ) + else if (auto pvalue_d = variantSrc::get_if(&v)) return DoConvert{}(pvalue_d); - else if(auto pvalue_ld = variantSrc::get_if< long double >( &v ) ) + else if (auto pvalue_ld = variantSrc::get_if(&v)) return DoConvert{}(pvalue_ld); - else if(auto pvalue_cf = variantSrc::get_if< std::complex< float > >( &v ) ) - return DoConvert, U>{}(pvalue_cf); - else if(auto pvalue_cd = variantSrc::get_if< std::complex< double > >( &v ) ) - return DoConvert, U>{}(pvalue_cd); - else if(auto pvalue_cld = variantSrc::get_if< std::complex< long double > >( &v ) ) - return DoConvert, U>{}(pvalue_cld); - else if(auto pvalue_str = variantSrc::get_if< std::string >( &v ) ) + else if (auto pvalue_cf = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_cf); + else if (auto pvalue_cd = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_cd); + else if ( + auto pvalue_cld = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_cld); + else if (auto pvalue_str = variantSrc::get_if(&v)) return DoConvert{}(pvalue_str); // vector - else if(auto pvalue_vc = variantSrc::get_if< std::vector< char > >( &v ) ) - return DoConvert, U>{}(pvalue_vc); - else if(auto pvalue_vuc = variantSrc::get_if< std::vector< unsigned char > >( &v ) ) - return DoConvert, U>{}(pvalue_vuc); - else if(auto pvalue_vs = variantSrc::get_if< std::vector< short > >( &v ) ) - return DoConvert, U>{}(pvalue_vs); - else if(auto pvalue_vi = variantSrc::get_if< std::vector< int > >( &v ) ) - return DoConvert, U>{}(pvalue_vi); - else if(auto pvalue_vl = variantSrc::get_if< std::vector< long > >( &v ) ) - return DoConvert, U>{}(pvalue_vl); - else if(auto pvalue_vll = variantSrc::get_if< std::vector< long long > >( &v ) ) - return DoConvert, U>{}(pvalue_vll); - else if(auto pvalue_vus = variantSrc::get_if< std::vector< unsigned short > >( &v ) ) - return DoConvert, U>{}(pvalue_vus); - else if(auto pvalue_vui = variantSrc::get_if< std::vector< unsigned int > >( &v ) ) - return DoConvert, U>{}(pvalue_vui); - else if(auto pvalue_vul = variantSrc::get_if< std::vector< unsigned long > >( &v ) ) - return DoConvert, U>{}(pvalue_vul); - else if(auto pvalue_vull = variantSrc::get_if< std::vector< unsigned long long > >( &v ) ) - return DoConvert, U>{}(pvalue_vull); - else if(auto pvalue_vf = variantSrc::get_if< std::vector< float > >( &v ) ) - return DoConvert, U>{}(pvalue_vf); - else if(auto pvalue_vd = variantSrc::get_if< std::vector< double > >( &v ) ) - return DoConvert, U>{}(pvalue_vd); - else if(auto pvalue_vld = variantSrc::get_if< std::vector< long double > >( &v ) ) - return DoConvert, U>{}(pvalue_vld); - else if(auto pvalue_vcf = variantSrc::get_if< std::vector< std::complex< float > > >( &v ) ) - return DoConvert >, U>{}(pvalue_vcf); - else if(auto pvalue_vcd = variantSrc::get_if< std::vector< std::complex< double > > >( &v ) ) - return DoConvert >, U>{}(pvalue_vcd); - else if(auto pvalue_vcld = variantSrc::get_if< std::vector< std::complex< long double > > >( &v ) ) - return DoConvert >, U>{}(pvalue_vcld); - else if(auto pvalue_vstr = variantSrc::get_if< std::vector< std::string > >( &v ) ) - return DoConvert, U>{}(pvalue_vstr); + else if (auto pvalue_vc = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vc); + else if ( + auto pvalue_vuc = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vuc); + else if (auto pvalue_vs = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vs); + else if (auto pvalue_vi = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vi); + else if 
(auto pvalue_vl = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vl); + else if (auto pvalue_vll = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vll); + else if ( + auto pvalue_vus = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vus); + else if ( + auto pvalue_vui = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vui); + else if ( + auto pvalue_vul = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vul); + else if ( + auto pvalue_vull = + variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vull); + else if (auto pvalue_vf = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vf); + else if (auto pvalue_vd = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vd); + else if ( + auto pvalue_vld = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vld); + else if ( + auto pvalue_vcf = + variantSrc::get_if > >(&v)) + return DoConvert >, U>{}(pvalue_vcf); + else if ( + auto pvalue_vcd = + variantSrc::get_if > >(&v)) + return DoConvert >, U>{}(pvalue_vcd); + else if ( + auto pvalue_vcld = + variantSrc::get_if > >(&v)) + return DoConvert >, U>{}( + pvalue_vcld); + else if ( + auto pvalue_vstr = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vstr); // extra - else if(auto pvalue_vad = variantSrc::get_if< std::array< double, 7 > >( &v ) ) - return DoConvert, U>{}(pvalue_vad); - else if(auto pvalue_b = variantSrc::get_if< bool >( &v ) ) + else if (auto pvalue_vad = variantSrc::get_if >(&v)) + return DoConvert, U>{}(pvalue_vad); + else if (auto pvalue_b = variantSrc::get_if(&v)) return DoConvert{}(pvalue_b); else throw std::runtime_error("getCast: unknown Datatype."); #else return variantSrc::visit( - []( auto && containedValue ) -> U { - using containedType = std::decay_t< decltype( containedValue ) >; - return DoConvert< containedType, U >{}( &containedValue ); + [](auto &&containedValue) -> U { + using containedType = std::decay_t; + return DoConvert{}(&containedValue); }, - v ); + v); #endif } -template< typename U > +template U Attribute::get() const { - return getCast< U >( Variant::getResource() ); + return getCast(Variant::getResource()); } } // namespace openPMD diff --git a/include/openPMD/backend/BaseRecord.hpp b/include/openPMD/backend/BaseRecord.hpp index d37f861908..4e7e5d70bb 100644 --- a/include/openPMD/backend/BaseRecord.hpp +++ b/include/openPMD/backend/BaseRecord.hpp @@ -20,45 +20,44 @@ */ #pragma once -#include "openPMD/backend/Container.hpp" #include "openPMD/RecordComponent.hpp" #include "openPMD/UnitDimension.hpp" +#include "openPMD/backend/Container.hpp" #include -#include #include - +#include namespace openPMD { -template< typename T_elem > -class BaseRecord : public Container< T_elem > +template +class BaseRecord : public Container { friend class Iteration; friend class ParticleSpecies; public: - using key_type = typename Container< T_elem >::key_type; - using mapped_type = typename Container< T_elem >::mapped_type; - using value_type = typename Container< T_elem >::value_type; - using size_type = typename Container< T_elem >::size_type; - using difference_type = typename Container< T_elem >::difference_type; - using allocator_type = typename Container< T_elem >::allocator_type; - using reference = typename Container< T_elem >::reference; - using const_reference = typename Container< T_elem >::const_reference; - using pointer = typename Container< T_elem >::pointer; - using const_pointer = typename Container< T_elem >::const_pointer; - using iterator = typename Container< T_elem 
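// Editor's sketch (not part of the diff): Attribute stores one alternative of
// the large variant above and getCast() converts it on read, including the
// scalar -> vector, vector -> vector and vector <-> array conversions
// implemented by the DoConvert specializations. Values below are illustrative.
#include "openPMD/backend/Attribute.hpp"
#include <array>
#include <vector>

void sketch_attributeCasts()
{
    using openPMD::Attribute;

    Attribute single(42);                               // stored as int
    auto asDouble = single.get<double>();               // static_cast
    auto asVector = single.get<std::vector<double>>();  // 1-element vector

    Attribute dims(std::vector<double>{0., 0., 0., 0., 0., 0., 0.});
    auto asArray = dims.get<std::array<double, 7>>();   // size-checked copy

    (void)asDouble;
    (void)asVector;
    (void)asArray;
}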
>::iterator; - using const_iterator = typename Container< T_elem >::const_iterator; - - BaseRecord(BaseRecord const& b); - BaseRecord& operator=(BaseRecord const& b); + using key_type = typename Container::key_type; + using mapped_type = typename Container::mapped_type; + using value_type = typename Container::value_type; + using size_type = typename Container::size_type; + using difference_type = typename Container::difference_type; + using allocator_type = typename Container::allocator_type; + using reference = typename Container::reference; + using const_reference = typename Container::const_reference; + using pointer = typename Container::pointer; + using const_pointer = typename Container::const_pointer; + using iterator = typename Container::iterator; + using const_iterator = typename Container::const_iterator; + + BaseRecord(BaseRecord const &b); + BaseRecord &operator=(BaseRecord const &b); virtual ~BaseRecord() = default; - mapped_type& operator[](key_type const& key) override; - mapped_type& operator[](key_type&& key) override; - size_type erase(key_type const& key) override; + mapped_type &operator[](key_type const &key) override; + mapped_type &operator[](key_type &&key) override; + size_type erase(key_type const &key) override; iterator erase(iterator res) override; //! @todo add also, as soon as added in Container: // iterator erase(const_iterator first, const_iterator last) override; @@ -74,12 +73,14 @@ class BaseRecord : public Container< T_elem > * (ISQ). * * @see https://en.wikipedia.org/wiki/Dimensional_analysis - * @see https://en.wikipedia.org/wiki/International_System_of_Quantities#Base_quantities - * @see https://github.com/openPMD/openPMD-standard/blob/1.1.0/STANDARD.md#required-for-each-record + * @see + * https://en.wikipedia.org/wiki/International_System_of_Quantities#Base_quantities + * @see + * https://github.com/openPMD/openPMD-standard/blob/1.1.0/STANDARD.md#required-for-each-record * * @return powers of the 7 base measures in the order specified above */ - std::array< double, 7 > unitDimension() const; + std::array unitDimension() const; /** Returns true if this record only contains a single component * @@ -91,11 +92,12 @@ class BaseRecord : public Container< T_elem > BaseRecord(); void readBase(); - std::shared_ptr< bool > m_containsScalar; + std::shared_ptr m_containsScalar; private: - void flush(std::string const&) final; - virtual void flush_impl(std::string const&) = 0; + void flush(std::string const &, internal::FlushParams const &) final; + virtual void + flush_impl(std::string const &, internal::FlushParams const &) = 0; virtual void read() = 0; /** @@ -106,50 +108,49 @@ class BaseRecord : public Container< T_elem > * @return true If dirty. * @return false Otherwise. 
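For readers of this header: `unitDimension()` above returns the exponents of the seven ISQ base measures as a single `std::array<double, 7>`. A minimal sketch against the public `Series` API, assuming a hypothetical electric-field mesh `"E"` and the JSON backend so the sketch needs no optional dependency:

```cpp
#include <openPMD/openPMD.hpp>

#include <array>
#include <iostream>

int main()
{
    using namespace openPMD;
    Series series("unitDimension_demo.json", Access::CREATE);

    // hypothetical electric-field mesh; E = V/m = kg * m / (A * s^3)
    Mesh E = series.iterations[0].meshes["E"];
    E.setUnitDimension(
        {{UnitDimension::M, 1},
         {UnitDimension::L, 1},
         {UnitDimension::T, -3},
         {UnitDimension::I, -1}});

    // give the record at least one (constant) component so it can be flushed
    MeshRecordComponent E_x = E["x"];
    E_x.resetDataset(Dataset(Datatype::DOUBLE, {10}));
    E_x.makeConstant(0.0);

    // unitDimension() hands the seven exponents back (order: L, M, T, I, theta, N, J)
    std::array<double, 7> dim = E.unitDimension();
    for (double exponent : dim)
        std::cout << exponent << ' ';
    std::cout << std::endl;
}
```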
*/ - bool - dirtyRecursive() const; + bool dirtyRecursive() const; }; // BaseRecord +template <typename T_elem> +BaseRecord<T_elem>::BaseRecord(BaseRecord const &b) + : Container<T_elem>(b), m_containsScalar{b.m_containsScalar} +{} -template< typename T_elem > -BaseRecord< T_elem >::BaseRecord(BaseRecord const& b) - : Container< T_elem >(b), - m_containsScalar{b.m_containsScalar} -{ } - -template< typename T_elem > -BaseRecord< T_elem >& BaseRecord< T_elem >::operator=(openPMD::BaseRecord const& b) { - Container< T_elem >::operator=( b ); +template <typename T_elem> +BaseRecord<T_elem> & +BaseRecord<T_elem>::operator=(openPMD::BaseRecord const &b) +{ + Container<T_elem>::operator=(b); m_containsScalar = b.m_containsScalar; return *this; } -template< typename T_elem > -BaseRecord< T_elem >::BaseRecord() - : Container< T_elem >(), - m_containsScalar{std::make_shared< bool >(false)} +template <typename T_elem> +BaseRecord<T_elem>::BaseRecord() + : Container<T_elem>(), m_containsScalar{std::make_shared<bool>(false)} { - this->setAttribute("unitDimension", - std::array< double, 7 >{{0., 0., 0., 0., 0., 0., 0.}}); + this->setAttribute( + "unitDimension", std::array<double, 7>{{0., 0., 0., 0., 0., 0., 0.}}); } - -template< typename T_elem > -inline typename BaseRecord< T_elem >::mapped_type& -BaseRecord< T_elem >::operator[](key_type const& key) +template <typename T_elem> +inline typename BaseRecord<T_elem>::mapped_type & +BaseRecord<T_elem>::operator[](key_type const &key) { auto it = this->find(key); - if( it != this->end() ) + if (it != this->end()) return it->second; else { bool const keyScalar = (key == RecordComponent::SCALAR); - if( (keyScalar && !Container< T_elem >::empty() && !scalar()) || (scalar() && !keyScalar) ) - throw std::runtime_error("A scalar component can not be contained at " - "the same time as one or more regular components."); - - mapped_type& ret = Container< T_elem >::operator[](key); - if( keyScalar ) + if ((keyScalar && !Container<T_elem>::empty() && !scalar()) || + (scalar() && !keyScalar)) + throw std::runtime_error( + "A scalar component can not be contained at " + "the same time as one or more regular components."); + + mapped_type &ret = Container<T_elem>::operator[](key); + if (keyScalar) { *m_containsScalar = true; ret.parent() = this->parent(); @@ -158,22 +159,24 @@ BaseRecord< T_elem >::operator[](key_type const& key) } } -template< typename T_elem > -inline typename BaseRecord< T_elem >::mapped_type& -BaseRecord< T_elem >::operator[](key_type&& key) +template <typename T_elem> +inline typename BaseRecord<T_elem>::mapped_type & +BaseRecord<T_elem>::operator[](key_type &&key) { auto it = this->find(key); - if( it != this->end() ) + if (it != this->end()) return it->second; else { bool const keyScalar = (key == RecordComponent::SCALAR); - if( (keyScalar && !Container< T_elem >::empty() && !scalar()) || (scalar() && !keyScalar) ) - throw std::runtime_error("A scalar component can not be contained at " - "the same time as one or more regular components."); - - mapped_type& ret = Container< T_elem >::operator[](std::move(key)); - if( keyScalar ) + if ((keyScalar && !Container<T_elem>::empty() && !scalar()) || + (scalar() && !keyScalar)) + throw std::runtime_error( + "A scalar component can not be contained at " + "the same time as one or more regular components."); + + mapped_type &ret = Container<T_elem>::operator[](std::move(key)); + if (keyScalar) { *m_containsScalar = true; ret.parent() = this->parent(); @@ -182,28 +185,28 @@ BaseRecord< T_elem >::operator[](key_type&& key) } } -template< typename T_elem > -inline typename BaseRecord< T_elem >::size_type -BaseRecord< T_elem >::erase(key_type
const &key) { bool const keyScalar = (key == RecordComponent::SCALAR); size_type res; - if( !keyScalar || (keyScalar && this->at(key).constant()) ) - res = Container< T_elem >::erase(key); + if (!keyScalar || (keyScalar && this->at(key).constant())) + res = Container::erase(key); else { - mapped_type& rc = this->find(RecordComponent::SCALAR)->second; - if( rc.written() ) + mapped_type &rc = this->find(RecordComponent::SCALAR)->second; + if (rc.written()) { - Parameter< Operation::DELETE_DATASET > dDelete; + Parameter dDelete; dDelete.name = "."; this->IOHandler()->enqueue(IOTask(&rc, dDelete)); - this->IOHandler()->flush(); + this->IOHandler()->flush(internal::defaultFlushParams); } - res = Container< T_elem >::erase(key); + res = Container::erase(key); } - if( keyScalar ) + if (keyScalar) { this->written() = false; this->writable().abstractFilePosition.reset(); @@ -212,28 +215,28 @@ BaseRecord< T_elem >::erase(key_type const& key) return res; } -template< typename T_elem > -inline typename BaseRecord< T_elem >::iterator -BaseRecord< T_elem >::erase(iterator res) +template +inline typename BaseRecord::iterator +BaseRecord::erase(iterator res) { bool const keyScalar = (res->first == RecordComponent::SCALAR); iterator ret; - if( !keyScalar || (keyScalar && this->at(res->first).constant()) ) - ret = Container< T_elem >::erase(res); + if (!keyScalar || (keyScalar && this->at(res->first).constant())) + ret = Container::erase(res); else { - mapped_type& rc = this->find(RecordComponent::SCALAR)->second; - if( rc.written() ) + mapped_type &rc = this->find(RecordComponent::SCALAR)->second; + if (rc.written()) { - Parameter< Operation::DELETE_DATASET > dDelete; + Parameter dDelete; dDelete.name = "."; this->IOHandler()->enqueue(IOTask(&rc, dDelete)); - this->IOHandler()->flush(); + this->IOHandler()->flush(internal::defaultFlushParams); } - ret = Container< T_elem >::erase(res); + ret = Container::erase(res); } - if( keyScalar ) + if (keyScalar) { this->written() = false; this->writable().abstractFilePosition.reset(); @@ -242,82 +245,89 @@ BaseRecord< T_elem >::erase(iterator res) return ret; } -template< typename T_elem > -inline std::array< double, 7 > -BaseRecord< T_elem >::unitDimension() const +template +inline std::array BaseRecord::unitDimension() const { - return this->getAttribute("unitDimension").template get< std::array< double, 7 > >(); + return this->getAttribute("unitDimension") + .template get >(); } -template< typename T_elem > -inline bool -BaseRecord< T_elem >::scalar() const +template +inline bool BaseRecord::scalar() const { return *m_containsScalar; } -template< typename T_elem > -inline void -BaseRecord< T_elem >::readBase() +template +inline void BaseRecord::readBase() { using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "unitDimension"; this->IOHandler()->enqueue(IOTask(this, aRead)); - this->IOHandler()->flush(); - if( *aRead.dtype == DT::ARR_DBL_7 ) - this->setAttribute("unitDimension", Attribute(*aRead.resource).template get< std::array< double, 7 > >()); - else if( *aRead.dtype == DT::VEC_DOUBLE ) + this->IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::ARR_DBL_7) + this->setAttribute( + "unitDimension", + Attribute(*aRead.resource).template get >()); + else if (*aRead.dtype == DT::VEC_DOUBLE) { - auto vec = Attribute(*aRead.resource).template get< std::vector< double > >(); - if( vec.size() == 7 ) + auto vec = + Attribute(*aRead.resource).template get >(); + if (vec.size() == 7) { - std::array< 
double, 7 > arr; - std::copy(vec.begin(), - vec.end(), - arr.begin()); + std::array arr; + std::copy(vec.begin(), vec.end(), arr.begin()); this->setAttribute("unitDimension", arr); - } else - throw std::runtime_error("Unexpected Attribute datatype for 'unitDimension'"); + } + else + throw std::runtime_error( + "Unexpected Attribute datatype for 'unitDimension'"); } else - throw std::runtime_error("Unexpected Attribute datatype for 'unitDimension'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'unitDimension'"); aRead.name = "timeOffset"; this->IOHandler()->enqueue(IOTask(this, aRead)); - this->IOHandler()->flush(); - if( *aRead.dtype == DT::FLOAT ) - this->setAttribute("timeOffset", Attribute(*aRead.resource).template get< float >()); - else if( *aRead.dtype == DT::DOUBLE ) - this->setAttribute("timeOffset", Attribute(*aRead.resource).template get< double >()); + this->IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::FLOAT) + this->setAttribute( + "timeOffset", Attribute(*aRead.resource).template get()); + else if (*aRead.dtype == DT::DOUBLE) + this->setAttribute( + "timeOffset", Attribute(*aRead.resource).template get()); else - throw std::runtime_error("Unexpected Attribute datatype for 'timeOffset'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'timeOffset'"); } -template< typename T_elem > -inline void -BaseRecord< T_elem >::flush(std::string const& name) +template +inline void BaseRecord::flush( + std::string const &name, internal::FlushParams const &flushParams) { - if( !this->written() && this->empty() ) - throw std::runtime_error("A Record can not be written without any contained RecordComponents: " + name); + if (!this->written() && this->empty()) + throw std::runtime_error( + "A Record can not be written without any contained " + "RecordComponents: " + + name); - this->flush_impl(name); + this->flush_impl(name, flushParams); // flush_impl must take care to correctly set the dirty() flag so this // method doesn't do it } -template< typename T_elem > -inline bool -BaseRecord< T_elem >::dirtyRecursive() const +template +inline bool BaseRecord::dirtyRecursive() const { - if( this->dirty() ) + if (this->dirty()) { return true; } - for( auto const & pair : *this ) + for (auto const &pair : *this) { - if( pair.second.dirtyRecursive() ) + if (pair.second.dirtyRecursive()) { return true; } diff --git a/include/openPMD/backend/BaseRecordComponent.hpp b/include/openPMD/backend/BaseRecordComponent.hpp index 86777b379d..92cb2c950a 100644 --- a/include/openPMD/backend/BaseRecordComponent.hpp +++ b/include/openPMD/backend/BaseRecordComponent.hpp @@ -20,33 +20,27 @@ */ #pragma once -#include "openPMD/backend/Attributable.hpp" #include "openPMD/Dataset.hpp" +#include "openPMD/backend/Attributable.hpp" // expose private and protected members for invasive testing #ifndef OPENPMD_protected -# define OPENPMD_protected protected +#define OPENPMD_protected protected #endif - namespace openPMD { class BaseRecordComponent : public LegacyAttributable { - template< - typename T, - typename T_key, - typename T_container - > - friend - class Container; + template + friend class Container; public: ~BaseRecordComponent() = default; double unitSI() const; - BaseRecordComponent& resetDatatype(Datatype); + BaseRecordComponent &resetDatatype(Datatype); Datatype getDatatype() const; @@ -76,42 +70,39 @@ class BaseRecordComponent : public LegacyAttributable * may additionally wish to use to store user-defined, backend-independent * chunking 
information on particle datasets. */ - ChunkTable - availableChunks(); + ChunkTable availableChunks(); OPENPMD_protected : BaseRecordComponent(); - std::shared_ptr< Dataset > m_dataset; - std::shared_ptr< bool > m_isConstant; + std::shared_ptr m_dataset; + std::shared_ptr m_isConstant; }; // BaseRecordComponent namespace detail { -/** - * Functor template to be used in combination with switchType::operator() - * to set a default value for constant record components via the - * respective type's default constructor. - * Used to implement empty datasets in subclasses of BaseRecordComponent - * (currently RecordComponent). - * @param T_RecordComponent - */ -template< typename T_RecordComponent > -struct DefaultValue -{ - template< typename T > - void - operator()( T_RecordComponent & rc ) + /** + * Functor template to be used in combination with switchType::operator() + * to set a default value for constant record components via the + * respective type's default constructor. + * Used to implement empty datasets in subclasses of BaseRecordComponent + * (currently RecordComponent). + * @param T_RecordComponent + */ + template + struct DefaultValue { - rc.makeConstant( T() ); - } + template + void operator()(T_RecordComponent &rc) + { + rc.makeConstant(T()); + } - template< unsigned n, typename... Args > - void - operator()( Args &&... ) - { - throw std::runtime_error( - "makeEmpty: Datatype not supported by openPMD." ); - } -}; + template + void operator()(Args &&...) + { + throw std::runtime_error( + "makeEmpty: Datatype not supported by openPMD."); + } + }; } // namespace detail } // namespace openPMD diff --git a/include/openPMD/backend/Container.hpp b/include/openPMD/backend/Container.hpp index 509d6f230d..3ed44d4f04 100644 --- a/include/openPMD/backend/Container.hpp +++ b/include/openPMD/backend/Container.hpp @@ -32,10 +32,9 @@ // expose private and protected members for invasive testing #ifndef OPENPMD_protected -# define OPENPMD_protected protected +#define OPENPMD_protected protected #endif - namespace openPMD { namespace traits @@ -46,49 +45,49 @@ namespace traits * insert() of a new element. The passed parameter is an iterator to the * newly added element. */ - template< typename U > + template struct GenerationPolicy { - template< typename T > + template void operator()(T &) - { - } + {} }; -} // traits +} // namespace traits namespace internal { -class SeriesData; + class SeriesData; } namespace detail { -/* - * This converts the key (first parameter) to its string name within the - * openPMD hierarchy. - * If the key is found to be equal to RecordComponent::SCALAR, the parentKey - * will be returned, adding RecordComponent::SCALAR to its back. - * Reason: Scalar record components do not link their containing record as - * parent, but rather the parent's parent, so the own key within the "apparent" - * parent must be given as two steps. - */ -template< typename T > -std::vector< std::string > -keyAsString( T && key, std::vector< std::string > const & parentKey ) -{ - ( void )parentKey; - return { std::to_string( std::forward< T >( key ) ) }; -} + /* + * This converts the key (first parameter) to its string name within the + * openPMD hierarchy. + * If the key is found to be equal to RecordComponent::SCALAR, the parentKey + * will be returned, adding RecordComponent::SCALAR to its back. + * Reason: Scalar record components do not link their containing record as + * parent, but rather the parent's parent, so the own key within the + * "apparent" parent must be given as two steps. 
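The `keyAsString` comment above describes why a scalar record component is addressed through `RecordComponent::SCALAR` under its record rather than through an extra named sub-group. From the user side this looks roughly as follows (file, mesh and component names are invented for illustration; the scalar/regular mixing restriction is the one enforced by `BaseRecord::operator[]` earlier in this diff):

```cpp
#include <openPMD/openPMD.hpp>

int main()
{
    using namespace openPMD;
    Series s("scalar_vs_vector.json", Access::CREATE);
    auto meshes = s.iterations[0].meshes;

    // scalar record: the single component is addressed via the special
    // RecordComponent::SCALAR key and shares its record's path on disk
    MeshRecordComponent rho = meshes["rho"][RecordComponent::SCALAR];
    rho.resetDataset(Dataset(Datatype::DOUBLE, {16}));
    rho.makeConstant(1.0);

    // vector record: components carry real names
    MeshRecordComponent E_x = meshes["E"]["x"];
    E_x.resetDataset(Dataset(Datatype::DOUBLE, {16}));
    E_x.makeConstant(0.0);

    // mixing both styles on one record is rejected by BaseRecord::operator[]:
    // meshes["E"][RecordComponent::SCALAR];  // would throw std::runtime_error
}
```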
+ */ + template + std::vector + keyAsString(T &&key, std::vector const &parentKey) + { + (void)parentKey; + return {std::to_string(std::forward(key))}; + } -// moved to a *.cpp file so we don't need to include RecordComponent.hpp here -template<> -std::vector< std::string > keyAsString< std::string const & >( - std::string const & key, std::vector< std::string > const & parentKey ); + // moved to a *.cpp file so we don't need to include RecordComponent.hpp + // here + template <> + std::vector keyAsString( + std::string const &key, std::vector const &parentKey); -template<> -std::vector< std::string > keyAsString< std::string >( - std::string && key, std::vector< std::string > const & parentKey ); -} + template <> + std::vector keyAsString( + std::string &&key, std::vector const &parentKey); +} // namespace detail /** @brief Map-like container that enforces openPMD requirements and handles IO. * @@ -96,17 +95,17 @@ std::vector< std::string > keyAsString< std::string >( * * @tparam T Type of objects stored * @tparam T_key Key type to look elements up by - * @tparam T_container Type of container used for internal storage (must supply the same type traits and interface as std::map) + * @tparam T_container Type of container used for internal storage (must supply + * the same type traits and interface as std::map) */ -template< - typename T, - typename T_key = std::string, - typename T_container = std::map< T_key, T > -> +template < + typename T, + typename T_key = std::string, + typename T_container = std::map > class Container : public LegacyAttributable { static_assert( - std::is_base_of< AttributableInterface, T >::value, + std::is_base_of::value, "Type of container element must be derived from Writable"); friend class Iteration; @@ -132,63 +131,120 @@ class Container : public LegacyAttributable using iterator = typename InternalContainer::iterator; using const_iterator = typename InternalContainer::const_iterator; - Container(Container const&) = default; + Container(Container const &) = default; virtual ~Container() = default; - iterator begin() noexcept { return m_container->begin(); } - const_iterator begin() const noexcept { return m_container->begin(); } - const_iterator cbegin() const noexcept { return m_container->cbegin(); } + iterator begin() noexcept + { + return m_container->begin(); + } + const_iterator begin() const noexcept + { + return m_container->begin(); + } + const_iterator cbegin() const noexcept + { + return m_container->cbegin(); + } - iterator end() noexcept { return m_container->end(); } - const_iterator end() const noexcept { return m_container->end(); } - const_iterator cend() const noexcept { return m_container->cend(); } + iterator end() noexcept + { + return m_container->end(); + } + const_iterator end() const noexcept + { + return m_container->end(); + } + const_iterator cend() const noexcept + { + return m_container->cend(); + } - bool empty() const noexcept { return m_container->empty(); } + bool empty() const noexcept + { + return m_container->empty(); + } - size_type size() const noexcept { return m_container->size(); } + size_type size() const noexcept + { + return m_container->size(); + } /** Remove all objects from the container and (if written) from disk. * - * @note Calling this operation on any container in a Series with Access::READ_ONLY will throw an exception. + * @note Calling this operation on any container in a Series with + * Access::READ_ONLY will throw an exception. 
* @throws std::runtime_error */ void clear() { - if(Access::READ_ONLY == IOHandler()->m_frontendAccess ) - throw std::runtime_error("Can not clear a container in a read-only Series."); + if (Access::READ_ONLY == IOHandler()->m_frontendAccess) + throw std::runtime_error( + "Can not clear a container in a read-only Series."); clear_unchecked(); } - std::pair< iterator, bool > insert(value_type const& value) { return m_container->insert(value); } - template< class P > - std::pair< iterator, bool > insert(P&& value) { return m_container->insert(value); } - iterator insert(const_iterator hint, value_type const& value) { return m_container->insert(hint, value); } - template< class P > - iterator insert(const_iterator hint, P&& value) { return m_container->insert(hint, value); } - template< class InputIt > - void insert(InputIt first, InputIt last) { m_container->insert(first, last); } - void insert(std::initializer_list< value_type > ilist) { m_container->insert(ilist); } + std::pair insert(value_type const &value) + { + return m_container->insert(value); + } + template + std::pair insert(P &&value) + { + return m_container->insert(value); + } + iterator insert(const_iterator hint, value_type const &value) + { + return m_container->insert(hint, value); + } + template + iterator insert(const_iterator hint, P &&value) + { + return m_container->insert(hint, value); + } + template + void insert(InputIt first, InputIt last) + { + m_container->insert(first, last); + } + void insert(std::initializer_list ilist) + { + m_container->insert(ilist); + } - void swap(Container & other) { m_container->swap(other.m_container); } + void swap(Container &other) + { + m_container->swap(other.m_container); + } - mapped_type& at(key_type const& key) { return m_container->at(key); } - mapped_type const& at(key_type const& key) const { return m_container->at(key); } + mapped_type &at(key_type const &key) + { + return m_container->at(key); + } + mapped_type const &at(key_type const &key) const + { + return m_container->at(key); + } - /** Access the value that is mapped to a key equivalent to key, creating it if such key does not exist already. + /** Access the value that is mapped to a key equivalent to key, creating it + * if such key does not exist already. * * @param key Key of the element to find (lvalue). - * @return Reference to the mapped value of the new element if no element with key key existed. Otherwise a reference to the mapped value of the existing element whose key is equivalent to key. - * @throws std::out_of_range error if in READ_ONLY mode and key does not exist, otherwise key will be created + * @return Reference to the mapped value of the new element if no element + * with key key existed. Otherwise a reference to the mapped value of the + * existing element whose key is equivalent to key. 
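As the `operator[]` documentation above states, element access lazily creates missing entries in a writable Series and throws for read-only access. A short sketch of both sides (file and record names are placeholders):

```cpp
#include <openPMD/openPMD.hpp>

#include <iostream>
#include <stdexcept>

int main()
{
    using namespace openPMD;
    {
        // CREATE mode: operator[] silently creates missing entries.
        Series write("container_demo.json", Access::CREATE);
        auto rc = write.iterations[7].meshes["rho"][RecordComponent::SCALAR];
        rc.resetDataset(Dataset(Datatype::DOUBLE, {4}));
        rc.makeConstant(42.0);
        write.flush();
    }
    {
        // READ_ONLY mode: operator[] on a missing key throws std::out_of_range,
        // so probe with contains()/count() first.
        Series read("container_demo.json", Access::READ_ONLY);
        if (read.iterations.contains(7))
            std::cout << "iteration 7 present\n";
        try
        {
            read.iterations[42]; // does not exist -> throws
        }
        catch (std::out_of_range const &e)
        {
            std::cout << "as expected: " << e.what() << '\n';
        }
    }
}
```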
+ * @throws std::out_of_range error if in READ_ONLY mode and key does not + * exist, otherwise key will be created */ - virtual mapped_type& operator[](key_type const& key) + virtual mapped_type &operator[](key_type const &key) { auto it = m_container->find(key); - if( it != m_container->end() ) + if (it != m_container->end()) return it->second; else { - if(Access::READ_ONLY == IOHandler()->m_frontendAccess ) + if (Access::READ_ONLY == IOHandler()->m_frontendAccess) { auxiliary::OutOfRangeMsg const out_of_range_msg; throw std::out_of_range(out_of_range_msg(key)); @@ -196,28 +252,32 @@ class Container : public LegacyAttributable T t = T(); t.linkHierarchy(writable()); - auto& ret = m_container->insert({key, std::move(t)}).first->second; + auto &ret = m_container->insert({key, std::move(t)}).first->second; ret.writable().ownKeyWithinParent = - detail::keyAsString( key, writable().ownKeyWithinParent ); - traits::GenerationPolicy< T > gen; + detail::keyAsString(key, writable().ownKeyWithinParent); + traits::GenerationPolicy gen; gen(ret); return ret; } } - /** Access the value that is mapped to a key equivalent to key, creating it if such key does not exist already. + /** Access the value that is mapped to a key equivalent to key, creating it + * if such key does not exist already. * * @param key Key of the element to find (rvalue). - * @return Reference to the mapped value of the new element if no element with key key existed. Otherwise a reference to the mapped value of the existing element whose key is equivalent to key. - * @throws std::out_of_range error if in READ_ONLY mode and key does not exist, otherwise key will be created + * @return Reference to the mapped value of the new element if no element + * with key key existed. Otherwise a reference to the mapped value of the + * existing element whose key is equivalent to key. + * @throws std::out_of_range error if in READ_ONLY mode and key does not + * exist, otherwise key will be created */ - virtual mapped_type& operator[](key_type&& key) + virtual mapped_type &operator[](key_type &&key) { auto it = m_container->find(key); - if( it != m_container->end() ) + if (it != m_container->end()) return it->second; else { - if(Access::READ_ONLY == IOHandler()->m_frontendAccess ) + if (Access::READ_ONLY == IOHandler()->m_frontendAccess) { auxiliary::OutOfRangeMsg out_of_range_msg; throw std::out_of_range(out_of_range_msg(key)); @@ -225,51 +285,66 @@ class Container : public LegacyAttributable T t = T(); t.linkHierarchy(writable()); - auto& ret = m_container->insert({key, std::move(t)}).first->second; + auto &ret = m_container->insert({key, std::move(t)}).first->second; ret.writable().ownKeyWithinParent = detail::keyAsString( - std::move( key ), writable().ownKeyWithinParent ); - traits::GenerationPolicy< T > gen; - gen( ret ); + std::move(key), writable().ownKeyWithinParent); + traits::GenerationPolicy gen; + gen(ret); return ret; } } - iterator find(key_type const& key) { return m_container->find(key); } - const_iterator find(key_type const& key) const { return m_container->find(key); } + iterator find(key_type const &key) + { + return m_container->find(key); + } + const_iterator find(key_type const &key) const + { + return m_container->find(key); + } /** This returns either 1 if the key is found in the container of 0 if not. 
* * @param key key value of the element to count * @return since keys are unique in this container, returns 0 or 1 */ - size_type count(key_type const& key) const { return m_container->count(key); } + size_type count(key_type const &key) const + { + return m_container->count(key); + } - /** Checks if there is an element with a key equivalent to an exiting key in the container. + /** Checks if there is an element with a key equivalent to an exiting key in + * the container. * * @param key key value of the element to search for * @return true of key is found, else false */ - bool contains(key_type const& key) const { return m_container->find(key) != m_container->end(); } + bool contains(key_type const &key) const + { + return m_container->find(key) != m_container->end(); + } /** Remove a single element from the container and (if written) from disk. * - * @note Calling this operation on any container in a Series with Access::READ_ONLY will throw an exception. + * @note Calling this operation on any container in a Series with + * Access::READ_ONLY will throw an exception. * @throws std::runtime_error * @param key Key of the element to remove. * @return Number of elements removed (either 0 or 1). */ - virtual size_type erase(key_type const& key) + virtual size_type erase(key_type const &key) { - if(Access::READ_ONLY == IOHandler()->m_frontendAccess ) - throw std::runtime_error("Can not erase from a container in a read-only Series."); + if (Access::READ_ONLY == IOHandler()->m_frontendAccess) + throw std::runtime_error( + "Can not erase from a container in a read-only Series."); auto res = m_container->find(key); - if( res != m_container->end() && res->second.written() ) + if (res != m_container->end() && res->second.written()) { - Parameter< Operation::DELETE_PATH > pDelete; + Parameter pDelete; pDelete.path = "."; IOHandler()->enqueue(IOTask(&res->second, pDelete)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); } return m_container->erase(key); } @@ -277,15 +352,16 @@ class Container : public LegacyAttributable //! @todo why does const_iterator not work compile with pybind11? virtual iterator erase(iterator res) { - if(Access::READ_ONLY == IOHandler()->m_frontendAccess ) - throw std::runtime_error("Can not erase from a container in a read-only Series."); + if (Access::READ_ONLY == IOHandler()->m_frontendAccess) + throw std::runtime_error( + "Can not erase from a container in a read-only Series."); - if( res != m_container->end() && res->second.written() ) + if (res != m_container->end() && res->second.written()) { - Parameter< Operation::DELETE_PATH > pDelete; + Parameter pDelete; pDelete.path = "."; IOHandler()->enqueue(IOTask(&res->second, pDelete)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); } return m_container->erase(res); } @@ -293,38 +369,40 @@ class Container : public LegacyAttributable // virtual iterator erase(const_iterator first, const_iterator last) template - auto emplace(Args&&... 
args) - -> decltype(InternalContainer().emplace(std::forward(args)...)) + auto emplace(Args &&...args) + -> decltype(InternalContainer().emplace(std::forward(args)...)) { return m_container->emplace(std::forward(args)...); } -OPENPMD_protected: + OPENPMD_protected: Container() : m_container{std::make_shared< InternalContainer >()} - { } + {} void clear_unchecked() { - if( written() ) - throw std::runtime_error("Clearing a written container not (yet) implemented."); + if (written()) + throw std::runtime_error( + "Clearing a written container not (yet) implemented."); m_container->clear(); } - virtual void flush(std::string const& path) + virtual void + flush(std::string const &path, internal::FlushParams const &flushParams) { - if( !written() ) + if (!written()) { - Parameter< Operation::CREATE_PATH > pCreate; + Parameter pCreate; pCreate.path = path; IOHandler()->enqueue(IOTask(this, pCreate)); } - flushAttributes(); + flushAttributes(flushParams); } - std::shared_ptr< InternalContainer > m_container; + std::shared_ptr m_container; /** * This class wraps a Container and forwards operator[]() and at() to it. @@ -336,7 +414,7 @@ class Container : public LegacyAttributable */ class EraseStaleEntries { - std::set< key_type > m_accessedKeys; + std::set m_accessedKeys; /* * Note: Putting a copy here leads to weird bugs due to destructors * being called too eagerly upon destruction. @@ -344,26 +422,25 @@ class Container : public LegacyAttributable * Container class template * (https://github.com/openPMD/openPMD-api/pull/886) */ - Container & m_originalContainer; + Container &m_originalContainer; public: - explicit EraseStaleEntries( Container & container_in ) - : m_originalContainer( container_in ) - { - } + explicit EraseStaleEntries(Container &container_in) + : m_originalContainer(container_in) + {} - template< typename K > - mapped_type & operator[]( K && k ) + template + mapped_type &operator[](K &&k) { - m_accessedKeys.insert( k ); // copy - return m_originalContainer[ std::forward< K >( k ) ]; + m_accessedKeys.insert(k); // copy + return m_originalContainer[std::forward(k)]; } - template< typename K > - mapped_type & at( K && k ) + template + mapped_type &at(K &&k) { - m_accessedKeys.insert( k ); // copy - return m_originalContainer.at( std::forward< K >( k ) ); + m_accessedKeys.insert(k); // copy + return m_originalContainer.at(std::forward(k)); } /** @@ -371,37 +448,37 @@ class Container : public LegacyAttributable * If the key is not accessed after this again, it will be deleted along * with all other unaccessed keys upon destruction. 
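`EraseStaleEntries` above follows a small touch-or-drop pattern: keys read through the wrapper are remembered, everything untouched is erased when the wrapper goes out of scope, and `forget()` un-remembers a key. The same idea over a plain `std::map`, as a standalone illustration rather than the class from this header:

```cpp
#include <iostream>
#include <map>
#include <set>
#include <string>

// Minimal stand-in for the "erase stale entries" idea: remember which keys
// were accessed and drop the rest on destruction.
class TouchOrDrop
{
    std::map<std::string, int> &m_map;
    std::set<std::string> m_touched;

public:
    explicit TouchOrDrop(std::map<std::string, int> &map) : m_map(map)
    {}

    int &operator[](std::string const &key)
    {
        m_touched.insert(key);
        return m_map[key];
    }

    // counterpart of EraseStaleEntries::forget(): treat the key as stale
    // again unless it is accessed once more
    void forget(std::string const &key)
    {
        m_touched.erase(key);
    }

    ~TouchOrDrop()
    {
        for (auto it = m_map.begin(); it != m_map.end();)
        {
            if (m_touched.count(it->first) == 0)
                it = m_map.erase(it);
            else
                ++it;
        }
    }
};

int main()
{
    std::map<std::string, int> m{{"a", 1}, {"b", 2}, {"c", 3}};
    {
        TouchOrDrop guard(m);
        guard["a"] = 10;   // touched, survives
        guard["b"];        // touched ...
        guard.forget("b"); // ... but forgotten again, so dropped
    }
    std::cout << m.size() << '\n'; // prints 1 ("a" only)
}
```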
*/ - template< typename K > - void forget( K && k ) + template + void forget(K &&k) { - m_accessedKeys.erase( std::forward< K >( k ) ); + m_accessedKeys.erase(std::forward(k)); } ~EraseStaleEntries() { - auto & map = *m_originalContainer.m_container; + auto &map = *m_originalContainer.m_container; using iterator_t = typename InternalContainer::const_iterator; - std::vector< iterator_t > deleteMe; - deleteMe.reserve( map.size() - m_accessedKeys.size() ); - for( iterator_t it = map.begin(); it != map.end(); ++it ) + std::vector deleteMe; + deleteMe.reserve(map.size() - m_accessedKeys.size()); + for (iterator_t it = map.begin(); it != map.end(); ++it) { - auto lookup = m_accessedKeys.find( it->first ); - if( lookup == m_accessedKeys.end() ) + auto lookup = m_accessedKeys.find(it->first); + if (lookup == m_accessedKeys.end()) { - deleteMe.push_back( it ); + deleteMe.push_back(it); } } - for( auto & it : deleteMe ) + for (auto &it : deleteMe) { - map.erase( it ); + map.erase(it); } } }; EraseStaleEntries eraseStaleEntries() { - return EraseStaleEntries( *this ); + return EraseStaleEntries(*this); } }; -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/backend/MeshRecordComponent.hpp b/include/openPMD/backend/MeshRecordComponent.hpp index 18f9af2c9b..20f5a9b42a 100644 --- a/include/openPMD/backend/MeshRecordComponent.hpp +++ b/include/openPMD/backend/MeshRecordComponent.hpp @@ -24,18 +24,12 @@ #include - namespace openPMD { class MeshRecordComponent : public RecordComponent { - template< - typename T, - typename T_key, - typename T_container - > - friend - class Container; + template + friend class Container; friend class Mesh; @@ -52,8 +46,8 @@ class MeshRecordComponent : public RecordComponent * * @return relative position within range of [0.0:1.0) */ - template< typename T > - std::vector< T > position() const; + template + std::vector position() const; /** Position on an element * @@ -61,8 +55,8 @@ class MeshRecordComponent : public RecordComponent * * @param[in] pos relative position in range [0.0:1.0) */ - template< typename T > - MeshRecordComponent& setPosition(std::vector< T > pos); + template + MeshRecordComponent &setPosition(std::vector pos); /** Create a dataset with regular extent and constant value * @@ -74,19 +68,18 @@ class MeshRecordComponent : public RecordComponent * @tparam T type of the stored value * @return A reference to this RecordComponent. 
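A brief usage sketch for the `setPosition()`/`position()` and `makeConstant()` helpers declared above; the mesh and component names are invented and the JSON backend is chosen only to keep the sketch dependency-free:

```cpp
#include <openPMD/openPMD.hpp>

#include <iostream>
#include <vector>

int main()
{
    using namespace openPMD;
    Series s("mesh_demo.json", Access::CREATE);
    MeshRecordComponent B_z = s.iterations[0].meshes["B"]["z"];

    // sample point sits at the center of each 2D cell
    B_z.setPosition(std::vector<double>{0.5, 0.5});

    // constant-valued 32x32 component: no heavy data is written
    B_z.resetDataset(Dataset(Datatype::DOUBLE, {32, 32}));
    B_z.makeConstant(3.14);

    // position() reads the in-cell position back (frontend attribute access)
    std::vector<double> pos = B_z.position<double>();
    std::cout << pos.size() << " position entries\n";
}
```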
*/ - template< typename T > - MeshRecordComponent& makeConstant(T); + template + MeshRecordComponent &makeConstant(T); }; +template +std::vector MeshRecordComponent::position() const +{ + return readVectorFloatingpoint("position"); +} -template< typename T > -std::vector< T > -MeshRecordComponent::position() const -{ return readVectorFloatingpoint< T >("position"); } - -template< typename T > -inline MeshRecordComponent& -MeshRecordComponent::makeConstant(T value) +template +inline MeshRecordComponent &MeshRecordComponent::makeConstant(T value) { RecordComponent::makeConstant(value); return *this; diff --git a/include/openPMD/backend/PatchRecord.hpp b/include/openPMD/backend/PatchRecord.hpp index 46e2a4bd42..84d180bac5 100644 --- a/include/openPMD/backend/PatchRecord.hpp +++ b/include/openPMD/backend/PatchRecord.hpp @@ -20,29 +20,29 @@ */ #pragma once -#include "openPMD/backend/PatchRecordComponent.hpp" #include "openPMD/backend/BaseRecord.hpp" +#include "openPMD/backend/PatchRecordComponent.hpp" -#include #include - +#include namespace openPMD { -class PatchRecord : public BaseRecord< PatchRecordComponent > +class PatchRecord : public BaseRecord { - friend class Container< PatchRecord >; + friend class Container; friend class ParticleSpecies; friend class ParticlePatches; public: - PatchRecord& setUnitDimension(std::map< UnitDimension, double > const&); + PatchRecord &setUnitDimension(std::map const &); ~PatchRecord() override = default; private: PatchRecord() = default; - void flush_impl(std::string const&) override; + void + flush_impl(std::string const &, internal::FlushParams const &) override; void read() override; -}; //PatchRecord -} // openPMD +}; // PatchRecord +} // namespace openPMD diff --git a/include/openPMD/backend/PatchRecordComponent.hpp b/include/openPMD/backend/PatchRecordComponent.hpp index b0aed3a0d1..87b9f625b5 100644 --- a/include/openPMD/backend/PatchRecordComponent.hpp +++ b/include/openPMD/backend/PatchRecordComponent.hpp @@ -22,17 +22,16 @@ #include "openPMD/backend/BaseRecordComponent.hpp" -#include -#include #include #include +#include +#include // expose private and protected members for invasive testing #ifndef OPENPMD_private -# define OPENPMD_private private +#define OPENPMD_private private #endif - namespace openPMD { @@ -41,40 +40,36 @@ namespace openPMD */ class PatchRecordComponent : public BaseRecordComponent { - template< - typename T, - typename T_key, - typename T_container - > - friend - class Container; - - template< typename > friend class BaseRecord; + template + friend class Container; + + template + friend class BaseRecord; friend class ParticlePatches; friend class PatchRecord; public: - PatchRecordComponent& setUnitSI(double); + PatchRecordComponent &setUnitSI(double); - PatchRecordComponent& resetDataset(Dataset); + PatchRecordComponent &resetDataset(Dataset); uint8_t getDimensionality() const; Extent getExtent() const; - template< typename T > - std::shared_ptr< T > load(); - template< typename T > - void load(std::shared_ptr< T >); - template< typename T > + template + std::shared_ptr load(); + template + void load(std::shared_ptr); + template void store(uint64_t idx, T); -OPENPMD_private: + OPENPMD_private: PatchRecordComponent(); - void flush(std::string const&); + void flush(std::string const &, internal::FlushParams const &); void read(); - std::shared_ptr< std::queue< IOTask > > m_chunks; + std::shared_ptr > m_chunks; /** * @brief Check recursively whether this RecordComponent is dirty. 
@@ -84,69 +79,67 @@ class PatchRecordComponent : public BaseRecordComponent * @return true If dirty. * @return false Otherwise. */ - bool - dirtyRecursive() const; + bool dirtyRecursive() const; }; // PatchRecordComponent -template< typename T > -inline std::shared_ptr< T > -PatchRecordComponent::load() +template +inline std::shared_ptr PatchRecordComponent::load() { uint64_t numPoints = getExtent()[0]; - auto newData = std::shared_ptr< T >(new T[numPoints], []( T *p ){ delete [] p; }); + auto newData = + std::shared_ptr(new T[numPoints], [](T *p) { delete[] p; }); load(newData); return newData; } -template< typename T > -inline void -PatchRecordComponent::load(std::shared_ptr< T > data) +template +inline void PatchRecordComponent::load(std::shared_ptr data) { - Datatype dtype = determineDatatype< T >(); - if( dtype != getDatatype() ) - throw std::runtime_error("Type conversion during particle patch loading not yet implemented"); + Datatype dtype = determineDatatype(); + if (dtype != getDatatype()) + throw std::runtime_error( + "Type conversion during particle patch loading not yet " + "implemented"); - if( !data ) - throw std::runtime_error("Unallocated pointer passed during ParticlePatch loading."); + if (!data) + throw std::runtime_error( + "Unallocated pointer passed during ParticlePatch loading."); uint64_t numPoints = getExtent()[0]; //! @todo add support for constant patch record components - Parameter< Operation::READ_DATASET > dRead; + Parameter dRead; dRead.offset = {0}; dRead.extent = {numPoints}; dRead.dtype = getDatatype(); - dRead.data = std::static_pointer_cast< void >(data); + dRead.data = std::static_pointer_cast(data); m_chunks->push(IOTask(this, dRead)); } -template< typename T > -inline void -PatchRecordComponent::store(uint64_t idx, T data) +template +inline void PatchRecordComponent::store(uint64_t idx, T data) { - Datatype dtype = determineDatatype< T >(); - if( dtype != getDatatype() ) + Datatype dtype = determineDatatype(); + if (dtype != getDatatype()) { std::ostringstream oss; - oss << "Datatypes of patch data (" - << dtype - << ") and dataset (" - << getDatatype() - << ") do not match."; + oss << "Datatypes of patch data (" << dtype << ") and dataset (" + << getDatatype() << ") do not match."; throw std::runtime_error(oss.str()); } Extent dse = getExtent(); - if( dse[0] - 1u < idx ) - throw std::runtime_error("Index does not reside inside patch (no. patches: " + std::to_string(dse[0]) - + " - index: " + std::to_string(idx) + ")"); + if (dse[0] - 1u < idx) + throw std::runtime_error( + "Index does not reside inside patch (no. 
patches: " + + std::to_string(dse[0]) + " - index: " + std::to_string(idx) + ")"); - Parameter< Operation::WRITE_DATASET > dWrite; + Parameter dWrite; dWrite.offset = {idx}; dWrite.extent = {1}; dWrite.dtype = dtype; - dWrite.data = std::make_shared< T >(data); + dWrite.data = std::make_shared(data); m_chunks->push(IOTask(this, dWrite)); } } // namespace openPMD diff --git a/include/openPMD/backend/Writable.hpp b/include/openPMD/backend/Writable.hpp index 51c94d7fa4..114e2f7566 100644 --- a/include/openPMD/backend/Writable.hpp +++ b/include/openPMD/backend/Writable.hpp @@ -22,37 +22,36 @@ #include "openPMD/IO/AbstractIOHandler.hpp" -#include #include +#include #include // expose private and protected members for invasive testing #ifndef OPENPMD_private -# define OPENPMD_private private +#define OPENPMD_private private #endif - namespace openPMD { namespace test { -struct TestHelper; + struct TestHelper; } // namespace test class AbstractFilePosition; class AbstractIOHandler; struct ADIOS2FilePosition; template class AbstractIOHandlerImplCommon; -template +template class Span; namespace internal { -class AttributableData; + class AttributableData; } - -/** @brief Layer to mirror structure of logical data and persistent data in file. +/** @brief Layer to mirror structure of logical data and persistent data in + * file. * * Hierarchy of objects (datasets, groups, attributes, ...) in openPMD is * managed in this class. @@ -65,13 +64,9 @@ class Writable final { friend class internal::AttributableData; friend class AttributableInterface; - template< typename T_elem > + template friend class BaseRecord; - template< - typename T, - typename T_key, - typename T_container - > + template friend class Container; friend class Iteration; friend class Mesh; @@ -86,21 +81,21 @@ class Writable final friend class AbstractIOHandlerImplCommon; friend class JSONIOHandlerImpl; friend struct test::TestHelper; - friend std::string concrete_h5_file_position(Writable*); - friend std::string concrete_bp1_file_position(Writable*); - template + friend std::string concrete_h5_file_position(Writable *); + friend std::string concrete_bp1_file_position(Writable *); + template friend class Span; private: - Writable( internal::AttributableData * ); + Writable(internal::AttributableData *); public: ~Writable() = default; - Writable( Writable const & other ) = delete; - Writable( Writable && other ) = delete; - Writable & operator=( Writable const & other ) = delete; - Writable & operator=( Writable && other ) = delete; + Writable(Writable const &other) = delete; + Writable(Writable &&other) = delete; + Writable &operator=(Writable const &other) = delete; + Writable &operator=(Writable &&other) = delete; /** Flush the corresponding Series object * @@ -111,24 +106,24 @@ class Writable final */ void seriesFlush(); -OPENPMD_private: - void seriesFlush( FlushLevel ); + OPENPMD_private: + void seriesFlush(internal::FlushParams); /* * These members need to be shared pointers since distinct instances of * Writable may share them. 
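The `PatchRecordComponent::store()`/`load()` pair shown further up writes and reads one value per patch index; `store()` rejects indices outside the number of patches as well as mismatched datatypes. A hedged write-side sketch (species name, patch count and values are invented; a real file would additionally carry complete particle records):

```cpp
#include <openPMD/openPMD.hpp>

#include <cstdint>

int main()
{
    using namespace openPMD;
    Series s("patches_demo.json", Access::CREATE);
    ParticleSpecies electrons = s.iterations[0].particles["electrons"];

    // keep the species itself minimal: one constant particle record
    auto pos_x = electrons["position"]["x"];
    pos_x.resetDataset(Dataset(Datatype::DOUBLE, {8}));
    pos_x.makeConstant(0.0);

    // two patches, one entry per patch in each patch record component
    constexpr uint64_t numPatches = 2;
    auto numParticles =
        electrons.particlePatches["numParticles"][RecordComponent::SCALAR];
    auto numParticlesOffset =
        electrons.particlePatches["numParticlesOffset"][RecordComponent::SCALAR];
    Dataset patchDataset(determineDatatype<uint64_t>(), {numPatches});
    numParticles.resetDataset(patchDataset);
    numParticlesOffset.resetDataset(patchDataset);

    // store() writes a single value at patch index idx; an index >= numPatches
    // would throw std::runtime_error, as would a mismatched datatype
    numParticles.store(0, static_cast<uint64_t>(5));
    numParticles.store(1, static_cast<uint64_t>(3));
    numParticlesOffset.store(0, static_cast<uint64_t>(0));
    numParticlesOffset.store(1, static_cast<uint64_t>(5));

    s.flush();
}
```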
*/ - std::shared_ptr< AbstractFilePosition > abstractFilePosition; - std::shared_ptr< AbstractIOHandler > IOHandler; - internal::AttributableData* attributable; - Writable* parent; - bool dirty; + std::shared_ptr abstractFilePosition = nullptr; + std::shared_ptr IOHandler = nullptr; + internal::AttributableData *attributable = nullptr; + Writable *parent = nullptr; + bool dirty = true; /** * If parent is not null, then this is a vector of keys such that: * &(*parent)[key_1]...[key_n] == this * (Notice that scalar record components do not link their direct parent, * but instead their parent's parent, hence a vector of keys) */ - std::vector< std::string > ownKeyWithinParent; + std::vector ownKeyWithinParent; /** * @brief Whether a Writable has been written to the backend. * @@ -144,6 +139,6 @@ class Writable final * Writable and its meaning within the current dataset. * */ - bool written; + bool written = false; }; } // namespace openPMD diff --git a/include/openPMD/benchmark/MemoryProfiler.hpp b/include/openPMD/benchmark/MemoryProfiler.hpp index ea534c8f6a..783052cccf 100644 --- a/include/openPMD/benchmark/MemoryProfiler.hpp +++ b/include/openPMD/benchmark/MemoryProfiler.hpp @@ -25,7 +25,6 @@ #include #include - namespace openPMD { namespace benchmark @@ -42,13 +41,13 @@ namespace benchmark * @param[in] rank MPI rank * @param[in] tag item name to measure */ - MemoryProfiler( int rank, const std::string& tag ) - : m_Rank( rank ), m_Name( "" ) + MemoryProfiler(int rank, const std::string &tag) + : m_Rank(rank), m_Name("") { #if defined(__linux) - //m_Name = "/proc/meminfo"; + // m_Name = "/proc/meminfo"; m_Name = "/proc/self/status"; - Display( tag ); + Display(tag); #endif } @@ -59,33 +58,36 @@ namespace benchmark * * @param tag item name to measure */ - void Display(const std::string& tag){ + void Display(const std::string &tag) + { if (0 == m_Name.size()) return; if (m_Rank > 0) return; - std::cout<<" memory at: "< #include - namespace openPMD { namespace benchmark { /** The Timer class for profiling purpose * - * Simple Timer that measures time consumption btw constructor and destructor - * Reports at rank 0 at the console, for immediate convenience + * Simple Timer that measures time consumption btw constructor and + * destructor Reports at rank 0 at the console, for immediate convenience */ class Timer { public: using Clock = std::chrono::system_clock; - using TimePoint = std::chrono::time_point< Clock >; + using TimePoint = std::chrono::time_point; /** Simple Timer * @@ -50,31 +49,40 @@ namespace benchmark * @param rank MPI rank * @param progStart time point at program start */ - Timer( const std::string& tag, int rank, TimePoint progStart ) - : m_ProgStart( progStart ), - m_Start( std::chrono::system_clock::now() ), - m_Tag( tag ), - m_Rank( rank ) + Timer(const std::string &tag, int rank, TimePoint progStart) + : m_ProgStart(progStart) + , m_Start(std::chrono::system_clock::now()) + , m_Tag(tag) + , m_Rank(rank) { - MemoryProfiler( rank, tag ); + MemoryProfiler(rank, tag); } - ~Timer() { + ~Timer() + { std::string tt = "~" + m_Tag; - MemoryProfiler (m_Rank, tt.c_str()); + MemoryProfiler(m_Rank, tt.c_str()); m_End = Clock::now(); - double millis = std::chrono::duration_cast< std::chrono::milliseconds >( m_End - m_Start ).count(); - double secs = millis/1000.0; - if( m_Rank > 0 ) + double millis = + std::chrono::duration_cast( + m_End - m_Start) + .count(); + double secs = millis / 1000.0; + if (m_Rank > 0) return; std::cout << " [" << m_Tag << "] took:" << secs << " seconds\n"; - 
std::cout<<" " << m_Tag <<" From ProgStart in seconds "<< - std::chrono::duration_cast(m_End - m_ProgStart).count()/1000.0<( + m_End - m_ProgStart) + .count() / + 1000.0 + << std::endl; - std::cout< sliceBlock( - Extent & totalExtent, - int size, - int rank - ) = 0; + virtual std::pair + sliceBlock(Extent &totalExtent, int size, int rank) = 0; - /** This class will be derived from - */ - virtual ~BlockSlicer() = default; - }; -} + /** This class will be derived from + */ + virtual ~BlockSlicer() = default; +}; +} // namespace openPMD diff --git a/include/openPMD/benchmark/mpi/DatasetFiller.hpp b/include/openPMD/benchmark/mpi/DatasetFiller.hpp index 23dec8023a..762d171cbb 100644 --- a/include/openPMD/benchmark/mpi/DatasetFiller.hpp +++ b/include/openPMD/benchmark/mpi/DatasetFiller.hpp @@ -21,105 +21,92 @@ #pragma once - -#include #include "openPMD/Dataset.hpp" +#include #include - namespace openPMD { - /** - * An abstract class to create one iteration of data per thread. - * @tparam T The type of data to produce. - */ - template< typename T > - class DatasetFiller - { - protected: - Extent::value_type m_numberOfItems; - public: - using resultType = T; - - explicit DatasetFiller( Extent::value_type numberOfItems = 0 ); - - /** This class will be derived from - */ - virtual ~DatasetFiller() = default; - - /** - * Create a shared pointer of m_numberOfItems items of type T. - * Should take roughly the same amount of time per call as long as - * m_numberOfItems does not change. - * @return - */ - virtual std::shared_ptr< T > produceData( ) = 0; - - /** - * Set number of items to be produced. - * @param numberOfItems The number. - */ - virtual void setNumberOfItems( Extent::value_type numberOfItems ) = 0; - }; - +/** + * An abstract class to create one iteration of data per thread. + * @tparam T The type of data to produce. + */ +template +class DatasetFiller +{ +protected: + Extent::value_type m_numberOfItems; - template< typename T > - DatasetFiller< T >::DatasetFiller( Extent::value_type numberOfItems ) : - m_numberOfItems( numberOfItems ) - {} +public: + using resultType = T; + explicit DatasetFiller(Extent::value_type numberOfItems = 0); - template< typename DF > - class SimpleDatasetFillerProvider - { - public: - using resultType = typename DF::resultType; - private: - std::shared_ptr< DF > m_df; + /** This class will be derived from + */ + virtual ~DatasetFiller() = default; + /** + * Create a shared pointer of m_numberOfItems items of type T. + * Should take roughly the same amount of time per call as long as + * m_numberOfItems does not change. + * @return + */ + virtual std::shared_ptr produceData() = 0; - template< - typename T, - typename Dummy=void - > - struct Helper - { - std::shared_ptr< DatasetFiller< T>> operator()( std::shared_ptr & ) - { - throw std::runtime_error( - "Can only create data of type " + - datatypeToString( determineDatatype< resultType >( ) ) - ); - } - }; - - template< typename Dummy > - struct Helper< - resultType, - Dummy - > - { - std::shared_ptr< DatasetFiller< resultType>> operator()(std::shared_ptr &df ) - { - return df; - } - }; + /** + * Set number of items to be produced. + * @param numberOfItems The number. 
+ */ + virtual void setNumberOfItems(Extent::value_type numberOfItems) = 0; +}; - public: +template +DatasetFiller::DatasetFiller(Extent::value_type numberOfItems) + : m_numberOfItems(numberOfItems) +{} +template +class SimpleDatasetFillerProvider +{ +public: + using resultType = typename DF::resultType; - explicit SimpleDatasetFillerProvider( DF df ) : - m_df { std::make_shared< DF >( std::move( df ) ) } - {} +private: + std::shared_ptr m_df; + template + struct Helper + { + std::shared_ptr> operator()(std::shared_ptr &) + { + throw std::runtime_error( + "Can only create data of type " + + datatypeToString(determineDatatype())); + } + }; - template< typename T > - std::shared_ptr< DatasetFiller< T >> operator()( ) + template + struct Helper + { + std::shared_ptr> + operator()(std::shared_ptr &df) { - Helper< T > h; - return h( m_df ); + return df; } }; +public: + explicit SimpleDatasetFillerProvider(DF df) + : m_df{std::make_shared(std::move(df))} + {} + + template + std::shared_ptr> operator()() + { + Helper h; + return h(m_df); + } +}; -} +} // namespace openPMD diff --git a/include/openPMD/benchmark/mpi/MPIBenchmark.hpp b/include/openPMD/benchmark/mpi/MPIBenchmark.hpp index ed7f737f89..abe6284a4a 100644 --- a/include/openPMD/benchmark/mpi/MPIBenchmark.hpp +++ b/include/openPMD/benchmark/mpi/MPIBenchmark.hpp @@ -26,170 +26,163 @@ #include "RandomDatasetFiller.hpp" -#include "openPMD/openPMD.hpp" #include "openPMD/DatatypeHelpers.hpp" -#include "openPMD/benchmark/mpi/MPIBenchmarkReport.hpp" -#include "openPMD/benchmark/mpi/DatasetFiller.hpp" #include "openPMD/benchmark/mpi/BlockSlicer.hpp" +#include "openPMD/benchmark/mpi/DatasetFiller.hpp" +#include "openPMD/benchmark/mpi/MPIBenchmarkReport.hpp" +#include "openPMD/openPMD.hpp" #include #include #include #include +#include #include +#include #include #include -#include -#include - namespace openPMD { - /** - * Class representing a benchmark. - * Allows to configure a benchmark and execute it. - * @tparam DatasetFillerProvider Functor type to create a DatasetFiller with - * the requested type. Should have a templated operator()() returning a value - * that can be dynamically casted to a std::shared_ptr>. - */ - template< typename DatasetFillerProvider > - class MPIBenchmark - { - - public: - using extentT = Extent::value_type; - MPI_Comm communicator = MPI_COMM_WORLD; - - /** - * Total extent of the hypercuboid used in the benchmark. - */ - Extent totalExtent; - - std::shared_ptr< BlockSlicer > m_blockSlicer; +/** + * Class representing a benchmark. + * Allows to configure a benchmark and execute it. + * @tparam DatasetFillerProvider Functor type to create a DatasetFiller with + * the requested type. Should have a templated operator()() returning a value + * that can be dynamically casted to a + * std::shared_ptr>. + */ +template +class MPIBenchmark +{ - DatasetFillerProvider m_dfp; +public: + using extentT = Extent::value_type; + MPI_Comm communicator = MPI_COMM_WORLD; + /** + * Total extent of the hypercuboid used in the benchmark. + */ + Extent totalExtent; - /** - * Construct an MPI benchmark manually. - * @param basePath The path to write to. Will be extended with the - * backends' filename endings. May be overwritten if performing several - * benchmarks with the same backend, e.g. when using different compression - * schemes. - * @param tExtent The total extent of the dataset. - * @param blockSlicer An implementation of BlockSlicer class, associating - * each thread with a portion of the dataset to write to. 
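`DatasetFiller` is the hook through which a benchmark produces one iteration's worth of data. A sketch of a custom filler, not part of the library (the bundled `RandomDatasetFiller` plays this role in the shipped examples); wrapped in a `SimpleDatasetFillerProvider<ConstantFiller>` it can be handed to `MPIBenchmark`:

```cpp
#include <openPMD/benchmark/mpi/DatasetFiller.hpp>

#include <memory>

// Illustrative filler: every call returns m_numberOfItems copies of a fixed
// value, so successive calls take roughly constant time.
class ConstantFiller : public openPMD::DatasetFiller<double>
{
    double m_value;

public:
    explicit ConstantFiller(
        double value, openPMD::Extent::value_type numberOfItems = 0)
        : openPMD::DatasetFiller<double>(numberOfItems), m_value(value)
    {}

    std::shared_ptr<double> produceData() override
    {
        auto data = std::shared_ptr<double>(
            new double[m_numberOfItems], [](double *p) { delete[] p; });
        for (openPMD::Extent::value_type i = 0; i < m_numberOfItems; ++i)
            data.get()[i] = m_value;
        return data;
    }

    void setNumberOfItems(openPMD::Extent::value_type numberOfItems) override
    {
        m_numberOfItems = numberOfItems;
    }
};
```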
- * @param dfp DatasetFillerProvider, a templated functor returning a - * std::shared_ptr> or a value dynamically - * castable to one. - * @param comm MPI communicator. - */ - MPIBenchmark( - std::string basePath, - Extent tExtent, - std::shared_ptr< BlockSlicer > blockSlicer, - DatasetFillerProvider dfp, - MPI_Comm comm = MPI_COMM_WORLD - ); + std::shared_ptr m_blockSlicer; - /** - * @param compression Compression string, leave empty to disable commpression. - * @param compressionLevel Compression level. - * @param backend Backend to use, specified by filename extension (eg "bp" or "h5"). - * @param dt Type of data to write and read. - * @param iterations The number of iterations to write and read for each - * compression strategy. The DatasetFiller functor will be called for each - * iteration, so it should create sufficient data for one iteration. - * @param threadSize Number of threads to use. - */ - void addConfiguration( - std::string compression, - uint8_t compressionLevel, - std::string backend, - Datatype dt, - typename decltype( Series::iterations )::key_type iterations, - int threadSize - ); + DatasetFillerProvider m_dfp; - /** - * Version of addConfiguration() that automatically sets the number of used - * threads to the MPI size. - * @param compression Compression string, leave empty to disable commpression. - * @param compressionLevel Compression level. - * @param backend Backend to use, specified by filename extension (eg "bp" or "h5"). - * @param dt Type of data to write and read. - * @param iterations The number of iterations to write and read for each - * compression strategy. The DatasetFiller functor will be called for each - * iteration, so it should create sufficient data for one iteration. - */ - void addConfiguration( - std::string compression, - uint8_t compressionLevel, - std::string backend, - Datatype dt, - typename decltype( Series::iterations)::key_type iterations - ); + /** + * Construct an MPI benchmark manually. + * @param basePath The path to write to. Will be extended with the + * backends' filename endings. May be overwritten if performing several + * benchmarks with the same backend, e.g. when using different compression + * schemes. + * @param tExtent The total extent of the dataset. + * @param blockSlicer An implementation of BlockSlicer class, associating + * each thread with a portion of the dataset to write to. + * @param dfp DatasetFillerProvider, a templated functor returning a + * std::shared_ptr> or a value dynamically + * castable to one. + * @param comm MPI communicator. + */ + MPIBenchmark( + std::string basePath, + Extent tExtent, + std::shared_ptr blockSlicer, + DatasetFillerProvider dfp, + MPI_Comm comm = MPI_COMM_WORLD); - void resetConfigurations( ); + /** + * @param compression Compression string, leave empty to disable + * commpression. + * @param compressionLevel Compression level. + * @param backend Backend to use, specified by filename extension (eg "bp" + * or "h5"). + * @param dt Type of data to write and read. + * @param iterations The number of iterations to write and read for each + * compression strategy. The DatasetFiller functor will be called for each + * iteration, so it should create sufficient data for one iteration. + * @param threadSize Number of threads to use. 
+ */ + void addConfiguration( + std::string compression, + uint8_t compressionLevel, + std::string backend, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations, + int threadSize); + /** + * Version of addConfiguration() that automatically sets the number of used + * threads to the MPI size. + * @param compression Compression string, leave empty to disable + * commpression. + * @param compressionLevel Compression level. + * @param backend Backend to use, specified by filename extension (eg "bp" + * or "h5"). + * @param dt Type of data to write and read. + * @param iterations The number of iterations to write and read for each + * compression strategy. The DatasetFiller functor will be called for each + * iteration, so it should create sufficient data for one iteration. + */ + void addConfiguration( + std::string compression, + uint8_t compressionLevel, + std::string backend, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations); - /** - * Main function for running a benchmark. The benchmark is repeated for all - * previously requested compressions strategies, backends and thread sizes. - * @tparam Clock Clock type to use. - * @param rootThread Rank at which the report will be read. - * @return A report about the time needed for writing and reading under each - * compression strategy. - */ - template< typename Clock > - MPIBenchmarkReport< typename Clock::duration > runBenchmark( - int rootThread = 0 - ); - - private: - std::string m_basePath; - std::vector< - std::tuple< - std::string, - uint8_t, - std::string, - int, - Datatype, - typename decltype( Series::iterations)::key_type>> - m_configurations; - - enum Config - { - COMPRESSION = 0, - COMPRESSION_LEVEL, - BACKEND, - NRANKS, - DTYPE, - ITERATIONS - }; - - std::pair< - Offset, - Extent - > slice( int size ); + void resetConfigurations(); - /** - * @brief Struct used by MPIBenchmark::runBenchmark in switchType. - * Does the actual heavy lifting. - * - * @tparam Clock Clock type to use. - */ - template< typename Clock > - struct BenchmarkExecution - { - MPIBenchmark< DatasetFillerProvider > * m_benchmark; + /** + * Main function for running a benchmark. The benchmark is repeated for all + * previously requested compressions strategies, backends and thread sizes. + * @tparam Clock Clock type to use. + * @param rootThread Rank at which the report will be read. + * @return A report about the time needed for writing and reading under each + * compression strategy. + */ + template + MPIBenchmarkReport + runBenchmark(int rootThread = 0); + +private: + std::string m_basePath; + std::vector> + m_configurations; + + enum Config + { + COMPRESSION = 0, + COMPRESSION_LEVEL, + BACKEND, + NRANKS, + DTYPE, + ITERATIONS + }; + std::pair slice(int size); - explicit BenchmarkExecution( MPIBenchmark< DatasetFillerProvider > * benchmark ) : - m_benchmark { benchmark } - {} + /** + * @brief Struct used by MPIBenchmark::runBenchmark in switchType. + * Does the actual heavy lifting. + * + * @tparam Clock Clock type to use. + */ + template + struct BenchmarkExecution + { + MPIBenchmark *m_benchmark; + explicit BenchmarkExecution( + MPIBenchmark *benchmark) + : m_benchmark{benchmark} + {} /** * Execute a single read benchmark. @@ -203,401 +196,286 @@ namespace openPMD * @param iterations The number of iterations to write. * @return The time passed. 
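Continuing the hypothetical makeBenchmark() sketch above: registering a couple of configurations and running them. The compression strings, levels and backend choices are illustrative and depend on how the library was built.

#include <chrono>

template <typename Provider>
void runSketch(openPMD::MPIBenchmark<Provider> &benchmark)
{
    using openPMD::Datatype;

    // uncompressed HDF5, 10 iterations, thread count derived from the MPI size
    benchmark.addConfiguration("", 0, "h5", Datatype::INT, 10);
    // zlib-compressed ADIOS output, 10 iterations
    benchmark.addConfiguration("zlib", 1, "bp", Datatype::INT, 10);

    auto report =
        benchmark.template runBenchmark<std::chrono::high_resolution_clock>();
    // report holds (write, read) durations, gathered on the root rank (0 by default)
    (void)report;
}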
*/ - template< - typename T - > - typename Clock::duration writeBenchmark( - std::string const & compression, - uint8_t level, - Offset & offset, - Extent & extent, - std::string const & extension, - std::shared_ptr< DatasetFiller< T >> datasetFiller, - typename decltype( Series::iterations)::key_type iterations - ); - - /** - * Execute a single read benchmark. - * @tparam T Type of the dataset to read. - * @param offset Local offset of the chunk to read. - * @param extent Local extent of the chunk to read. - * @param extension File extension to control the openPMD backend. - * @param iterations The number of iterations to read. - * @return The time passed. - */ - template< - typename T - > - typename Clock::duration readBenchmark( - Offset & offset, - Extent & extent, - std::string extension, - typename decltype( Series::iterations)::key_type iterations - ); - - template< typename T > - void operator()( - MPIBenchmarkReport< typename Clock::duration > & report, - int rootThread = 0 - ); - - - template< int n > - void operator()( - MPIBenchmarkReport< typename Clock::duration > &, - int - ); - }; - }; - - - // Implementation - + template + typename Clock::duration writeBenchmark( + std::string const &compression, + uint8_t level, + Offset &offset, + Extent &extent, + std::string const &extension, + std::shared_ptr> datasetFiller, + typename decltype(Series::iterations)::key_type iterations); + /** + * Execute a single read benchmark. + * @tparam T Type of the dataset to read. + * @param offset Local offset of the chunk to read. + * @param extent Local extent of the chunk to read. + * @param extension File extension to control the openPMD backend. + * @param iterations The number of iterations to read. + * @return The time passed. + */ + template + typename Clock::duration readBenchmark( + Offset &offset, + Extent &extent, + std::string extension, + typename decltype(Series::iterations)::key_type iterations); + + template + void operator()( + MPIBenchmarkReport &report, + int rootThread = 0); + + template + void operator()(MPIBenchmarkReport &, int); + }; +}; +// Implementation +template +template +MPIBenchmarkReport +MPIBenchmark::runBenchmark(int rootThread) +{ + MPIBenchmarkReport res{this->communicator}; + BenchmarkExecution exec{this}; - template< typename DatasetFillerProvider > - template< typename Clock > - MPIBenchmarkReport< typename Clock::duration > - MPIBenchmark< DatasetFillerProvider >::runBenchmark( - int rootThread - ) + std::set datatypes; + for (auto const &conf : m_configurations) { - MPIBenchmarkReport< typename Clock::duration > res{this->communicator}; - BenchmarkExecution< Clock > exec { this }; - - std::set< Datatype > datatypes; - for( auto const & conf: m_configurations ) - { - datatypes.insert( std::get< DTYPE >( conf ) ); - } - for( Datatype dt: datatypes ) - { - switchType( - dt, - exec, - res, - rootThread - ); - } - - return res; + datatypes.insert(std::get(conf)); } - - - template< typename DatasetFillerProvider > - MPIBenchmark< DatasetFillerProvider >::MPIBenchmark( - std::string basePath, - Extent tExtent, - std::shared_ptr< BlockSlicer > blockSlicer, - DatasetFillerProvider dfp, - MPI_Comm comm - ): - communicator { comm }, - totalExtent { std::move( tExtent ) }, - m_blockSlicer { std::move( blockSlicer ) }, - m_dfp { dfp }, - m_basePath { std::move( basePath ) } + for (Datatype dt : datatypes) { - if( m_blockSlicer == nullptr ) - throw std::runtime_error("Argument blockSlicer cannot be a nullptr!"); + switchType(dt, exec, res, rootThread); } + return 
res; +} - template< typename DatasetFillerProvider > - std::pair< - Offset, - Extent - > MPIBenchmark< DatasetFillerProvider >::slice( int size ) - { - int actualSize; - MPI_Comm_size( - this->communicator, - &actualSize - ); - int rank; - MPI_Comm_rank( - this->communicator, - &rank - ); - size = std::min( - size, - actualSize - ); - return m_blockSlicer->sliceBlock( - totalExtent, - size, - rank - ); - } +template +MPIBenchmark::MPIBenchmark( + std::string basePath, + Extent tExtent, + std::shared_ptr blockSlicer, + DatasetFillerProvider dfp, + MPI_Comm comm) + : communicator{comm} + , totalExtent{std::move(tExtent)} + , m_blockSlicer{std::move(blockSlicer)} + , m_dfp{dfp} + , m_basePath{std::move(basePath)} +{ + if (m_blockSlicer == nullptr) + throw std::runtime_error("Argument blockSlicer cannot be a nullptr!"); +} +template +std::pair MPIBenchmark::slice(int size) +{ + int actualSize; + MPI_Comm_size(this->communicator, &actualSize); + int rank; + MPI_Comm_rank(this->communicator, &rank); + size = std::min(size, actualSize); + return m_blockSlicer->sliceBlock(totalExtent, size, rank); +} - template< typename DatasetFillerProvider > - void MPIBenchmark< DatasetFillerProvider >::addConfiguration( - std::string compression, - uint8_t compressionLevel, - std::string backend, - Datatype dt, - typename decltype( Series::iterations)::key_type iterations, - int threadSize - ) - { - this->m_configurations - .emplace_back( - compression, - compressionLevel, - backend, - threadSize, - dt, - iterations - ); - } +template +void MPIBenchmark::addConfiguration( + std::string compression, + uint8_t compressionLevel, + std::string backend, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations, + int threadSize) +{ + this->m_configurations.emplace_back( + compression, compressionLevel, backend, threadSize, dt, iterations); +} +template +void MPIBenchmark::addConfiguration( + std::string compression, + uint8_t compressionLevel, + std::string backend, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations) +{ + int size; + MPI_Comm_size(communicator, &size); + addConfiguration( + compression, compressionLevel, backend, dt, iterations, size); +} - template< typename DatasetFillerProvider > - void MPIBenchmark< DatasetFillerProvider >::addConfiguration( - std::string compression, - uint8_t compressionLevel, - std::string backend, - Datatype dt, - typename decltype( Series::iterations)::key_type iterations - ) - { - int size; - MPI_Comm_size( - communicator, - &size - ); - addConfiguration( - compression, - compressionLevel, - backend, - dt, - iterations, - size - ); - } +template +void MPIBenchmark::resetConfigurations() +{ + this->m_compressions.clear(); +} +template +template +template +typename Clock::duration +MPIBenchmark::BenchmarkExecution::writeBenchmark( + std::string const &compression, + uint8_t level, + Offset &offset, + Extent &extent, + std::string const &extension, + std::shared_ptr> datasetFiller, + typename decltype(Series::iterations)::key_type iterations) +{ + MPI_Barrier(m_benchmark->communicator); + auto start = Clock::now(); - template< typename DatasetFillerProvider > - void MPIBenchmark< DatasetFillerProvider >::resetConfigurations( ) + // open file for writing + Series series = Series( + m_benchmark->m_basePath + "." 
+ extension, + Access::CREATE, + m_benchmark->communicator); + + for (typename decltype(Series::iterations)::key_type i = 0; i < iterations; + i++) { - this->m_compressions - .clear( ); - } + auto writeData = datasetFiller->produceData(); + MeshRecordComponent id = + series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR]; - template< typename DatasetFillerProvider > - template< typename Clock > - template< typename T > - typename Clock::duration - MPIBenchmark< DatasetFillerProvider >::BenchmarkExecution< Clock >::writeBenchmark( - std::string const & compression, - uint8_t level, - Offset & offset, - Extent & extent, - std::string const & extension, - std::shared_ptr< DatasetFiller< T >> datasetFiller, - typename decltype( Series::iterations)::key_type iterations - ) - { - MPI_Barrier( m_benchmark->communicator ); - auto start = Clock::now( ); - - // open file for writing - Series series = Series( - m_benchmark->m_basePath + "." + extension, - Access::CREATE, - m_benchmark->communicator - ); - - for( typename decltype( Series::iterations)::key_type i = 0; - i < iterations; - i++ ) + Datatype datatype = determineDatatype(writeData); + Dataset dataset = Dataset(datatype, m_benchmark->totalExtent); + if (!compression.empty()) { - auto writeData = datasetFiller->produceData( ); - - - MeshRecordComponent - id = - series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR]; - - Datatype datatype = determineDatatype( writeData ); - Dataset dataset = Dataset( - datatype, - m_benchmark->totalExtent - ); - if( !compression.empty( ) ) - { - dataset.setCompression( - compression, - level - ); - } - - id.resetDataset( dataset ); - - series.flush( ); - - id.storeChunk< T >( - writeData, - offset, - extent - ); - series.flush( ); + dataset.setCompression(compression, level); } - MPI_Barrier( m_benchmark->communicator ); - auto end = Clock::now( ); + id.resetDataset(dataset); - // deduct the time needed for data generation - for( typename decltype( Series::iterations)::key_type i = 0; - i < iterations; - i++ ) - { - datasetFiller->produceData( ); - } - auto deduct = Clock::now( ); + series.flush(); - return end - start - ( deduct - end ); + id.storeChunk(writeData, offset, extent); + series.flush(); } + MPI_Barrier(m_benchmark->communicator); + auto end = Clock::now(); - template< typename DatasetFillerProvider > - template< typename Clock > - template< typename T > - typename Clock::duration - MPIBenchmark< DatasetFillerProvider >::BenchmarkExecution< Clock >::readBenchmark( - Offset & offset, - Extent & extent, - std::string extension, - typename decltype( Series::iterations)::key_type iterations - ) + // deduct the time needed for data generation + for (typename decltype(Series::iterations)::key_type i = 0; i < iterations; + i++) { - MPI_Barrier( m_benchmark->communicator ); - // let every thread measure time - auto start = Clock::now( ); - - Series series = Series( - m_benchmark->m_basePath + "." 
+ extension, - Access::READ_ONLY, - m_benchmark->communicator - ); - - for( typename decltype( Series::iterations)::key_type i = 0; - i < iterations; - i++ ) - { - MeshRecordComponent - id = - series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR]; + datasetFiller->produceData(); + } + auto deduct = Clock::now(); + return end - start - (deduct - end); +} - auto chunk_data = id.loadChunk< T >( - offset, - extent - ); - series.flush( ); - } +template +template +template +typename Clock::duration +MPIBenchmark::BenchmarkExecution::readBenchmark( + Offset &offset, + Extent &extent, + std::string extension, + typename decltype(Series::iterations)::key_type iterations) +{ + MPI_Barrier(m_benchmark->communicator); + // let every thread measure time + auto start = Clock::now(); - MPI_Barrier( m_benchmark->communicator ); - auto end = Clock::now( ); - return end - start; + Series series = Series( + m_benchmark->m_basePath + "." + extension, + Access::READ_ONLY, + m_benchmark->communicator); + + for (typename decltype(Series::iterations)::key_type i = 0; i < iterations; + i++) + { + MeshRecordComponent id = + series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR]; + + auto chunk_data = id.loadChunk(offset, extent); + series.flush(); } + MPI_Barrier(m_benchmark->communicator); + auto end = Clock::now(); + return end - start; +} - template< typename DatasetFillerProvider > - template< typename Clock > - template< typename T > - void - MPIBenchmark< DatasetFillerProvider >::BenchmarkExecution< Clock >::operator()( - MPIBenchmarkReport< typename Clock::duration > & report, - int rootThread - ) +template +template +template +void MPIBenchmark::BenchmarkExecution::operator()( + MPIBenchmarkReport &report, int rootThread) +{ + Datatype dt = determineDatatype(); + auto dsf = std::dynamic_pointer_cast>( + m_benchmark->m_dfp.template operator()()); + for (auto const &config : m_benchmark->m_configurations) { - Datatype dt = determineDatatype< T >( ); - auto dsf = std::dynamic_pointer_cast< DatasetFiller< T>>( - m_benchmark->m_dfp - .template operator( - )< T >( ) - ); - for( auto const & config: m_benchmark->m_configurations ) - { - std::string compression; - uint8_t compressionLevel; - std::string backend; - int size; - Datatype dt2; - typename decltype( Series::iterations)::key_type iterations; - std::tie( - compression, - compressionLevel, - backend, - size, - dt2, - iterations - ) = config; - - if( dt != dt2 ) - { - continue; - } - - auto localCuboid = m_benchmark->slice( size ); - - extentT blockSize = 1; - for( auto ext: localCuboid.second ) - { - blockSize *= ext; - } - dsf->setNumberOfItems( blockSize ); - - auto writeTime = writeBenchmark< T >( - compression, - compressionLevel, - localCuboid.first, - localCuboid.second, - backend, - dsf, - iterations - ); - auto readTime = readBenchmark< T >( - localCuboid.first, - localCuboid.second, - backend, - iterations - ); - report.addReport( - rootThread, - compression, - compressionLevel, - backend, - size, - dt2, - iterations, - std::make_pair( - writeTime, - readTime - ) - ); + std::string compression; + uint8_t compressionLevel; + std::string backend; + int size; + Datatype dt2; + typename decltype(Series::iterations)::key_type iterations; + std::tie( + compression, compressionLevel, backend, size, dt2, iterations) = + config; + if (dt != dt2) + { + continue; } - } + auto localCuboid = m_benchmark->slice(size); - template< typename DatasetFillerProvider > - template< typename Clock > - template< int n > - void - MPIBenchmark< 
DatasetFillerProvider >::BenchmarkExecution< Clock >::operator()( - MPIBenchmarkReport< typename Clock::duration > &, - int - ) - { - throw std::runtime_error( "Unknown/unsupported datatype requested to be benchmarked." ); + extentT blockSize = 1; + for (auto ext : localCuboid.second) + { + blockSize *= ext; + } + dsf->setNumberOfItems(blockSize); + + auto writeTime = writeBenchmark( + compression, + compressionLevel, + localCuboid.first, + localCuboid.second, + backend, + dsf, + iterations); + auto readTime = readBenchmark( + localCuboid.first, localCuboid.second, backend, iterations); + report.addReport( + rootThread, + compression, + compressionLevel, + backend, + size, + dt2, + iterations, + std::make_pair(writeTime, readTime)); } +} +template +template +template +void MPIBenchmark::BenchmarkExecution::operator()( + MPIBenchmarkReport &, int) +{ + throw std::runtime_error( + "Unknown/unsupported datatype requested to be benchmarked."); } +} // namespace openPMD + #endif diff --git a/include/openPMD/benchmark/mpi/MPIBenchmarkReport.hpp b/include/openPMD/benchmark/mpi/MPIBenchmarkReport.hpp index eb43458bc2..9b0377fbb6 100644 --- a/include/openPMD/benchmark/mpi/MPIBenchmarkReport.hpp +++ b/include/openPMD/benchmark/mpi/MPIBenchmarkReport.hpp @@ -27,361 +27,265 @@ #include "openPMD/Datatype.hpp" #include "openPMD/Series.hpp" +#include "string.h" #include +#include #include #include -#include -#include "string.h" - namespace openPMD { - /** - * The report for a single benchmark produced by . - * @tparam Duration Datatype to be used for storing a time interval. - */ - template< typename Duration > - struct MPIBenchmarkReport - { - MPI_Comm communicator; - - MPIBenchmarkReport(MPI_Comm); - - /** - * Time needed for writing and reading per compression strategy and level. - */ - std::map< - std::tuple< - int, // rank - std::string, // compression - uint8_t, // compression level - std::string, // extension - int, // thread size - Datatype, - typename decltype( Series::iterations )::key_type - >, - std::pair< - Duration, - Duration - > - > durations; - - enum Selector - { - RANK = 0, - COMPRESSION, - COMPRESSION_LEVEL, - BACKEND, - NRANKS, - DTYPE, - ITERATIONS - }; - - /** - * Add results for a certain compression strategy and level. - * - * @param rootThread The MPI rank which will collect the data. - * @param compression Compression strategy. - * @param level Compression level - * @param extension The openPMD filename extension. - * @param threadSize The MPI size. - * @param dt The openPMD datatype. - * @param iterations The number of iterations per compression strategy. - * @param report A pair of write and read time measurements. - */ - void addReport( - int rootThread, - std::string compression, - uint8_t level, - std::string extension, - int threadSize, - Datatype dt, - typename decltype( Series::iterations )::key_type iterations, - std::pair< - Duration, - Duration - > const & report - ); - - /** Retrieve the time measured for a certain compression strategy. - * - * @param rank Which MPI rank's duration results to retrieve. - * @param compression Compression strategy. - * @param level Compression level - * @param extension The openPMD filename extension. - * @param threadSize The MPI size. - * @param dt The openPMD datatype. - * @param iterations The number of iterations per compression strategy. - * @return A pair of write and read time measurements. 
- */ - std::pair< - Duration, - Duration - > getReport( - int rank, - std::string compression, - uint8_t level, - std::string extension, - int threadSize, - Datatype dt, - typename decltype( Series::iterations)::key_type iterations - ); - - private: - template< - typename D, - typename Dummy = D - > - struct MPIDatatype - { - }; +/** + * The report for a single benchmark produced by + * . + * @tparam Duration Datatype to be used for storing a time interval. + */ +template +struct MPIBenchmarkReport +{ + MPI_Comm communicator; + MPIBenchmarkReport(MPI_Comm); - template< typename Dummy > - struct MPIDatatype< - char, - Dummy - > - { - MPI_Datatype dt = MPI_CHAR; - }; - template< typename Dummy > - struct MPIDatatype< - unsigned char, - Dummy - > - { - MPI_Datatype dt = MPI_UNSIGNED_CHAR; - }; - template< typename Dummy > - struct MPIDatatype< - short, - Dummy - > - { - MPI_Datatype dt = MPI_SHORT; - }; - template< typename Dummy > - struct MPIDatatype< - int, - Dummy - > - { - MPI_Datatype dt = MPI_INT; - }; - template< typename Dummy > - struct MPIDatatype< - long, - Dummy - > - { - MPI_Datatype dt = MPI_LONG; - }; - template< typename Dummy > - struct MPIDatatype< - float, - Dummy - > - { - MPI_Datatype dt = MPI_FLOAT; - }; - template< typename Dummy > - struct MPIDatatype< - double, - Dummy - > - { - MPI_Datatype dt = MPI_DOUBLE; - }; - template< typename Dummy > - struct MPIDatatype< - unsigned short, - Dummy - > - { - MPI_Datatype dt = MPI_UNSIGNED_SHORT; - }; - template< typename Dummy > - struct MPIDatatype< - unsigned int, - Dummy - > - { - MPI_Datatype dt = MPI_UNSIGNED; - }; - template< typename Dummy > - struct MPIDatatype< - unsigned long, - Dummy - > - { - MPI_Datatype dt = MPI_UNSIGNED_LONG; - }; - template< typename Dummy > - struct MPIDatatype< - long double, - Dummy - > - { - MPI_Datatype dt = MPI_LONG_DOUBLE; - }; - template< typename Dummy > - struct MPIDatatype< - long long, - Dummy - > - { - MPI_Datatype dt = MPI_LONG_LONG_INT; - }; + /** + * Time needed for writing and reading per compression strategy and level. + */ + std::map< + std::tuple< + int, // rank + std::string, // compression + uint8_t, // compression level + std::string, // extension + int, // thread size + Datatype, + typename decltype(Series::iterations)::key_type>, + std::pair > + durations; - MPIDatatype< typename Duration::rep > m_mpiDatatype; - MPI_Datatype mpiType = m_mpiDatatype.dt; + enum Selector + { + RANK = 0, + COMPRESSION, + COMPRESSION_LEVEL, + BACKEND, + NRANKS, + DTYPE, + ITERATIONS }; - // implementation - - - template< typename Duration > - void MPIBenchmarkReport< Duration >::addReport( + /** + * Add results for a certain compression strategy and level. + * + * @param rootThread The MPI rank which will collect the data. + * @param compression Compression strategy. + * @param level Compression level + * @param extension The openPMD filename extension. + * @param threadSize The MPI size. + * @param dt The openPMD datatype. + * @param iterations The number of iterations per compression strategy. + * @param report A pair of write and read time measurements. 
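A sketch (not from the patch) of querying one entry of the report on the root rank; the arguments must match a configuration previously registered via addConfiguration(), and the concrete values shown here are placeholders.

#include <chrono>
#include <iostream>

template <typename Duration>
void printEntry(openPMD::MPIBenchmarkReport<Duration> &report, int mpiSize)
{
    auto times = report.getReport(
        /* rank */ 0,
        /* compression */ "",
        /* level */ 0,
        /* extension */ "h5",
        /* threadSize */ mpiSize,
        openPMD::Datatype::INT,
        /* iterations */ 10);

    using ms = std::chrono::milliseconds;
    std::cout << "write: "
              << std::chrono::duration_cast<ms>(times.first).count()
              << " ms, read: "
              << std::chrono::duration_cast<ms>(times.second).count() << " ms\n";
    // on non-root ranks the entry is absent and getReport() throws
}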
+ */ + void addReport( int rootThread, std::string compression, uint8_t level, std::string extension, int threadSize, Datatype dt, - typename decltype( Series::iterations )::key_type iterations, - std::pair< - Duration, - Duration - > const & report - ) - { - using rep = typename Duration::rep; - //auto mpi_dt = MPIDatatype::dt; - int rank; - MPI_Comm_rank( - communicator, - &rank - ); - int size; - MPI_Comm_size( - communicator, - &size - ); - MPI_Comm restricted; - MPI_Comm_split( - communicator, - rank < threadSize ? 0 : MPI_UNDEFINED, - rank, - &restricted - ); - rep readWrite[2]; - if( rank < threadSize ) - { - readWrite[0] = - report.first - .count( ); - readWrite[1] = - report.second - .count( ); - } - rep * recv = nullptr; - if( rank == rootThread ) - { - recv = new rep[2 * threadSize]; - } - - if( restricted != MPI_COMM_NULL ) - { - MPI_Gather( - readWrite, - 2, // should be 2 but doesnt work then.. - this->mpiType, - recv, - 2, - this->mpiType, - rootThread, - restricted - ); - } - - - if( rank == rootThread ) - { - for( int i = 0; i < threadSize; i++ ) - { - Duration dWrite { recv[2 * i] }; - Duration dRead { recv[2 * i + 1] }; - this->durations - .emplace( - std::make_tuple( - i, - compression, - level, - extension, - threadSize, - dt, - iterations - ), - std::make_pair( - dWrite, - dRead - ) - ); - } - delete[] recv; - } - if( restricted != MPI_COMM_NULL ) - { - MPI_Comm_free( &restricted ); - } - } + typename decltype(Series::iterations)::key_type iterations, + std::pair const &report); - template< typename Duration > - MPIBenchmarkReport< Duration >::MPIBenchmarkReport( MPI_Comm comm ): - communicator {comm} - {} - - template< typename Duration > - std::pair< - Duration, - Duration - > MPIBenchmarkReport< Duration >::getReport( + /** Retrieve the time measured for a certain compression strategy. + * + * @param rank Which MPI rank's duration results to retrieve. + * @param compression Compression strategy. + * @param level Compression level + * @param extension The openPMD filename extension. + * @param threadSize The MPI size. + * @param dt The openPMD datatype. + * @param iterations The number of iterations per compression strategy. + * @return A pair of write and read time measurements. + */ + std::pair getReport( int rank, std::string compression, uint8_t level, std::string extension, int threadSize, Datatype dt, - typename decltype( Series::iterations )::key_type iterations - ) + typename decltype(Series::iterations)::key_type iterations); + +private: + template + struct MPIDatatype + {}; + + template + struct MPIDatatype { - auto - it = - this->durations - .find( - std::make_tuple( - rank, - compression, - level, - extension, - threadSize, - dt, - iterations - ) - ); - if( it == - this->durations - .end( ) ) - { - throw std::runtime_error( "Requested report not found. 
(Reports are available on the root thread only)" ); - } - else + MPI_Datatype dt = MPI_CHAR; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_UNSIGNED_CHAR; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_SHORT; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_INT; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_LONG; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_FLOAT; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_DOUBLE; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_UNSIGNED_SHORT; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_UNSIGNED; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_UNSIGNED_LONG; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_LONG_DOUBLE; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_LONG_LONG_INT; + }; + + MPIDatatype m_mpiDatatype; + MPI_Datatype mpiType = m_mpiDatatype.dt; +}; + +// implementation + +template +void MPIBenchmarkReport::addReport( + int rootThread, + std::string compression, + uint8_t level, + std::string extension, + int threadSize, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations, + std::pair const &report) +{ + using rep = typename Duration::rep; + // auto mpi_dt = MPIDatatype::dt; + int rank; + MPI_Comm_rank(communicator, &rank); + int size; + MPI_Comm_size(communicator, &size); + MPI_Comm restricted; + MPI_Comm_split( + communicator, rank < threadSize ? 0 : MPI_UNDEFINED, rank, &restricted); + rep readWrite[2]; + if (rank < threadSize) + { + readWrite[0] = report.first.count(); + readWrite[1] = report.second.count(); + } + rep *recv = nullptr; + if (rank == rootThread) + { + recv = new rep[2 * threadSize]; + } + + if (restricted != MPI_COMM_NULL) + { + MPI_Gather( + readWrite, + 2, // should be 2 but doesnt work then.. + this->mpiType, + recv, + 2, + this->mpiType, + rootThread, + restricted); + } + + if (rank == rootThread) + { + for (int i = 0; i < threadSize; i++) { - return it->second; + Duration dWrite{recv[2 * i]}; + Duration dRead{recv[2 * i + 1]}; + this->durations.emplace( + std::make_tuple( + i, + compression, + level, + extension, + threadSize, + dt, + iterations), + std::make_pair(dWrite, dRead)); } + delete[] recv; } + if (restricted != MPI_COMM_NULL) + { + MPI_Comm_free(&restricted); + } +} + +template +MPIBenchmarkReport::MPIBenchmarkReport(MPI_Comm comm) + : communicator{comm} +{} +template +std::pair MPIBenchmarkReport::getReport( + int rank, + std::string compression, + uint8_t level, + std::string extension, + int threadSize, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations) +{ + auto it = this->durations.find(std::make_tuple( + rank, compression, level, extension, threadSize, dt, iterations)); + if (it == this->durations.end()) + { + throw std::runtime_error( + "Requested report not found. 
(Reports are available on the root " + "thread only)"); + } + else + { + return it->second; + } } +} // namespace openPMD + #endif diff --git a/include/openPMD/benchmark/mpi/OneDimensionalBlockSlicer.hpp b/include/openPMD/benchmark/mpi/OneDimensionalBlockSlicer.hpp index 9569fd16b7..78f955524b 100644 --- a/include/openPMD/benchmark/mpi/OneDimensionalBlockSlicer.hpp +++ b/include/openPMD/benchmark/mpi/OneDimensionalBlockSlicer.hpp @@ -24,24 +24,16 @@ #include "openPMD/Dataset.hpp" #include "openPMD/benchmark/mpi/BlockSlicer.hpp" - namespace openPMD { - class OneDimensionalBlockSlicer : - public BlockSlicer - { - public: - Extent::value_type m_dim; +class OneDimensionalBlockSlicer : public BlockSlicer +{ +public: + Extent::value_type m_dim; - explicit OneDimensionalBlockSlicer( Extent::value_type dim = 0 ); + explicit OneDimensionalBlockSlicer(Extent::value_type dim = 0); - std::pair< - Offset, - Extent - > sliceBlock( - Extent & totalExtent, - int size, - int rank - ) override; - }; -} + std::pair + sliceBlock(Extent &totalExtent, int size, int rank) override; +}; +} // namespace openPMD diff --git a/include/openPMD/benchmark/mpi/RandomDatasetFiller.hpp b/include/openPMD/benchmark/mpi/RandomDatasetFiller.hpp index c384eeb5aa..786d4a134d 100644 --- a/include/openPMD/benchmark/mpi/RandomDatasetFiller.hpp +++ b/include/openPMD/benchmark/mpi/RandomDatasetFiller.hpp @@ -21,132 +21,98 @@ #pragma once - -#include "openPMD/benchmark/mpi/DatasetFiller.hpp" #include "openPMD/Dataset.hpp" -#include +#include "openPMD/benchmark/mpi/DatasetFiller.hpp" #include - +#include namespace openPMD { - template< - typename Distr, - typename T = typename Distr::result_type - > - class RandomDatasetFiller : - public DatasetFiller< T > - { - - private: - Distr distr; - std::default_random_engine engine; - std::shared_ptr< T > buffered; - public: - using resultType = T; - - - explicit RandomDatasetFiller( - Distr distribution, - Extent::value_type numOfItems = 0 - ) : - DatasetFiller< T >( numOfItems ), - distr( distribution ) - {} - - - std::shared_ptr< T > produceData( ) override - { - if( this->buffered ) - { - return buffered; - } - auto res = std::shared_ptr< T > { - new T[this->m_numberOfItems], - []( T * d ) - { - delete[] d; - } - }; - auto ptr = res.get( ); - for( typename Extent::value_type i = 0; - i < this->m_numberOfItems; - i++ ) - { - ptr[i] = this->distr( this->engine ); - } - return res; - } +template +class RandomDatasetFiller : public DatasetFiller +{ +private: + Distr distr; + std::default_random_engine engine; + std::shared_ptr buffered; - /** - * - * @tparam X Dummy template parameter such that the RandomDatasetFiller is - * usable also when this function's implementation does not work on the - * distribution's concrete type. - * @param numberOfItems Number of items to be produced per call of - * produceData. - * @param lower Lower bound for the random values to be generated. - * @param upper Upper bound for the random values to be generated. - * @return An instance of RandomDatasetFiller matching the given parameters. 
- */ - template< typename X = Distr > - static RandomDatasetFiller< - X, - T - > makeRandomDatasetFiller( - Extent::value_type numberOfItems, - typename X::result_type lower, - typename X::result_type upper - ) - { - return RandomDatasetFiller< X >( - X( - lower, - upper - ), - numberOfItems - ); - } +public: + using resultType = T; + explicit RandomDatasetFiller( + Distr distribution, Extent::value_type numOfItems = 0) + : DatasetFiller(numOfItems), distr(distribution) + {} - void setSeed( std::default_random_engine::result_type seed ) + std::shared_ptr produceData() override + { + if (this->buffered) { - this->engine = std::default_random_engine( seed ); + return buffered; } - - - void randomSeed( ) + auto res = std::shared_ptr{ + new T[this->m_numberOfItems], [](T *d) { delete[] d; }}; + auto ptr = res.get(); + for (typename Extent::value_type i = 0; i < this->m_numberOfItems; i++) { - std::random_device rd; - this->engine = std::default_random_engine( rd( ) ); + ptr[i] = this->distr(this->engine); } + return res; + } + + /** + * + * @tparam X Dummy template parameter such that the RandomDatasetFiller is + * usable also when this function's implementation does not work on the + * distribution's concrete type. + * @param numberOfItems Number of items to be produced per call of + * produceData. + * @param lower Lower bound for the random values to be generated. + * @param upper Upper bound for the random values to be generated. + * @return An instance of RandomDatasetFiller matching the given parameters. + */ + template + static RandomDatasetFiller makeRandomDatasetFiller( + Extent::value_type numberOfItems, + typename X::result_type lower, + typename X::result_type upper) + { + return RandomDatasetFiller(X(lower, upper), numberOfItems); + } + void setSeed(std::default_random_engine::result_type seed) + { + this->engine = std::default_random_engine(seed); + } - /** - * Activate buffer mode. Create a bunch of data to write (instantly) - * and return that upon calling (). - */ - void bufferMode( ) + void randomSeed() + { + std::random_device rd; + this->engine = std::default_random_engine(rd()); + } + + /** + * Activate buffer mode. Create a bunch of data to write (instantly) + * and return that upon calling (). 
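For illustration only: driving the RandomDatasetFiller on its own, outside of a benchmark. The item counts, bounds and seed are placeholders.

#include "openPMD/benchmark/mpi/RandomDatasetFiller.hpp"
#include <memory>
#include <random>

inline void fillerSketch()
{
    using namespace openPMD;
    using Distr = std::uniform_real_distribution<double>;

    // 1024 doubles per produceData() call, drawn uniformly from [0, 1)
    auto filler =
        RandomDatasetFiller<Distr>::makeRandomDatasetFiller(1024, 0.0, 1.0);

    filler.setSeed(42);  // reproducible; randomSeed() would pull from std::random_device
    filler.bufferMode(); // generate once now, hand out the same buffer afterwards

    std::shared_ptr<double> data = filler.produceData(); // the buffered 1024 values
    filler.setNumberOfItems(2048); // drops the buffer and regenerates it at the new size
    (void)data;
}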
+ */ + void bufferMode() + { + if (!this->buffered) { - if( !this->buffered ) - { - this->buffered = this->produceData( ); - } + this->buffered = this->produceData(); } + } - - void setNumberOfItems( Extent::value_type numItems ) override + void setNumberOfItems(Extent::value_type numItems) override + { + this->m_numberOfItems = numItems; + if (this->buffered) { - this->m_numberOfItems = numItems; - if( this->buffered ) - { - this->buffered - .reset( ); - this->buffered = this->produceData( ); - } + this->buffered.reset(); + this->buffered = this->produceData(); } + } +}; - }; - -} +} // namespace openPMD diff --git a/include/openPMD/binding/python/Numpy.hpp b/include/openPMD/binding/python/Numpy.hpp index 68bf131a9b..8e88994148 100644 --- a/include/openPMD/binding/python/Numpy.hpp +++ b/include/openPMD/binding/python/Numpy.hpp @@ -23,197 +23,198 @@ #include "openPMD/Datatype.hpp" #include "openPMD/binding/python/Variant.hpp" +#include #include #include -#include -#include #include - +#include namespace openPMD { - inline Datatype - dtype_from_numpy( pybind11::dtype const dt ) +inline Datatype dtype_from_numpy(pybind11::dtype const dt) +{ + // ref: https://docs.scipy.org/doc/numpy/user/basics.types.html + // ref: https://github.com/numpy/numpy/issues/10678#issuecomment-369363551 + if (dt.is(pybind11::dtype("b"))) + return Datatype::CHAR; + else if (dt.is(pybind11::dtype("B"))) + return Datatype::UCHAR; + else if (dt.is(pybind11::dtype("short"))) + return Datatype::SHORT; + else if (dt.is(pybind11::dtype("intc"))) + return Datatype::INT; + else if (dt.is(pybind11::dtype("int_"))) + return Datatype::LONG; + else if (dt.is(pybind11::dtype("longlong"))) + return Datatype::LONGLONG; + else if (dt.is(pybind11::dtype("ushort"))) + return Datatype::USHORT; + else if (dt.is(pybind11::dtype("uintc"))) + return Datatype::UINT; + else if (dt.is(pybind11::dtype("uint"))) + return Datatype::ULONG; + else if (dt.is(pybind11::dtype("ulonglong"))) + return Datatype::ULONGLONG; + else if (dt.is(pybind11::dtype("clongdouble"))) + return Datatype::CLONG_DOUBLE; + else if (dt.is(pybind11::dtype("cdouble"))) + return Datatype::CDOUBLE; + else if (dt.is(pybind11::dtype("csingle"))) + return Datatype::CFLOAT; + else if (dt.is(pybind11::dtype("longdouble"))) + return Datatype::LONG_DOUBLE; + else if (dt.is(pybind11::dtype("double"))) + return Datatype::DOUBLE; + else if (dt.is(pybind11::dtype("single"))) + return Datatype::FLOAT; + else if (dt.is(pybind11::dtype("bool"))) + return Datatype::BOOL; + else { - // ref: https://docs.scipy.org/doc/numpy/user/basics.types.html - // ref: https://github.com/numpy/numpy/issues/10678#issuecomment-369363551 - if( dt.is(pybind11::dtype("b")) ) - return Datatype::CHAR; - else if( dt.is(pybind11::dtype("B")) ) - return Datatype::UCHAR; - else if( dt.is(pybind11::dtype("short")) ) - return Datatype::SHORT; - else if( dt.is(pybind11::dtype("intc")) ) - return Datatype::INT; - else if( dt.is(pybind11::dtype("int_")) ) - return Datatype::LONG; - else if( dt.is(pybind11::dtype("longlong")) ) - return Datatype::LONGLONG; - else if( dt.is(pybind11::dtype("ushort")) ) - return Datatype::USHORT; - else if( dt.is(pybind11::dtype("uintc")) ) - return Datatype::UINT; - else if( dt.is(pybind11::dtype("uint")) ) - return Datatype::ULONG; - else if( dt.is(pybind11::dtype("ulonglong")) ) - return Datatype::ULONGLONG; - else if( dt.is(pybind11::dtype("clongdouble")) ) - return Datatype::CLONG_DOUBLE; - else if( dt.is(pybind11::dtype("cdouble")) ) - return Datatype::CDOUBLE; - else if( 
dt.is(pybind11::dtype("csingle")) ) - return Datatype::CFLOAT; - else if( dt.is(pybind11::dtype("longdouble")) ) - return Datatype::LONG_DOUBLE; - else if( dt.is(pybind11::dtype("double")) ) - return Datatype::DOUBLE; - else if( dt.is(pybind11::dtype("single")) ) - return Datatype::FLOAT; - else if( dt.is(pybind11::dtype("bool")) ) - return Datatype::BOOL; - else { - pybind11::print(dt); - throw std::runtime_error("Datatype '...' not known in 'dtype_from_numpy'!"); // _s.format(dt) - } + pybind11::print(dt); + throw std::runtime_error( + "Datatype '...' not known in 'dtype_from_numpy'!"); // _s.format(dt) } +} - /** Return openPMD::Datatype from py::buffer_info::format - */ - inline Datatype - dtype_from_bufferformat( std::string const & fmt ) - { - using DT = Datatype; +/** Return openPMD::Datatype from py::buffer_info::format + */ +inline Datatype dtype_from_bufferformat(std::string const &fmt) +{ + using DT = Datatype; - // refs: - // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html - // https://docs.python.org/3/library/struct.html#format-characters - // std::cout << " scalar type '" << fmt << "'" << std::endl; - // typestring: encoding + type + number of bytes - if( fmt.find("?") != std::string::npos ) - return DT::BOOL; - else if( fmt.find("b") != std::string::npos ) - return DT::CHAR; - else if( fmt.find("h") != std::string::npos ) - return DT::SHORT; - else if( fmt.find("i") != std::string::npos ) - return DT::INT; - else if( fmt.find("l") != std::string::npos ) - return DT::LONG; - else if( fmt.find("q") != std::string::npos ) - return DT::LONGLONG; - else if( fmt.find("B") != std::string::npos ) - return DT::UCHAR; - else if( fmt.find("H") != std::string::npos ) - return DT::USHORT; - else if( fmt.find("I") != std::string::npos ) - return DT::UINT; - else if( fmt.find("L") != std::string::npos ) - return DT::ULONG; - else if( fmt.find("Q") != std::string::npos ) - return DT::ULONGLONG; - else if( fmt.find("Zf") != std::string::npos ) - return DT::CFLOAT; - else if( fmt.find("Zd") != std::string::npos ) - return DT::CDOUBLE; - else if( fmt.find("Zg") != std::string::npos ) - return DT::CLONG_DOUBLE; - else if( fmt.find("f") != std::string::npos ) - return DT::FLOAT; - else if( fmt.find("d") != std::string::npos ) - return DT::DOUBLE; - else if( fmt.find("g") != std::string::npos ) - return DT::LONG_DOUBLE; - else - throw std::runtime_error("dtype_from_bufferformat: Unknown " - "Python type '" + fmt + "'"); - } + // refs: + // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html + // https://docs.python.org/3/library/struct.html#format-characters + // std::cout << " scalar type '" << fmt << "'" << std::endl; + // typestring: encoding + type + number of bytes + if (fmt.find("?") != std::string::npos) + return DT::BOOL; + else if (fmt.find("b") != std::string::npos) + return DT::CHAR; + else if (fmt.find("h") != std::string::npos) + return DT::SHORT; + else if (fmt.find("i") != std::string::npos) + return DT::INT; + else if (fmt.find("l") != std::string::npos) + return DT::LONG; + else if (fmt.find("q") != std::string::npos) + return DT::LONGLONG; + else if (fmt.find("B") != std::string::npos) + return DT::UCHAR; + else if (fmt.find("H") != std::string::npos) + return DT::USHORT; + else if (fmt.find("I") != std::string::npos) + return DT::UINT; + else if (fmt.find("L") != std::string::npos) + return DT::ULONG; + else if (fmt.find("Q") != std::string::npos) + return DT::ULONGLONG; + else if (fmt.find("Zf") != std::string::npos) + return 
DT::CFLOAT; + else if (fmt.find("Zd") != std::string::npos) + return DT::CDOUBLE; + else if (fmt.find("Zg") != std::string::npos) + return DT::CLONG_DOUBLE; + else if (fmt.find("f") != std::string::npos) + return DT::FLOAT; + else if (fmt.find("d") != std::string::npos) + return DT::DOUBLE; + else if (fmt.find("g") != std::string::npos) + return DT::LONG_DOUBLE; + else + throw std::runtime_error( + "dtype_from_bufferformat: Unknown " + "Python type '" + + fmt + "'"); +} - inline pybind11::dtype - dtype_to_numpy( Datatype const dt ) +inline pybind11::dtype dtype_to_numpy(Datatype const dt) +{ + using DT = Datatype; + switch (dt) { - using DT = Datatype; - switch( dt ) - { - case DT::CHAR: - case DT::VEC_CHAR: - case DT::STRING: - case DT::VEC_STRING: - return pybind11::dtype("b"); - break; - case DT::UCHAR: - case DT::VEC_UCHAR: - return pybind11::dtype("B"); - break; - // case DT::SCHAR: - // case DT::VEC_SCHAR: - // pybind11::dtype("b"); - // break; - case DT::SHORT: - case DT::VEC_SHORT: - return pybind11::dtype("short"); - break; - case DT::INT: - case DT::VEC_INT: - return pybind11::dtype("intc"); - break; - case DT::LONG: - case DT::VEC_LONG: - return pybind11::dtype("int_"); - break; - case DT::LONGLONG: - case DT::VEC_LONGLONG: - return pybind11::dtype("longlong"); - break; - case DT::USHORT: - case DT::VEC_USHORT: - return pybind11::dtype("ushort"); - break; - case DT::UINT: - case DT::VEC_UINT: - return pybind11::dtype("uintc"); - break; - case DT::ULONG: - case DT::VEC_ULONG: - return pybind11::dtype("uint"); - break; - case DT::ULONGLONG: - case DT::VEC_ULONGLONG: - return pybind11::dtype("ulonglong"); - break; - case DT::FLOAT: - case DT::VEC_FLOAT: - return pybind11::dtype("single"); - break; - case DT::DOUBLE: - case DT::VEC_DOUBLE: - case DT::ARR_DBL_7: - return pybind11::dtype("double"); - break; - case DT::LONG_DOUBLE: - case DT::VEC_LONG_DOUBLE: - return pybind11::dtype("longdouble"); - break; - case DT::CFLOAT: - case DT::VEC_CFLOAT: - return pybind11::dtype("csingle"); - break; - case DT::CDOUBLE: - case DT::VEC_CDOUBLE: - return pybind11::dtype("cdouble"); - break; - case DT::CLONG_DOUBLE: - case DT::VEC_CLONG_DOUBLE: - return pybind11::dtype("clongdouble"); - break; - case DT::BOOL: - return pybind11::dtype("bool"); // also "?" 
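As a rough sketch of the round trip that dtype_from_numpy and dtype_to_numpy implement (only meaningful inside an active Python interpreter, e.g. within a pybind11 module or a pybind11::scoped_interpreter):

#include "openPMD/binding/python/Numpy.hpp"

void dtypeRoundTrip()
{
    using namespace openPMD;

    pybind11::dtype np = dtype_to_numpy(Datatype::DOUBLE); // numpy "double"
    Datatype back = dtype_from_numpy(np);                  // Datatype::DOUBLE again

    // Vector types collapse onto their scalar element type on the numpy side,
    // so VEC_DOUBLE also maps to "double" and is not distinguishable on the way back.
    (void)back;
}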
- break; - case DT::DATATYPE: - case DT::UNDEFINED: - default: - throw std::runtime_error("dtype_to_numpy: Invalid Datatype '{...}'!"); // _s.format(dt) - break; - } + case DT::CHAR: + case DT::VEC_CHAR: + case DT::STRING: + case DT::VEC_STRING: + return pybind11::dtype("b"); + break; + case DT::UCHAR: + case DT::VEC_UCHAR: + return pybind11::dtype("B"); + break; + // case DT::SCHAR: + // case DT::VEC_SCHAR: + // pybind11::dtype("b"); + // break; + case DT::SHORT: + case DT::VEC_SHORT: + return pybind11::dtype("short"); + break; + case DT::INT: + case DT::VEC_INT: + return pybind11::dtype("intc"); + break; + case DT::LONG: + case DT::VEC_LONG: + return pybind11::dtype("int_"); + break; + case DT::LONGLONG: + case DT::VEC_LONGLONG: + return pybind11::dtype("longlong"); + break; + case DT::USHORT: + case DT::VEC_USHORT: + return pybind11::dtype("ushort"); + break; + case DT::UINT: + case DT::VEC_UINT: + return pybind11::dtype("uintc"); + break; + case DT::ULONG: + case DT::VEC_ULONG: + return pybind11::dtype("uint"); + break; + case DT::ULONGLONG: + case DT::VEC_ULONGLONG: + return pybind11::dtype("ulonglong"); + break; + case DT::FLOAT: + case DT::VEC_FLOAT: + return pybind11::dtype("single"); + break; + case DT::DOUBLE: + case DT::VEC_DOUBLE: + case DT::ARR_DBL_7: + return pybind11::dtype("double"); + break; + case DT::LONG_DOUBLE: + case DT::VEC_LONG_DOUBLE: + return pybind11::dtype("longdouble"); + break; + case DT::CFLOAT: + case DT::VEC_CFLOAT: + return pybind11::dtype("csingle"); + break; + case DT::CDOUBLE: + case DT::VEC_CDOUBLE: + return pybind11::dtype("cdouble"); + break; + case DT::CLONG_DOUBLE: + case DT::VEC_CLONG_DOUBLE: + return pybind11::dtype("clongdouble"); + break; + case DT::BOOL: + return pybind11::dtype("bool"); // also "?" + break; + case DT::DATATYPE: + case DT::UNDEFINED: + default: + throw std::runtime_error( + "dtype_to_numpy: Invalid Datatype '{...}'!"); // _s.format(dt) + break; } +} } // namespace openPMD diff --git a/include/openPMD/binding/python/Pickle.hpp b/include/openPMD/binding/python/Pickle.hpp index d455ded75c..a57a63d447 100644 --- a/include/openPMD/binding/python/Pickle.hpp +++ b/include/openPMD/binding/python/Pickle.hpp @@ -20,9 +20,9 @@ */ #pragma once -#include "openPMD/backend/Attributable.hpp" #include "openPMD/IO/Access.hpp" #include "openPMD/Series.hpp" +#include "openPMD/backend/Attributable.hpp" #include #include @@ -32,60 +32,50 @@ #include #include - namespace openPMD { - /** Helper to Pickle Attributable Classes - * - * @tparam T_Args the types in pybind11::class_ - the first type will be pickled - * @tparam T_SeriesAccessor During unpickle, this accesses the object inside - * a newly constructed series - * @param cl the pybind11 class that gets the pickle methods defined - * @param seriesAccessor accessor from series to object during unpickling - */ - template< typename... T_Args, typename T_SeriesAccessor > - inline void - add_pickle( - pybind11::class_< T_Args... 
> & cl, - T_SeriesAccessor && seriesAccessor - ) - { - namespace py = pybind11; +/** Helper to Pickle Attributable Classes + * + * @tparam T_Args the types in pybind11::class_ - the first type will be pickled + * @tparam T_SeriesAccessor During unpickle, this accesses the object inside + * a newly constructed series + * @param cl the pybind11 class that gets the pickle methods defined + * @param seriesAccessor accessor from series to object during unpickling + */ +template +inline void +add_pickle(pybind11::class_ &cl, T_SeriesAccessor &&seriesAccessor) +{ + namespace py = pybind11; - // helper: get first class in py::class_ - that's the type we pickle - using PickledClass = typename std::tuple_element< - 0, - std::tuple< T_Args... > - >::type; + // helper: get first class in py::class_ - that's the type we pickle + using PickledClass = + typename std::tuple_element<0, std::tuple >::type; - cl.def(py::pickle( - // __getstate__ - []( const PickledClass &a ) { - // Return a tuple that fully encodes the state of the object - Attributable::MyPath const myPath = a.myPath(); - return py::make_tuple( myPath.filePath(), myPath.group ); - }, + cl.def(py::pickle( + // __getstate__ + [](const PickledClass &a) { + // Return a tuple that fully encodes the state of the object + Attributable::MyPath const myPath = a.myPath(); + return py::make_tuple(myPath.filePath(), myPath.group); + }, - // __setstate__ - [&seriesAccessor]( py::tuple t ) { - // our tuple has exactly two elements: filePath & group - if (t.size() != 2) - throw std::runtime_error("Invalid state!"); + // __setstate__ + [&seriesAccessor](py::tuple t) { + // our tuple has exactly two elements: filePath & group + if (t.size() != 2) + throw std::runtime_error("Invalid state!"); - std::string const filename = t[0].cast< std::string >(); - std::vector< std::string > const group = - t[1].cast< std::vector< std::string > >(); + std::string const filename = t[0].cast(); + std::vector const group = + t[1].cast >(); - // Create a new openPMD Series and keep it alive. - // This is a big hack for now, but it works for our use - // case, which is spinning up remote serial read series - // for DASK. - static auto series = openPMD::Series( - filename, - Access::READ_ONLY - ); - return seriesAccessor( series, group ); - } - )); - } + // Create a new openPMD Series and keep it alive. + // This is a big hack for now, but it works for our use + // case, which is spinning up remote serial read series + // for DASK. + static auto series = openPMD::Series(filename, Access::READ_ONLY); + return seriesAccessor(series, group); + })); +} } // namespace openPMD diff --git a/include/openPMD/binding/python/Variant.hpp b/include/openPMD/binding/python/Variant.hpp index 66a5ddd497..bfa72064f3 100644 --- a/include/openPMD/binding/python/Variant.hpp +++ b/include/openPMD/binding/python/Variant.hpp @@ -29,22 +29,26 @@ // https://github.com/pybind/pybind11/pull/811 // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html // in C++17 mode already defined in -#if __cplusplus < 201703L -namespace pybind11 { -namespace detail { - template< typename... Ts > - struct type_caster< variantSrc::variant< Ts... > > : - variant_caster< variantSrc::variant< Ts... > > +#if openPMD_HAS_CXX17 == 0 +namespace pybind11 +{ +namespace detail +{ + template + struct type_caster > + : variant_caster > {}; template <> - struct visit_helper< variantSrc::variant > { + struct visit_helper + { template - static auto call(Args &&...args) -> decltype( variantSrc::visit(std::forward(args)...) 
) { + static auto call(Args &&...args) + -> decltype(variantSrc::visit(std::forward(args)...)) + { return variantSrc::visit(std::forward(args)...); } }; } // namespace detail } // namespace pybind11 #endif - diff --git a/include/openPMD/cli/ls.hpp b/include/openPMD/cli/ls.hpp index cefe36682d..1d2313e250 100644 --- a/include/openPMD/cli/ls.hpp +++ b/include/openPMD/cli/ls.hpp @@ -28,90 +28,103 @@ #include #include - namespace openPMD { namespace cli { -namespace ls -{ - inline void - print_help( std::string const program_name ) - { - std::cout << "Usage: " << program_name << " openPMD-series\n"; - std::cout << "List information about an openPMD data series.\n\n"; - std::cout << "Options:\n"; - std::cout << " -h, --help display this help and exit\n"; - std::cout << " -v, --version output version information and exit\n"; - std::cout << "\n"; - std::cout << "Examples:\n"; - std::cout << " " << program_name << " ./samples/git-sample/data%T.h5\n"; - std::cout << " " << program_name << " ./samples/git-sample/data%08T.h5\n"; - std::cout << " " << program_name << " ./samples/serial_write.json\n"; - std::cout << " " << program_name << " ./samples/serial_patch.bp\n"; - } - - inline void - print_version( std::string const program_name ) - { - std::cout << program_name << " (openPMD-api) " - << getVersion() << "\n"; - std::cout << "Copyright 2017-2021 openPMD contributors\n"; - std::cout << "Authors: Axel Huebl et al.\n"; - std::cout << "License: LGPLv3+\n"; - std::cout << "This is free software: you are free to change and redistribute it.\n" - "There is NO WARRANTY, to the extent permitted by law.\n"; - } - - /** Run the openpmd-ls command line tool - * - * @param argv command line arguments 1-N - * @return exit code (zero for success) - */ - inline int - run( std::vector< std::string > const & argv ) + namespace ls { - using namespace openPMD; - auto const argc = argv.size(); + inline void print_help(std::string const program_name) + { + std::cout << "Usage: " << program_name << " openPMD-series\n"; + std::cout << "List information about an openPMD data series.\n\n"; + std::cout << "Options:\n"; + std::cout << " -h, --help display this help and exit\n"; + std::cout + << " -v, --version output version information and exit\n"; + std::cout << "\n"; + std::cout << "Examples:\n"; + std::cout << " " << program_name + << " ./samples/git-sample/data%T.h5\n"; + std::cout << " " << program_name + << " ./samples/git-sample/data%08T.h5\n"; + std::cout << " " << program_name + << " ./samples/serial_write.json\n"; + std::cout << " " << program_name + << " ./samples/serial_patch.bp\n"; + } - if (argc < 2) { - print_help(argv[0]); - return 0; + inline void print_version(std::string const program_name) + { + std::cout << program_name << " (openPMD-api) " << getVersion() + << "\n"; + std::cout << "Copyright 2017-2021 openPMD contributors\n"; + std::cout << "Authors: Axel Huebl et al.\n"; + std::cout << "License: LGPLv3+\n"; + std::cout + << "This is free software: you are free to change and " + "redistribute it.\n" + "There is NO WARRANTY, to the extent permitted by law.\n"; } - for (int c = 1; c < int(argc); c++) { - if (std::string("--help") == argv[c] || std::string("-h") == argv[c]) { + /** Run the openpmd-ls command line tool + * + * @param argv command line arguments 1-N + * @return exit code (zero for success) + */ + inline int run(std::vector const &argv) + { + using namespace openPMD; + auto const argc = argv.size(); + + if (argc < 2) + { print_help(argv[0]); return 0; } - if (std::string("--version") == 
argv[c] || std::string("-v") == argv[c]) { - print_version(argv[0]); - return 0; + + for (int c = 1; c < int(argc); c++) + { + if (std::string("--help") == argv[c] || + std::string("-h") == argv[c]) + { + print_help(argv[0]); + return 0; + } + if (std::string("--version") == argv[c] || + std::string("-v") == argv[c]) + { + print_version(argv[0]); + return 0; + } } - } - if (argc > 2) { - std::cerr << "Too many arguments! See: " << argv[0] << " --help\n"; - return 1; - } + if (argc > 2) + { + std::cerr << "Too many arguments! See: " << argv[0] + << " --help\n"; + return 1; + } - try { - auto s = Series( - argv[1], - Access::READ_ONLY, - R"({"defer_iteration_parsing": true})" - ); + try + { + auto s = Series( + argv[1], + Access::READ_ONLY, + R"({"defer_iteration_parsing": true})"); - helper::listSeries(s, true, std::cout); - } - catch (std::exception const &e) { - std::cerr << "An error occurred while opening the specified openPMD series!\n"; - std::cerr << e.what() << std::endl; - return 2; - } + helper::listSeries(s, true, std::cout); + } + catch (std::exception const &e) + { + std::cerr << "An error occurred while opening the specified " + "openPMD series!\n"; + std::cerr << e.what() << std::endl; + return 2; + } - return 0; - } -} // namespace ls + return 0; + } + } // namespace ls } // namespace cli } // namespace openPMD diff --git a/include/openPMD/helper/list_series.hpp b/include/openPMD/helper/list_series.hpp index 1aba8a7b37..de0d5aca40 100644 --- a/include/openPMD/helper/list_series.hpp +++ b/include/openPMD/helper/list_series.hpp @@ -22,9 +22,8 @@ #include "openPMD/Series.hpp" -#include #include - +#include namespace openPMD { @@ -35,13 +34,12 @@ namespace helper * @param series a openPMD data path as in Series::Series * @param longer write more information * @param out an output stream to write textual information to - * @return reference to out as output stream, e.g. to pass the stream on via `operator<<` + * @return reference to out as output stream, e.g. 
to pass the stream on via + * `operator<<` */ - std::ostream & - listSeries( - Series & series, + std::ostream &listSeries( + Series &series, bool const longer = false, - std::ostream & out = std::cout - ); -} // helper -} // openPMD + std::ostream &out = std::cout); +} // namespace helper +} // namespace openPMD diff --git a/include/openPMD/openPMD.hpp b/include/openPMD/openPMD.hpp index d385709628..69e53e5457 100644 --- a/include/openPMD/openPMD.hpp +++ b/include/openPMD/openPMD.hpp @@ -22,27 +22,28 @@ /** Public definitions of openPMD-api */ -namespace openPMD {} +namespace openPMD +{} // IWYU pragma: begin_exports #include "openPMD/Dataset.hpp" #include "openPMD/Datatype.hpp" -#include "openPMD/IterationEncoding.hpp" #include "openPMD/Iteration.hpp" +#include "openPMD/IterationEncoding.hpp" #include "openPMD/Mesh.hpp" #include "openPMD/ParticlePatches.hpp" #include "openPMD/ParticleSpecies.hpp" #include "openPMD/ReadIterations.hpp" -#include "openPMD/RecordComponent.hpp" #include "openPMD/Record.hpp" +#include "openPMD/RecordComponent.hpp" #include "openPMD/Series.hpp" #include "openPMD/UnitDimension.hpp" #include "openPMD/WriteIterations.hpp" #include "openPMD/backend/Attributable.hpp" #include "openPMD/backend/Attribute.hpp" -#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/backend/BaseRecord.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/backend/Container.hpp" #include "openPMD/backend/MeshRecordComponent.hpp" #include "openPMD/backend/PatchRecord.hpp" diff --git a/include/openPMD/version.hpp b/include/openPMD/version.hpp index 114221b26d..e91bf87599 100644 --- a/include/openPMD/version.hpp +++ b/include/openPMD/version.hpp @@ -29,11 +29,12 @@ */ #define OPENPMDAPI_VERSION_MAJOR 0 #define OPENPMDAPI_VERSION_MINOR 14 -#define OPENPMDAPI_VERSION_PATCH 4 +#define OPENPMDAPI_VERSION_PATCH 5 #define OPENPMDAPI_VERSION_LABEL "" /** @} */ -/** maximum supported version of the openPMD standard (read & write, compile-time) +/** maximum supported version of the openPMD standard (read & write, + * compile-time) * @{ */ #define OPENPMD_STANDARD_MAJOR 1 @@ -51,49 +52,51 @@ /** convert major, minor, patch version into a 1000th-interleaved number */ -#define OPENPMDAPI_VERSIONIFY(major,minor,patch) (major * 1000000 + minor * 1000 + patch) +#define OPENPMDAPI_VERSIONIFY(major, minor, patch) \ + (major * 1000000 + minor * 1000 + patch) /** Compare if the library version is greater or equal than major,minor,patch */ -#define OPENPMDAPI_VERSION_GE(major,minor,patch) \ - (OPENPMDAPI_VERSIONIFY(OPENPMDAPI_VERSION_MAJOR,OPENPMDAPI_VERSION_MINOR,OPENPMDAPI_VERSION_PATCH) >= \ - OPENPMDAPI_VERSIONIFY(major,minor,patch)) +#define OPENPMDAPI_VERSION_GE(major, minor, patch) \ + (OPENPMDAPI_VERSIONIFY( \ + OPENPMDAPI_VERSION_MAJOR, \ + OPENPMDAPI_VERSION_MINOR, \ + OPENPMDAPI_VERSION_PATCH) >= \ + OPENPMDAPI_VERSIONIFY(major, minor, patch)) namespace openPMD { - /** Return the version of the openPMD-api library (run-time) - * - * @return std::string API version (dot separated) - */ - std::string - getVersion( ); +/** Return the version of the openPMD-api library (run-time) + * + * @return std::string API version (dot separated) + */ +std::string getVersion(); - /** Return the maximum supported version of the openPMD standard (read & write, run-time) - * - * @return std::string openPMD standard version (dot separated) - */ - std::string - getStandard( ); +/** Return the maximum supported version of the openPMD standard (read & write, + * run-time) + * + * @return 
std::string openPMD standard version (dot separated)
+ */
+std::string getStandard();

-    /** Return the minimum supported version of the openPMD standard (read, run-time)
-     *
-     * @return std::string minimum openPMD standard version (dot separated)
-     */
-    std::string
-    getStandardMinimum( );
+/** Return the minimum supported version of the openPMD standard (read,
+ * run-time)
+ *
+ * @return std::string minimum openPMD standard version (dot separated)
+ */
+std::string getStandardMinimum();

-    /** Return the feature variants of the openPMD-api library (run-time)
-     *
-     * @return std::map< std::string, bool > with variants such as backends
-     */
-    std::map< std::string, bool >
-    getVariants( );
+/** Return the feature variants of the openPMD-api library (run-time)
+ *
+ * @return std::map< std::string, bool > with variants such as backends
+ */
+std::map<std::string, bool> getVariants();

-    /** Return the file extensions supported in this variant of the openPMD-api library (run-time)
-     *
-     * @return std::vector< std::string > with file extensions
-     */
-    std::vector< std::string >
-    getFileExtensions( );
+/** Return the file extensions supported in this variant of the openPMD-api
+ * library (run-time)
+ *
+ * @return std::vector< std::string > with file extensions
+ */
+std::vector<std::string> getFileExtensions();
 } // namespace openPMD
diff --git a/setup.py b/setup.py
index 2ab46e545a..8f2227d414 100644
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,12 @@
 import os
-import re
-import sys
 import platform
+import re
 import subprocess
+import sys
+from distutils.version import LooseVersion

-from setuptools import setup, Extension
+from setuptools import Extension, setup
 from setuptools.command.build_ext import build_ext
-from distutils.version import LooseVersion


 class CMakeExtension(Extension):
@@ -79,6 +79,8 @@ def build_extension(self, ext):
         # just as well win32 & cygwin (although Windows has no RPaths)
         cmake_args.append('-DCMAKE_INSTALL_RPATH=$ORIGIN')

+        cmake_args += extra_cmake_args
+
         cfg = 'Debug' if self.debug else 'Release'
         build_args = ['--config', cfg]

@@ -139,6 +141,16 @@ def build_extension(self, ext):
 CMAKE_INTERPROCEDURAL_OPTIMIZATION = os.environ.get(
     'CMAKE_INTERPROCEDURAL_OPTIMIZATION', None)

+# extra CMake arguments
+extra_cmake_args = []
+for k, v in os.environ.items():
+    extra_cmake_args_prefix = "openPMD_CMAKE_"
+    if k.startswith(extra_cmake_args_prefix) and \
+            len(k) > len(extra_cmake_args_prefix):
+        extra_cmake_args.append("-D{0}={1}".format(
+            k[len(extra_cmake_args_prefix):],
+            v))
+
 # https://cmake.org/cmake/help/v3.0/command/if.html
 if openPMD_USE_MPI.upper() in ['1', 'ON', 'TRUE', 'YES']:
     openPMD_USE_MPI = "ON"
@@ -156,7 +168,7 @@ def build_extension(self, ext):
 setup(
     name='openPMD-api',
     # note PEP-440 syntax: x.y.zaN but x.y.z.devN
-    version='0.14.4',
+    version='0.14.5',
     author='Axel Huebl, Franz Poeschel, Fabian Koller, Junmin Gu',
     author_email='axelhuebl@lbl.gov, f.poeschel@hzdr.de',
     maintainer='Axel Huebl',
diff --git a/share/openPMD/thirdParty/pybind11/include/pybind11/pybind11.h b/share/openPMD/thirdParty/pybind11/include/pybind11/pybind11.h
index 3bffbb28d2..f284fb7f9f 100644
--- a/share/openPMD/thirdParty/pybind11/include/pybind11/pybind11.h
+++ b/share/openPMD/thirdParty/pybind11/include/pybind11/pybind11.h
@@ -10,6 +10,23 @@
 #pragma once

+/* https://github.com/microsoft/onnxruntime/issues/9735#issuecomment-970718821
+ * The following block patches MSVC debug builds:
+ * Include Python header, disable linking to pythonX_d.lib on Windows in debug mode.
+ */ +#if defined(_MSC_VER) +# if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 4) +# define HAVE_ROUND 1 +# endif +# include +# pragma warning(push) +# pragma warning(disable : 4510 4610 4512 4005) +# if defined(_DEBUG) && !defined(Py_DEBUG) +# define PYBIND11_DEBUG_MARKER +# undef _DEBUG +# endif +#endif + #if defined(__INTEL_COMPILER) # pragma warning push # pragma warning disable 68 // integer conversion resulted in a change of sign diff --git a/src/ChunkInfo.cpp b/src/ChunkInfo.cpp index 56299f02e1..3c01b7b681 100644 --- a/src/ChunkInfo.cpp +++ b/src/ChunkInfo.cpp @@ -22,38 +22,30 @@ #include - namespace openPMD { -ChunkInfo::ChunkInfo( Offset offset_in, Extent extent_in ) - : offset( std::move( offset_in ) ), extent( std::move( extent_in ) ) -{ -} +ChunkInfo::ChunkInfo(Offset offset_in, Extent extent_in) + : offset(std::move(offset_in)), extent(std::move(extent_in)) +{} -bool -ChunkInfo::operator==( ChunkInfo const & other ) const +bool ChunkInfo::operator==(ChunkInfo const &other) const { return this->offset == other.offset && this->extent == other.extent; } WrittenChunkInfo::WrittenChunkInfo( - Offset offset_in, - Extent extent_in, - int sourceID_in ) - : ChunkInfo( std::move( offset_in ), std::move( extent_in ) ) - , sourceID( sourceID_in < 0 ? 0 : sourceID_in ) -{ -} + Offset offset_in, Extent extent_in, int sourceID_in) + : ChunkInfo(std::move(offset_in), std::move(extent_in)) + , sourceID(sourceID_in < 0 ? 0 : sourceID_in) +{} -WrittenChunkInfo::WrittenChunkInfo( Offset offset_in, Extent extent_in ) - : WrittenChunkInfo( std::move( offset_in ), std::move( extent_in ), 0 ) -{ -} +WrittenChunkInfo::WrittenChunkInfo(Offset offset_in, Extent extent_in) + : WrittenChunkInfo(std::move(offset_in), std::move(extent_in), 0) +{} -bool -WrittenChunkInfo::operator==( WrittenChunkInfo const & other ) const +bool WrittenChunkInfo::operator==(WrittenChunkInfo const &other) const { return this->sourceID == other.sourceID && - this->ChunkInfo::operator==( other ); + this->ChunkInfo::operator==(other); } } // namespace openPMD diff --git a/src/Dataset.cpp b/src/Dataset.cpp index 21be4fa3f2..61aaf6b357 100644 --- a/src/Dataset.cpp +++ b/src/Dataset.cpp @@ -20,71 +20,72 @@ */ #include "openPMD/Dataset.hpp" -#include #include - +#include namespace openPMD { Dataset::Dataset(Datatype d, Extent e, std::string options_in) - : extent{e}, - dtype{d}, - rank{static_cast(e.size())}, - chunkSize{e}, - options{std::move(options_in)} -{ } + : extent{e} + , dtype{d} + , rank{static_cast(e.size())} + , chunkSize{e} + , options{std::move(options_in)} +{} -Dataset::Dataset( Extent e ) : Dataset( Datatype::UNDEFINED, std::move( e ) ) -{ -} +Dataset::Dataset(Extent e) : Dataset(Datatype::UNDEFINED, std::move(e)) +{} -Dataset & -Dataset::extend( Extent newExtents ) +Dataset &Dataset::extend(Extent newExtents) { - if( newExtents.size() != rank ) - throw std::runtime_error("Dimensionality of extended Dataset must match the original dimensionality"); - for( size_t i = 0; i < newExtents.size(); ++i ) - if( newExtents[i] < extent[i] ) - throw std::runtime_error("New Extent must be equal or greater than previous Extent"); + if (newExtents.size() != rank) + throw std::runtime_error( + "Dimensionality of extended Dataset must match the original " + "dimensionality"); + for (size_t i = 0; i < newExtents.size(); ++i) + if (newExtents[i] < extent[i]) + throw std::runtime_error( + "New Extent must be equal or greater than previous Extent"); extent = newExtents; return *this; } -Dataset& -Dataset::setChunkSize(Extent const& cs) 
+Dataset &Dataset::setChunkSize(Extent const &cs)
 {
-    if( extent.size() != rank )
-        throw std::runtime_error("Dimensionality of extended Dataset must match the original dimensionality");
-    for( size_t i = 0; i < cs.size(); ++i )
-        if( cs[i] > extent[i] )
-            throw std::runtime_error("Dataset chunk size must be equal or smaller than Extent");
+    if (extent.size() != rank)
+        throw std::runtime_error(
+            "Dimensionality of extended Dataset must match the original "
+            "dimensionality");
+    for (size_t i = 0; i < cs.size(); ++i)
+        if (cs[i] > extent[i])
+            throw std::runtime_error(
+                "Dataset chunk size must be equal or smaller than Extent");
     chunkSize = cs;
     return *this;
 }

-Dataset&
-Dataset::setCompression(std::string const& format, uint8_t const level)
+Dataset &Dataset::setCompression(std::string const &format, uint8_t const level)
 {
-    if(format == "zlib" || format == "gzip" || format == "deflate")
+    if (format == "zlib" || format == "gzip" || format == "deflate")
     {
-        if(level > 9)
-            throw std::runtime_error("Compression level out of range for " + format);
+        if (level > 9)
+            throw std::runtime_error(
+                "Compression level out of range for " + format);
     }
     else
         std::cerr << "Unknown compression format " << format
                   << ". This might mean that compression will not be enabled."
                   << std::endl;

-    compression = format + ':' + std::to_string(static_cast< int >(level));
+    compression = format + ':' + std::to_string(static_cast<int>(level));
     return *this;
 }

-Dataset&
-Dataset::setCustomTransform(std::string const& parameter)
+Dataset &Dataset::setCustomTransform(std::string const &parameter)
 {
     transform = parameter;
     return *this;
 }
-} // openPMD
+} // namespace openPMD
diff --git a/src/Datatype.cpp b/src/Datatype.cpp
index af6260c2ad..4b76301855 100644
--- a/src/Datatype.cpp
+++ b/src/Datatype.cpp
@@ -21,416 +21,267 @@
 #include "openPMD/Datatype.hpp"
 #include "openPMD/DatatypeHelpers.hpp"
-#include
 #include
-#include
 #include
-
+#include
+#include
 namespace openPMD
 {
-void warnWrongDtype(std::string const& key,
-                    Datatype store,
-                    Datatype request)
+void warnWrongDtype(std::string const &key, Datatype store, Datatype request)
 {
-    std::cerr << "Warning: Attribute '" << key
-              << "' stored as " << store
+    std::cerr << "Warning: Attribute '" << key << "' stored as " << store
               << ", requested as " << request
               << ". 
Casting unconditionally with possible loss of precision.\n"; } -std::ostream& -operator<<(std::ostream& os, openPMD::Datatype const & d) +std::ostream &operator<<(std::ostream &os, openPMD::Datatype const &d) { using DT = openPMD::Datatype; - switch( d ) + switch (d) { - case DT::CHAR: - os << "CHAR"; - break; - case DT::UCHAR: - os << "UCHAR"; - break; - case DT::SHORT: - os << "SHORT"; - break; - case DT::INT: - os << "INT"; - break; - case DT::LONG: - os << "LONG"; - break; - case DT::LONGLONG: - os << "LONGLONG"; - break; - case DT::USHORT: - os << "USHORT"; - break; - case DT::UINT: - os << "UINT"; - break; - case DT::ULONG: - os << "ULONG"; - break; - case DT::ULONGLONG: - os << "ULONGLONG"; - break; - case DT::FLOAT: - os << "FLOAT"; - break; - case DT::DOUBLE: - os << "DOUBLE"; - break; - case DT::LONG_DOUBLE: - os << "LONG_DOUBLE"; - break; - case DT::CFLOAT: - os << "CFLOAT"; - break; - case DT::CDOUBLE: - os << "CDOUBLE"; - break; - case DT::CLONG_DOUBLE: - os << "CLONG_DOUBLE"; - break; - case DT::STRING: - os << "STRING"; - break; - case DT::VEC_CHAR: - os << "VEC_CHAR"; - break; - case DT::VEC_SHORT: - os << "VEC_SHORT"; - break; - case DT::VEC_INT: - os << "VEC_INT"; - break; - case DT::VEC_LONG: - os << "VEC_LONG"; - break; - case DT::VEC_LONGLONG: - os << "VEC_LONGLONG"; - break; - case DT::VEC_UCHAR: - os << "VEC_UCHAR"; - break; - case DT::VEC_USHORT: - os << "VEC_USHORT"; - break; - case DT::VEC_UINT: - os << "VEC_UINT"; - break; - case DT::VEC_ULONG: - os << "VEC_ULONG"; - break; - case DT::VEC_ULONGLONG: - os << "VEC_ULONGLONG"; - break; - case DT::VEC_FLOAT: - os << "VEC_FLOAT"; - break; - case DT::VEC_DOUBLE: - os << "VEC_DOUBLE"; - break; - case DT::VEC_LONG_DOUBLE: - os << "VEC_LONG_DOUBLE"; - break; - case DT::VEC_CFLOAT: - os << "VEC_CFLOAT"; - break; - case DT::VEC_CDOUBLE: - os << "VEC_CDOUBLE"; - break; - case DT::VEC_CLONG_DOUBLE: - os << "VEC_CLONG_DOUBLE"; - break; - case DT::VEC_STRING: - os << "VEC_STRING"; - break; - case DT::ARR_DBL_7: - os << "ARR_DBL_7"; - break; - case DT::BOOL: - os << "BOOL"; - break; - case DT::DATATYPE: - os << "DATATYPE"; - break; - case DT::UNDEFINED: - os << "UNDEFINED"; - break; + case DT::CHAR: + os << "CHAR"; + break; + case DT::UCHAR: + os << "UCHAR"; + break; + case DT::SHORT: + os << "SHORT"; + break; + case DT::INT: + os << "INT"; + break; + case DT::LONG: + os << "LONG"; + break; + case DT::LONGLONG: + os << "LONGLONG"; + break; + case DT::USHORT: + os << "USHORT"; + break; + case DT::UINT: + os << "UINT"; + break; + case DT::ULONG: + os << "ULONG"; + break; + case DT::ULONGLONG: + os << "ULONGLONG"; + break; + case DT::FLOAT: + os << "FLOAT"; + break; + case DT::DOUBLE: + os << "DOUBLE"; + break; + case DT::LONG_DOUBLE: + os << "LONG_DOUBLE"; + break; + case DT::CFLOAT: + os << "CFLOAT"; + break; + case DT::CDOUBLE: + os << "CDOUBLE"; + break; + case DT::CLONG_DOUBLE: + os << "CLONG_DOUBLE"; + break; + case DT::STRING: + os << "STRING"; + break; + case DT::VEC_CHAR: + os << "VEC_CHAR"; + break; + case DT::VEC_SHORT: + os << "VEC_SHORT"; + break; + case DT::VEC_INT: + os << "VEC_INT"; + break; + case DT::VEC_LONG: + os << "VEC_LONG"; + break; + case DT::VEC_LONGLONG: + os << "VEC_LONGLONG"; + break; + case DT::VEC_UCHAR: + os << "VEC_UCHAR"; + break; + case DT::VEC_USHORT: + os << "VEC_USHORT"; + break; + case DT::VEC_UINT: + os << "VEC_UINT"; + break; + case DT::VEC_ULONG: + os << "VEC_ULONG"; + break; + case DT::VEC_ULONGLONG: + os << "VEC_ULONGLONG"; + break; + case DT::VEC_FLOAT: + os << "VEC_FLOAT"; + break; + 
case DT::VEC_DOUBLE: + os << "VEC_DOUBLE"; + break; + case DT::VEC_LONG_DOUBLE: + os << "VEC_LONG_DOUBLE"; + break; + case DT::VEC_CFLOAT: + os << "VEC_CFLOAT"; + break; + case DT::VEC_CDOUBLE: + os << "VEC_CDOUBLE"; + break; + case DT::VEC_CLONG_DOUBLE: + os << "VEC_CLONG_DOUBLE"; + break; + case DT::VEC_STRING: + os << "VEC_STRING"; + break; + case DT::ARR_DBL_7: + os << "ARR_DBL_7"; + break; + case DT::BOOL: + os << "BOOL"; + break; + case DT::DATATYPE: + os << "DATATYPE"; + break; + case DT::UNDEFINED: + os << "UNDEFINED"; + break; } return os; } - Datatype stringToDatatype( std::string s ) +Datatype stringToDatatype(std::string s) +{ + static std::unordered_map m{ + {"CHAR", Datatype::CHAR}, + {"UCHAR", Datatype::UCHAR}, + {"SHORT", Datatype::SHORT}, + {"INT", Datatype::INT}, + {"LONG", Datatype::LONG}, + {"LONGLONG", Datatype::LONGLONG}, + {"USHORT", Datatype::USHORT}, + {"UINT", Datatype::UINT}, + {"ULONG", Datatype::ULONG}, + {"ULONGLONG", Datatype::ULONGLONG}, + {"FLOAT", Datatype::FLOAT}, + {"DOUBLE", Datatype::DOUBLE}, + {"LONG_DOUBLE", Datatype::LONG_DOUBLE}, + {"CFLOAT", Datatype::CFLOAT}, + {"CDOUBLE", Datatype::CDOUBLE}, + {"CLONG_DOUBLE", Datatype::CLONG_DOUBLE}, + {"STRING", Datatype::STRING}, + {"VEC_CHAR", Datatype::VEC_CHAR}, + {"VEC_SHORT", Datatype::VEC_SHORT}, + {"VEC_INT", Datatype::VEC_INT}, + {"VEC_LONG", Datatype::VEC_LONG}, + {"VEC_LONGLONG", Datatype::VEC_LONGLONG}, + {"VEC_UCHAR", Datatype::VEC_UCHAR}, + {"VEC_USHORT", Datatype::VEC_USHORT}, + {"VEC_UINT", Datatype::VEC_UINT}, + {"VEC_ULONG", Datatype::VEC_ULONG}, + {"VEC_ULONGLONG", Datatype::VEC_ULONGLONG}, + {"VEC_FLOAT", Datatype::VEC_FLOAT}, + {"VEC_DOUBLE", Datatype::VEC_DOUBLE}, + {"VEC_LONG_DOUBLE", Datatype::VEC_LONG_DOUBLE}, + {"VEC_CFLOAT", Datatype::VEC_CFLOAT}, + {"VEC_CDOUBLE", Datatype::VEC_CDOUBLE}, + {"VEC_CLONG_DOUBLE", Datatype::VEC_CLONG_DOUBLE}, + {"VEC_STRING", Datatype::VEC_STRING}, + {"ARR_DBL_7", Datatype::ARR_DBL_7}, + {"BOOL", Datatype::BOOL}, + {"DATATYPE", Datatype::DATATYPE}, + {"UNDEFINED", Datatype::UNDEFINED}}; + auto it = m.find(s); + if (it != m.end()) { - static std::unordered_map< - std::string, - Datatype - > m { - { - "CHAR", - Datatype::CHAR - }, - { - "UCHAR", - Datatype::UCHAR - }, - { - "SHORT", - Datatype::SHORT - }, - { - "INT", - Datatype::INT - }, - { - "LONG", - Datatype::LONG - }, - { - "LONGLONG", - Datatype::LONGLONG - }, - { - "USHORT", - Datatype::USHORT - }, - { - "UINT", - Datatype::UINT - }, - { - "ULONG", - Datatype::ULONG - }, - { - "ULONGLONG", - Datatype::ULONGLONG - }, - { - "FLOAT", - Datatype::FLOAT - }, - { - "DOUBLE", - Datatype::DOUBLE - }, - { - "LONG_DOUBLE", - Datatype::LONG_DOUBLE - }, - { - "CFLOAT", - Datatype::CFLOAT - }, - { - "CDOUBLE", - Datatype::CDOUBLE - }, - { - "CLONG_DOUBLE", - Datatype::CLONG_DOUBLE - }, - { - "STRING", - Datatype::STRING - }, - { - "VEC_CHAR", - Datatype::VEC_CHAR - }, - { - "VEC_SHORT", - Datatype::VEC_SHORT - }, - { - "VEC_INT", - Datatype::VEC_INT - }, - { - "VEC_LONG", - Datatype::VEC_LONG - }, - { - "VEC_LONGLONG", - Datatype::VEC_LONGLONG - }, - { - "VEC_UCHAR", - Datatype::VEC_UCHAR - }, - { - "VEC_USHORT", - Datatype::VEC_USHORT - }, - { - "VEC_UINT", - Datatype::VEC_UINT - }, - { - "VEC_ULONG", - Datatype::VEC_ULONG - }, - { - "VEC_ULONGLONG", - Datatype::VEC_ULONGLONG - }, - { - "VEC_FLOAT", - Datatype::VEC_FLOAT - }, - { - "VEC_DOUBLE", - Datatype::VEC_DOUBLE - }, - { - "VEC_LONG_DOUBLE", - Datatype::VEC_LONG_DOUBLE - }, - { - "VEC_CFLOAT", - Datatype::VEC_CFLOAT - }, - { - "VEC_CDOUBLE", - 
Datatype::VEC_CDOUBLE - }, - { - "VEC_CLONG_DOUBLE", - Datatype::VEC_CLONG_DOUBLE - }, - { - "VEC_STRING", - Datatype::VEC_STRING - }, - { - "ARR_DBL_7", - Datatype::ARR_DBL_7 - }, - { - "BOOL", - Datatype::BOOL - }, - { - "DATATYPE", - Datatype::DATATYPE - }, - { - "UNDEFINED", - Datatype::UNDEFINED - } - }; - auto it = m.find( s ); - if( it != m.end( ) ) - { - return it->second; - } - else - { - throw std::runtime_error( "Unknown datatype in string deserialization." ); - } + return it->second; } - - - std::string datatypeToString( openPMD::Datatype dt ) + else { - std::stringbuf buf; - std::ostream os(&buf); - os << dt; - return buf.str(); + throw std::runtime_error("Unknown datatype in string deserialization."); } +} - std::vector openPMD_Datatypes{ - Datatype::CHAR , - Datatype::UCHAR, - Datatype::SHORT, - Datatype::INT, - Datatype::LONG, - Datatype::LONGLONG, - Datatype::USHORT, - Datatype::UINT, - Datatype::ULONG, - Datatype::ULONGLONG, - Datatype::FLOAT, - Datatype::DOUBLE, - Datatype::LONG_DOUBLE, - Datatype::CFLOAT, - Datatype::CDOUBLE, - Datatype::CLONG_DOUBLE, - Datatype::STRING, - Datatype::VEC_CHAR, - Datatype::VEC_SHORT, - Datatype::VEC_INT, - Datatype::VEC_LONG, - Datatype::VEC_LONGLONG, - Datatype::VEC_UCHAR, - Datatype::VEC_USHORT, - Datatype::VEC_UINT, - Datatype::VEC_ULONG, - Datatype::VEC_ULONGLONG, - Datatype::VEC_FLOAT, - Datatype::VEC_DOUBLE, - Datatype::VEC_LONG_DOUBLE, - Datatype::VEC_CFLOAT, - Datatype::VEC_CDOUBLE, - Datatype::VEC_CLONG_DOUBLE, - Datatype::VEC_STRING, - Datatype::ARR_DBL_7, - Datatype::BOOL, - Datatype::DATATYPE, - Datatype::UNDEFINED - }; +std::string datatypeToString(openPMD::Datatype dt) +{ + std::stringbuf buf; + std::ostream os(&buf); + os << dt; + return buf.str(); +} + +std::vector openPMD_Datatypes{ + Datatype::CHAR, Datatype::UCHAR, Datatype::SHORT, + Datatype::INT, Datatype::LONG, Datatype::LONGLONG, + Datatype::USHORT, Datatype::UINT, Datatype::ULONG, + Datatype::ULONGLONG, Datatype::FLOAT, Datatype::DOUBLE, + Datatype::LONG_DOUBLE, Datatype::CFLOAT, Datatype::CDOUBLE, + Datatype::CLONG_DOUBLE, Datatype::STRING, Datatype::VEC_CHAR, + Datatype::VEC_SHORT, Datatype::VEC_INT, Datatype::VEC_LONG, + Datatype::VEC_LONGLONG, Datatype::VEC_UCHAR, Datatype::VEC_USHORT, + Datatype::VEC_UINT, Datatype::VEC_ULONG, Datatype::VEC_ULONGLONG, + Datatype::VEC_FLOAT, Datatype::VEC_DOUBLE, Datatype::VEC_LONG_DOUBLE, + Datatype::VEC_CFLOAT, Datatype::VEC_CDOUBLE, Datatype::VEC_CLONG_DOUBLE, + Datatype::VEC_STRING, Datatype::ARR_DBL_7, Datatype::BOOL, + Datatype::DATATYPE, Datatype::UNDEFINED}; +Datatype basicDatatype(Datatype dt) +{ + return switchType(dt, detail::BasicDatatype{}); +} - Datatype basicDatatype( Datatype dt ) +Datatype toVectorType(Datatype dt) +{ + auto initializer = []() { + std::map res; + for (Datatype d : openPMD_Datatypes) + { + if (d == Datatype::ARR_DBL_7 || d == Datatype::UNDEFINED || + d == Datatype::DATATYPE) + continue; + Datatype basic = basicDatatype(d); + if (basic == d) + continue; + res[basic] = d; + } + return res; + }; + static auto map(initializer()); + auto it = map.find(dt); + if (it != map.end()) { - return switchType( dt, detail::BasicDatatype{} ); + return it->second; } - - Datatype toVectorType( Datatype dt ) + else { - auto initializer = []() { - std::map res; - for (Datatype d: openPMD_Datatypes) { - if (d == Datatype::ARR_DBL_7 - || d == Datatype::UNDEFINED - || d == Datatype::DATATYPE) - continue; - Datatype basic = basicDatatype(d); - if (basic == d) - continue; - res[basic] = d; - } - return res; - }; - 
static auto map (initializer()); - auto it = map.find(dt); - if (it != map.end()) { - return it->second; - } else { - std::cerr << "Encountered non-basic type " << dt << ", aborting." - << std::endl; - throw std::runtime_error("toVectorType: passed non-basic type."); - } + std::cerr << "Encountered non-basic type " << dt << ", aborting." + << std::endl; + throw std::runtime_error("toVectorType: passed non-basic type."); } +} +namespace detail +{ + template + Datatype BasicDatatype::operator()() + { + static auto res = BasicDatatypeHelper{}.m_dt; + return res; + } - namespace detail { - template< typename T > - Datatype BasicDatatype::operator()() - { - static auto res = BasicDatatypeHelper{}.m_dt; - return res; - } - - - template< int n > - Datatype BasicDatatype::operator()() - { - throw std::runtime_error( "basicDatatype: received unknown datatype." ); - } + template + Datatype BasicDatatype::operator()() + { + throw std::runtime_error("basicDatatype: received unknown datatype."); } -} +} // namespace detail +} // namespace openPMD diff --git a/src/Format.cpp b/src/Format.cpp index 98defa14b3..073b136c2e 100644 --- a/src/Format.cpp +++ b/src/Format.cpp @@ -18,69 +18,72 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . */ -#include "openPMD/config.hpp" #include "openPMD/IO/Format.hpp" -#include "openPMD/auxiliary/StringManip.hpp" #include "openPMD/auxiliary/Environment.hpp" +#include "openPMD/auxiliary/StringManip.hpp" +#include "openPMD/config.hpp" #include - -namespace openPMD { - Format - determineFormat(std::string const &filename) { - if (auxiliary::ends_with(filename, ".h5")) - return Format::HDF5; - if (auxiliary::ends_with(filename, ".bp")) { - auto const bp_backend = auxiliary::getEnvString( - "OPENPMD_BP_BACKEND", +namespace openPMD +{ +Format determineFormat(std::string const &filename) +{ + if (auxiliary::ends_with(filename, ".h5")) + return Format::HDF5; + if (auxiliary::ends_with(filename, ".bp")) + { + auto const bp_backend = auxiliary::getEnvString( + "OPENPMD_BP_BACKEND", #if openPMD_HAVE_ADIOS2 - "ADIOS2" + "ADIOS2" #elif openPMD_HAVE_ADIOS1 - "ADIOS1" + "ADIOS1" #else - "ADIOS2" + "ADIOS2" #endif - ); - - if (bp_backend == "ADIOS2") - return Format::ADIOS2; - else if (bp_backend == "ADIOS1") - return Format::ADIOS1; - else - throw std::runtime_error( - "Environment variable OPENPMD_BP_BACKEND for .bp backend is neither ADIOS1 nor ADIOS2: " + - bp_backend - ); - } - if (auxiliary::ends_with(filename, ".sst")) - return Format::ADIOS2_SST; - if (auxiliary::ends_with(filename, ".ssc")) - return Format::ADIOS2_SSC; - if (auxiliary::ends_with(filename, ".json")) - return Format::JSON; - if (std::string::npos != filename.find('.') /* extension is provided */ ) - throw std::runtime_error("Unknown file format. Did you append a valid filename extension?"); + ); - return Format::DUMMY; + if (bp_backend == "ADIOS2") + return Format::ADIOS2; + else if (bp_backend == "ADIOS1") + return Format::ADIOS1; + else + throw std::runtime_error( + "Environment variable OPENPMD_BP_BACKEND for .bp backend is " + "neither ADIOS1 nor ADIOS2: " + + bp_backend); } + if (auxiliary::ends_with(filename, ".sst")) + return Format::ADIOS2_SST; + if (auxiliary::ends_with(filename, ".ssc")) + return Format::ADIOS2_SSC; + if (auxiliary::ends_with(filename, ".json")) + return Format::JSON; + if (std::string::npos != filename.find('.') /* extension is provided */) + throw std::runtime_error( + "Unknown file format. 
Did you append a valid filename extension?"); + + return Format::DUMMY; +} - std::string - suffix(Format f) { - switch (f) { - case Format::HDF5: - return ".h5"; - case Format::ADIOS1: - case Format::ADIOS2: - return ".bp"; - case Format::ADIOS2_SST: - return ".sst"; - case Format::ADIOS2_SSC: - return ".ssc"; - case Format::JSON: - return ".json"; - default: - return ""; - } +std::string suffix(Format f) +{ + switch (f) + { + case Format::HDF5: + return ".h5"; + case Format::ADIOS1: + case Format::ADIOS2: + return ".bp"; + case Format::ADIOS2_SST: + return ".sst"; + case Format::ADIOS2_SSC: + return ".ssc"; + case Format::JSON: + return ".json"; + default: + return ""; } +} } // namespace openPMD diff --git a/src/IO/ADIOS/ADIOS1IOHandler.cpp b/src/IO/ADIOS/ADIOS1IOHandler.cpp index 73aae83f0e..f13808190b 100644 --- a/src/IO/ADIOS/ADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/ADIOS1IOHandler.cpp @@ -22,254 +22,349 @@ #include "openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp" #if openPMD_HAVE_ADIOS1 -# include "openPMD/auxiliary/Filesystem.hpp" -# include "openPMD/auxiliary/DerefDynamicCast.hpp" -# include "openPMD/auxiliary/Memory.hpp" -# include "openPMD/auxiliary/StringManip.hpp" -# include "openPMD/IO/AbstractIOHandlerImpl.hpp" -# include "openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp" -# include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" -# include "openPMD/IO/IOTask.hpp" -# include -# include -# include +#include "openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp" +#include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" +#include "openPMD/IO/AbstractIOHandlerImpl.hpp" +#include "openPMD/IO/IOTask.hpp" +#include "openPMD/auxiliary/DerefDynamicCast.hpp" +#include "openPMD/auxiliary/Filesystem.hpp" +#include "openPMD/auxiliary/Memory.hpp" +#include "openPMD/auxiliary/StringManip.hpp" +#include +#include +#include #endif #include - namespace openPMD { #if openPMD_HAVE_ADIOS1 -# if openPMD_USE_VERIFY -# define VERIFY(CONDITION, TEXT) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } -# else -# define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 ) -# endif +#if openPMD_USE_VERIFY +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } +#else +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0) +#endif -ADIOS1IOHandlerImpl::ADIOS1IOHandlerImpl(AbstractIOHandler* handler) - : AbstractIOHandlerImpl(handler) -{ } +ADIOS1IOHandlerImpl::ADIOS1IOHandlerImpl(AbstractIOHandler *handler) + : AbstractIOHandlerImpl(handler) +{} ADIOS1IOHandlerImpl::~ADIOS1IOHandlerImpl() { - for( auto& f : m_openReadFileHandles ) + for (auto &f : m_openReadFileHandles) close(f.second); m_openReadFileHandles.clear(); - if( this->m_handler->m_backendAccess != Access::READ_ONLY ) + if (this->m_handler->m_backendAccess != Access::READ_ONLY) { - for( auto& group : m_attributeWrites ) - for( auto& att : group.second ) + for (auto &group : m_attributeWrites) + for (auto &att : group.second) flush_attribute(group.first, att.first, att.second); - for( auto& f : m_openWriteFileHandles ) + for (auto &f : m_openWriteFileHandles) close(f.second); m_openWriteFileHandles.clear(); } int status; status = adios_read_finalize_method(m_readMethod); - if( status != err_no_error ) - std::cerr << "Internal error: Failed to finalize ADIOS reading method (serial)\n"; + if (status != err_no_error) + std::cerr << "Internal error: Failed to finalize ADIOS reading method " + "(serial)\n"; status = adios_finalize(0); - if( status != err_no_error ) + if (status != err_no_error) 
std::cerr << "Internal error: Failed to finalize ADIOS (serial)\n"; } -std::future< void > -ADIOS1IOHandlerImpl::flush() +std::future ADIOS1IOHandlerImpl::flush() { using namespace auxiliary; - auto handler = dynamic_cast< ADIOS1IOHandler* >(m_handler); - while( !handler->m_setup.empty() ) + auto handler = dynamic_cast(m_handler); + while (!handler->m_setup.empty()) { - IOTask& i = handler->m_setup.front(); + IOTask &i = handler->m_setup.front(); try { - switch( i.operation ) + switch (i.operation) { using O = Operation; - case O::CREATE_FILE: - createFile(i.writable, deref_dynamic_cast< Parameter< Operation::CREATE_FILE > >(i.parameter.get())); - break; - case O::CREATE_PATH: - createPath(i.writable, deref_dynamic_cast< Parameter< O::CREATE_PATH > >(i.parameter.get())); - break; - case O::OPEN_PATH: - openPath(i.writable, deref_dynamic_cast< Parameter< O::OPEN_PATH > >(i.parameter.get())); - break; - case O::CREATE_DATASET: - createDataset(i.writable, deref_dynamic_cast< Parameter< O::CREATE_DATASET > >(i.parameter.get())); - break; - case O::WRITE_ATT: - writeAttribute(i.writable, deref_dynamic_cast< Parameter< O::WRITE_ATT > >(i.parameter.get())); - break; - case O::OPEN_FILE: - openFile(i.writable, deref_dynamic_cast< Parameter< O::OPEN_FILE > >(i.parameter.get())); - break; - default: - VERIFY(false, "[ADIOS1] Internal error: Wrong operation in ADIOS setup queue"); + case O::CREATE_FILE: + createFile( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::CREATE_PATH: + createPath( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::OPEN_PATH: + openPath( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::CREATE_DATASET: + createDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::WRITE_ATT: + writeAttribute( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::OPEN_FILE: + openFile( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + default: + VERIFY( + false, + "[ADIOS1] Internal error: Wrong operation in ADIOS setup " + "queue"); } - } catch (unsupported_data_error& e) + } + catch (...) { + std::cerr << "[AbstractIOHandlerImpl] IO Task " + << internal::operationAsString(i.operation) + << " failed with exception. Removing task" + << " from IO queue and passing on the exception." 
+ << std::endl; handler->m_setup.pop(); throw; } handler->m_setup.pop(); } - - while( !handler->m_work.empty() ) + while (!handler->m_work.empty()) { using namespace auxiliary; - IOTask& i = handler->m_work.front(); + IOTask &i = handler->m_work.front(); try { - switch( i.operation ) + switch (i.operation) { using O = Operation; - case O::EXTEND_DATASET: - extendDataset(i.writable, deref_dynamic_cast< Parameter< O::EXTEND_DATASET > >(i.parameter.get())); - break; - case O::CLOSE_PATH: - closePath(i.writable, deref_dynamic_cast< Parameter< O::CLOSE_PATH > >(i.parameter.get())); - break; - case O::OPEN_DATASET: - openDataset(i.writable, deref_dynamic_cast< Parameter< O::OPEN_DATASET > >(i.parameter.get())); - break; - case O::CLOSE_FILE: - closeFile(i.writable, *dynamic_cast< Parameter< O::CLOSE_FILE >* >(i.parameter.get())); - break; - case O::DELETE_FILE: - deleteFile(i.writable, deref_dynamic_cast< Parameter< O::DELETE_FILE > >(i.parameter.get())); - break; - case O::DELETE_PATH: - deletePath(i.writable, deref_dynamic_cast< Parameter< O::DELETE_PATH > >(i.parameter.get())); - break; - case O::DELETE_DATASET: - deleteDataset(i.writable, deref_dynamic_cast< Parameter< O::DELETE_DATASET > >(i.parameter.get())); - break; - case O::DELETE_ATT: - deleteAttribute(i.writable, deref_dynamic_cast< Parameter< O::DELETE_ATT > >(i.parameter.get())); - break; - case O::WRITE_DATASET: - writeDataset(i.writable, deref_dynamic_cast< Parameter< O::WRITE_DATASET > >(i.parameter.get())); - break; - case O::READ_DATASET: - readDataset(i.writable, deref_dynamic_cast< Parameter< O::READ_DATASET > >(i.parameter.get())); - break; - case O::GET_BUFFER_VIEW: - getBufferView(i.writable, deref_dynamic_cast< Parameter< O::GET_BUFFER_VIEW > >(i.parameter.get())); - break; - case O::READ_ATT: - readAttribute(i.writable, deref_dynamic_cast< Parameter< O::READ_ATT > >(i.parameter.get())); - break; - case O::LIST_PATHS: - listPaths(i.writable, deref_dynamic_cast< Parameter< O::LIST_PATHS > >(i.parameter.get())); - break; - case O::LIST_DATASETS: - listDatasets(i.writable, deref_dynamic_cast< Parameter< O::LIST_DATASETS > >(i.parameter.get())); - break; - case O::LIST_ATTS: - listAttributes(i.writable, deref_dynamic_cast< Parameter< O::LIST_ATTS > >(i.parameter.get())); - break; - case O::ADVANCE: - advance(i.writable, deref_dynamic_cast< Parameter< O::ADVANCE > >(i.parameter.get())); - break; - case O::AVAILABLE_CHUNKS: - availableChunks(i.writable, deref_dynamic_cast< Parameter< O::AVAILABLE_CHUNKS > >(i.parameter.get())); - break; - default: - VERIFY(false, "[ADIOS1] Internal error: Wrong operation in ADIOS work queue"); + case O::EXTEND_DATASET: + extendDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::CLOSE_PATH: + closePath( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::OPEN_DATASET: + openDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::CLOSE_FILE: + closeFile( + i.writable, + *dynamic_cast *>( + i.parameter.get())); + break; + case O::DELETE_FILE: + deleteFile( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::DELETE_PATH: + deletePath( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::DELETE_DATASET: + deleteDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::DELETE_ATT: + deleteAttribute( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::WRITE_DATASET: + writeDataset( + 
i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::READ_DATASET: + readDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::GET_BUFFER_VIEW: + getBufferView( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::READ_ATT: + readAttribute( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::LIST_PATHS: + listPaths( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::LIST_DATASETS: + listDatasets( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::LIST_ATTS: + listAttributes( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::ADVANCE: + advance( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::AVAILABLE_CHUNKS: + availableChunks( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + default: + VERIFY( + false, + "[ADIOS1] Internal error: Wrong operation in ADIOS work " + "queue"); } - } catch (unsupported_data_error& e) + } + catch (...) { - handler->m_work.pop(); + std::cerr << "[AbstractIOHandlerImpl] IO Task " + << internal::operationAsString(i.operation) + << " failed with exception. Removing task" + << " from IO queue and passing on the exception." + << std::endl; + m_handler->m_work.pop(); throw; } handler->m_work.pop(); } int status; - for( auto& file : m_scheduledReads ) + for (auto &file : m_scheduledReads) { - status = adios_perform_reads(file.first, - 1); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to perform ADIOS reads during dataset reading"); - - for( auto& sel : file.second ) - adios_selection_delete(sel); + status = adios_perform_reads(file.first, 1); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to perform ADIOS reads during " + "dataset reading"); + + for (auto &sel : file.second) + adios_selection_delete(sel.selection); } m_scheduledReads.clear(); - return std::future< void >(); + return std::future(); } -void -ADIOS1IOHandlerImpl::init() +void ADIOS1IOHandlerImpl::init() { int status; status = adios_init_noxml(MPI_COMM_NULL); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to initialize ADIOS"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to initialize ADIOS"); m_readMethod = ADIOS_READ_METHOD_BP; status = adios_read_init_method(m_readMethod, MPI_COMM_NULL, ""); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to initialize ADIOS reading method"); - + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to initialize ADIOS reading method"); } #endif #if openPMD_HAVE_ADIOS1 ADIOS1IOHandler::ADIOS1IOHandler(std::string path, Access at) - : AbstractIOHandler(std::move(path), at), - m_impl{new ADIOS1IOHandlerImpl(this)} + : AbstractIOHandler(std::move(path), at) + , m_impl{new ADIOS1IOHandlerImpl(this)} { m_impl->init(); } ADIOS1IOHandler::~ADIOS1IOHandler() = default; -std::future< void > -ADIOS1IOHandler::flush() +std::future ADIOS1IOHandler::flush(internal::FlushParams const &) { return m_impl->flush(); } -void -ADIOS1IOHandler::enqueue(IOTask const& i) +void ADIOS1IOHandler::enqueue(IOTask const &i) { - switch( i.operation ) + switch (i.operation) { - case Operation::CREATE_FILE: - case Operation::CREATE_PATH: - case Operation::OPEN_PATH: - case Operation::CREATE_DATASET: - case Operation::OPEN_FILE: - case Operation::WRITE_ATT: - m_setup.push(i); - return; - default: - m_work.push(i); - return; + 
case Operation::CREATE_FILE: + case Operation::CREATE_PATH: + case Operation::OPEN_PATH: + case Operation::CREATE_DATASET: + case Operation::OPEN_FILE: + case Operation::WRITE_ATT: + m_setup.push(i); + return; + default: + m_work.push(i); + return; } } -int64_t -ADIOS1IOHandlerImpl::open_write(Writable* writable) +int64_t ADIOS1IOHandlerImpl::open_write(Writable *writable) { auto res = m_filePaths.find(writable); - if( res == m_filePaths.end() ) + if (res == m_filePaths.end()) res = m_filePaths.find(writable->parent); std::string mode; - if( m_existsOnDisk[res->second] ) + if (m_existsOnDisk[res->second]) { mode = "u"; /* close the handle that corresponds to the file we want to append to */ - if( m_openReadFileHandles.find(res->second) != m_openReadFileHandles.end() ) + if (m_openReadFileHandles.find(res->second) != + m_openReadFileHandles.end()) { close(m_openReadFileHandles[res->second]); m_openReadFileHandles.erase(res->second); @@ -283,39 +378,46 @@ ADIOS1IOHandlerImpl::open_write(Writable* writable) int64_t fd = -1; int status; - status = adios_open(&fd, - res->second->c_str(), - res->second->c_str(), - mode.c_str(), - MPI_COMM_NULL); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to open_write ADIOS file"); + status = adios_open( + &fd, + res->second->c_str(), + res->second->c_str(), + mode.c_str(), + MPI_COMM_NULL); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to open_write ADIOS file"); return fd; } -ADIOS_FILE* -ADIOS1IOHandlerImpl::open_read(std::string const & name) +ADIOS_FILE *ADIOS1IOHandlerImpl::open_read(std::string const &name) { ADIOS_FILE *f = nullptr; - f = adios_read_open_file(name.c_str(), - m_readMethod, - MPI_COMM_NULL); - VERIFY(adios_errno != err_file_not_found, "[ADIOS1] Internal error: ADIOS file not found"); - VERIFY(f != nullptr, "[ADIOS1] Internal error: Failed to open_read ADIOS file"); + f = adios_read_open_file(name.c_str(), m_readMethod, MPI_COMM_NULL); + VERIFY( + adios_errno != err_file_not_found, + "[ADIOS1] Internal error: ADIOS file not found"); + VERIFY( + f != nullptr, + "[ADIOS1] Internal error: Failed to open_read ADIOS file"); return f; } -int64_t -ADIOS1IOHandlerImpl::initialize_group(std::string const &name) +int64_t ADIOS1IOHandlerImpl::initialize_group(std::string const &name) { int status; int64_t group; ADIOS_STATISTICS_FLAG noStatistics = adios_stat_no; status = adios_declare_group(&group, name.c_str(), "", noStatistics); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to declare ADIOS group"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to declare ADIOS group"); status = adios_select_method(group, "POSIX", "", ""); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to select ADIOS method"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to select ADIOS method"); return group; } @@ -327,17 +429,16 @@ ADIOS1IOHandlerImpl::initialize_group(std::string const &name) #else ADIOS1IOHandler::ADIOS1IOHandler(std::string path, Access at) - : AbstractIOHandler(std::move(path), at) + : AbstractIOHandler(std::move(path), at) { throw std::runtime_error("openPMD-api built without ADIOS1 support"); } ADIOS1IOHandler::~ADIOS1IOHandler() = default; -std::future< void > -ADIOS1IOHandler::flush() +std::future ADIOS1IOHandler::flush(internal::FlushParams const &) { - return std::future< void >(); + return std::future(); } #endif -} // openPMD +} // namespace openPMD diff --git a/src/IO/ADIOS/ADIOS2Auxiliary.cpp 
b/src/IO/ADIOS/ADIOS2Auxiliary.cpp index ca20b7dae9..9f5ee46af7 100644 --- a/src/IO/ADIOS/ADIOS2Auxiliary.cpp +++ b/src/IO/ADIOS/ADIOS2Auxiliary.cpp @@ -21,8 +21,8 @@ #include "openPMD/config.hpp" #if openPMD_HAVE_ADIOS2 -#include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include "openPMD/Datatype.hpp" +#include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include @@ -30,89 +30,84 @@ namespace openPMD { namespace detail { - template< typename T > - std::string - ToDatatypeHelper< T >::type() + template + std::string ToDatatypeHelper::type() { - return adios2::GetType< T >(); + return adios2::GetType(); } - template< typename T > - std::string - ToDatatypeHelper< std::vector< T > >::type() + template + std::string ToDatatypeHelper >::type() { return - adios2::GetType< T >(); + adios2::GetType(); } - template< typename T, size_t n > - std::string - ToDatatypeHelper< std::array< T, n > >::type() + template + std::string ToDatatypeHelper >::type() { return - adios2::GetType< T >(); + adios2::GetType(); } - std::string - ToDatatypeHelper< bool >::type() + std::string ToDatatypeHelper::type() { - return ToDatatypeHelper< bool_representation >::type(); + return ToDatatypeHelper::type(); } - template< typename T > - std::string - ToDatatype::operator()() + template + std::string ToDatatype::operator()() { - return ToDatatypeHelper< T >::type(); + return ToDatatypeHelper::type(); } - template< int n > - std::string - ToDatatype::operator()() + template + std::string ToDatatype::operator()() { return ""; } - Datatype fromADIOS2Type( std::string const & dt, bool verbose ) + Datatype fromADIOS2Type(std::string const &dt, bool verbose) { - static std::map< std::string, Datatype > map{ - { "string", Datatype::STRING }, - { "char", Datatype::CHAR }, - { "signed char", Datatype::CHAR }, - { "unsigned char", Datatype::UCHAR }, - { "short", Datatype::SHORT }, - { "unsigned short", Datatype::USHORT }, - { "int", Datatype::INT }, - { "unsigned int", Datatype::UINT }, - { "long int", Datatype::LONG }, - { "unsigned long int", Datatype::ULONG }, - { "long long int", Datatype::LONGLONG }, - { "unsigned long long int", Datatype::ULONGLONG }, - { "float", Datatype::FLOAT }, - { "double", Datatype::DOUBLE }, - { "long double", Datatype::LONG_DOUBLE }, - { "float complex", Datatype::CFLOAT }, - { "double complex", Datatype::CDOUBLE }, - { "long double complex", Datatype::CLONG_DOUBLE }, // does not exist as of 2.7.0 but might come later - { "uint8_t", Datatype::UCHAR }, - { "int8_t", Datatype::CHAR }, - { "uint16_t", determineDatatype< uint16_t >() }, - { "int16_t", determineDatatype< int16_t >() }, - { "uint32_t", determineDatatype< uint32_t >() }, - { "int32_t", determineDatatype< int32_t >() }, - { "uint64_t", determineDatatype< uint64_t >() }, - { "int64_t", determineDatatype< int64_t >() } - }; - auto it = map.find( dt ); - if( it != map.end() ) + static std::map map{ + {"string", Datatype::STRING}, + {"char", Datatype::CHAR}, + {"signed char", Datatype::CHAR}, + {"unsigned char", Datatype::UCHAR}, + {"short", Datatype::SHORT}, + {"unsigned short", Datatype::USHORT}, + {"int", Datatype::INT}, + {"unsigned int", Datatype::UINT}, + {"long int", Datatype::LONG}, + {"unsigned long int", Datatype::ULONG}, + {"long long int", Datatype::LONGLONG}, + {"unsigned long long int", Datatype::ULONGLONG}, + {"float", Datatype::FLOAT}, + {"double", Datatype::DOUBLE}, + {"long double", Datatype::LONG_DOUBLE}, + {"float complex", Datatype::CFLOAT}, + {"double complex", Datatype::CDOUBLE}, + {"long double complex", + 
Datatype::CLONG_DOUBLE}, // does not exist as of 2.7.0 but might + // come later + {"uint8_t", Datatype::UCHAR}, + {"int8_t", Datatype::CHAR}, + {"uint16_t", determineDatatype()}, + {"int16_t", determineDatatype()}, + {"uint32_t", determineDatatype()}, + {"int32_t", determineDatatype()}, + {"uint64_t", determineDatatype()}, + {"int64_t", determineDatatype()}}; + auto it = map.find(dt); + if (it != map.end()) { return it->second; } else { - if( verbose ) + if (verbose) { std::cerr << "[ADIOS2] Warning: Encountered unknown ADIOS2 datatype," @@ -123,74 +118,69 @@ namespace detail } } - template< typename T > - Extent - AttributeInfo::operator()( - adios2::IO & IO, - std::string const & attributeName, - VariableOrAttribute voa ) + template + Extent AttributeInfo::operator()( + adios2::IO &IO, + std::string const &attributeName, + VariableOrAttribute voa) { - switch( voa ) + switch (voa) { - case VariableOrAttribute::Attribute: + case VariableOrAttribute::Attribute: { + auto attribute = IO.InquireAttribute(attributeName); + if (!attribute) { - auto attribute = IO.InquireAttribute< T >( attributeName ); - if( !attribute ) - { - throw std::runtime_error( - "[ADIOS2] Internal error: Attribute not present." ); - } - return { attribute.Data().size() }; + throw std::runtime_error( + "[ADIOS2] Internal error: Attribute not present."); } - case VariableOrAttribute::Variable: + return {attribute.Data().size()}; + } + case VariableOrAttribute::Variable: { + auto variable = IO.InquireVariable(attributeName); + if (!variable) { - auto variable = IO.InquireVariable< T >( attributeName ); - if( !variable ) - { - throw std::runtime_error( - "[ADIOS2] Internal error: Variable not present." ); - } - auto shape = variable.Shape(); - Extent res; - res.reserve( shape.size() ); - for( auto val : shape ) - { - res.push_back( val ); - } - return res; + throw std::runtime_error( + "[ADIOS2] Internal error: Variable not present."); } - default: - throw std::runtime_error( "[ADIOS2] Unreachable!" ); + auto shape = variable.Shape(); + Extent res; + res.reserve(shape.size()); + for (auto val : shape) + { + res.push_back(val); + } + return res; + } + default: + throw std::runtime_error("[ADIOS2] Unreachable!"); } } - template< int n, typename... Params > - Extent - AttributeInfo::operator()( Params &&... ) + template + Extent AttributeInfo::operator()(Params &&...) { - return { 0 }; + return {0}; } - Datatype - attributeInfo( - adios2::IO & IO, - std::string const & attributeName, + Datatype attributeInfo( + adios2::IO &IO, + std::string const &attributeName, bool verbose, - VariableOrAttribute voa ) + VariableOrAttribute voa) { std::string type; - switch( voa ) + switch (voa) { - case VariableOrAttribute::Attribute: - type = IO.AttributeType( attributeName ); - break; - case VariableOrAttribute::Variable: - type = IO.VariableType( attributeName ); - break; + case VariableOrAttribute::Attribute: + type = IO.AttributeType(attributeName); + break; + case VariableOrAttribute::Variable: + type = IO.VariableType(attributeName); + break; } - if( type.empty() ) + if (type.empty()) { - if( verbose ) + if (verbose) { std::cerr << "[ADIOS2] Warning: Attribute with name " << attributeName << " has no type in backend." 
@@ -201,62 +191,57 @@ namespace detail else { static AttributeInfo ai; - Datatype basicType = fromADIOS2Type( type ); + Datatype basicType = fromADIOS2Type(type); Extent shape = switchAdios2AttributeType( - basicType, ai, IO, attributeName, voa ); + basicType, ai, IO, attributeName, voa); - switch( voa ) + switch (voa) { - case VariableOrAttribute::Attribute: + case VariableOrAttribute::Attribute: { + auto size = shape[0]; + Datatype openPmdType = size == 1 ? basicType + : size == 7 && basicType == Datatype::DOUBLE + ? Datatype::ARR_DBL_7 + : toVectorType(basicType); + return openPmdType; + } + case VariableOrAttribute::Variable: { + if (shape.size() == 0 || (shape.size() == 1 && shape[0] == 1)) + { + // global single value variable + return basicType; + } + else if (shape.size() == 1) { - auto size = shape[ 0 ]; - Datatype openPmdType = size == 1 - ? basicType - : size == 7 && basicType == Datatype::DOUBLE - ? Datatype::ARR_DBL_7 - : toVectorType( basicType ); + auto size = shape[0]; + Datatype openPmdType = + size == 7 && basicType == Datatype::DOUBLE + ? Datatype::ARR_DBL_7 + : toVectorType(basicType); return openPmdType; } - case VariableOrAttribute::Variable: + else if ( + shape.size() == 2 && + (basicType == Datatype::CHAR || + basicType == Datatype::UCHAR)) { - if( shape.size() == 0 || - ( shape.size() == 1 && shape[ 0 ] == 1 ) ) - { - // global single value variable - return basicType; - } - else if( shape.size() == 1 ) - { - auto size = shape[ 0 ]; - Datatype openPmdType = - size == 7 && basicType == Datatype::DOUBLE - ? Datatype::ARR_DBL_7 - : toVectorType( basicType ); - return openPmdType; - } - else if( - shape.size() == 2 && - ( basicType == Datatype::CHAR || - basicType == Datatype::UCHAR ) ) - { - return Datatype::VEC_STRING; - } - else + return Datatype::VEC_STRING; + } + else + { + std::stringstream errorMsg; + errorMsg << "[ADIOS2] Unexpected shape for " + << attributeName << ": ["; + for (auto const ext : shape) { - std::stringstream errorMsg; - errorMsg << "[ADIOS2] Unexpected shape for " - << attributeName << ": ["; - for( auto const ext : shape ) - { - errorMsg << std::to_string( ext ) << ", "; - } - errorMsg << "] of type " - << datatypeToString( basicType ); - throw std::runtime_error( errorMsg.str() ); + errorMsg << std::to_string(ext) << ", "; } + errorMsg << "] of type " << datatypeToString(basicType); + throw std::runtime_error(errorMsg.str()); } } - throw std::runtime_error( "Unreachable!" 
); + } + throw std::runtime_error("Unreachable!"); } } } // namespace detail diff --git a/src/IO/ADIOS/ADIOS2IOHandler.cpp b/src/IO/ADIOS/ADIOS2IOHandler.cpp index 04d53249e3..53df53b865 100644 --- a/src/IO/ADIOS/ADIOS2IOHandler.cpp +++ b/src/IO/ADIOS/ADIOS2IOHandler.cpp @@ -37,56 +37,53 @@ #include #include - namespace openPMD { #if openPMD_USE_VERIFY -#define VERIFY( CONDITION, TEXT ) \ +#define VERIFY(CONDITION, TEXT) \ { \ - if ( !( CONDITION ) ) \ - throw std::runtime_error( ( TEXT ) ); \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ } #else -#define VERIFY( CONDITION, TEXT ) \ +#define VERIFY(CONDITION, TEXT) \ do \ { \ - (void)sizeof( CONDITION ); \ - } while ( 0 ); + (void)sizeof(CONDITION); \ + } while (0); #endif -#define VERIFY_ALWAYS( CONDITION, TEXT ) \ +#define VERIFY_ALWAYS(CONDITION, TEXT) \ { \ - if ( !( CONDITION ) ) \ - throw std::runtime_error( ( TEXT ) ); \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ } #if openPMD_HAVE_ADIOS2 -# if openPMD_HAVE_MPI +#if openPMD_HAVE_MPI ADIOS2IOHandlerImpl::ADIOS2IOHandlerImpl( - AbstractIOHandler * handler, + AbstractIOHandler *handler, MPI_Comm communicator, nlohmann::json cfg, - std::string engineType ) - : AbstractIOHandlerImplCommon( handler ) - , m_ADIOS{ communicator, ADIOS2_DEBUG_MODE } - , m_engineType( std::move( engineType ) ) + std::string engineType) + : AbstractIOHandlerImplCommon(handler) + , m_ADIOS{communicator} + , m_engineType(std::move(engineType)) { - init( std::move( cfg ) ); + init(std::move(cfg)); } -# endif // openPMD_HAVE_MPI +#endif // openPMD_HAVE_MPI ADIOS2IOHandlerImpl::ADIOS2IOHandlerImpl( - AbstractIOHandler * handler, - nlohmann::json cfg, - std::string engineType ) - : AbstractIOHandlerImplCommon( handler ) - , m_ADIOS{ ADIOS2_DEBUG_MODE } - , m_engineType( std::move( engineType ) ) + AbstractIOHandler *handler, nlohmann::json cfg, std::string engineType) + : AbstractIOHandlerImplCommon(handler) + , m_ADIOS{} + , m_engineType(std::move(engineType)) { - init( std::move( cfg ) ); + init(std::move(cfg)); } ADIOS2IOHandlerImpl::~ADIOS2IOHandlerImpl() @@ -97,55 +94,50 @@ ADIOS2IOHandlerImpl::~ADIOS2IOHandlerImpl() * This means that destruction order is nondeterministic. * Let's determinize it (necessary if computing in parallel). 
*/ - using file_t = std::unique_ptr< detail::BufferedActions >; - std::vector< file_t > sorted; - sorted.reserve( m_fileData.size() ); - for( auto & pair : m_fileData ) + using file_t = std::unique_ptr; + std::vector sorted; + sorted.reserve(m_fileData.size()); + for (auto &pair : m_fileData) { - sorted.push_back( std::move( pair.second ) ); + sorted.push_back(std::move(pair.second)); } m_fileData.clear(); std::sort( - sorted.begin(), - sorted.end(), - []( auto const & left, auto const & right ) { + sorted.begin(), sorted.end(), [](auto const &left, auto const &right) { return left->m_file <= right->m_file; - } ); + }); // run the destructors - for( auto & file : sorted ) + for (auto &file : sorted) { // std::unique_ptr interface file.reset(); } } -void -ADIOS2IOHandlerImpl::init( nlohmann::json cfg ) +void ADIOS2IOHandlerImpl::init(nlohmann::json cfg) { - if( cfg.contains( "adios2" ) ) + if (cfg.contains("adios2")) { - m_config = std::move( cfg[ "adios2" ] ); + m_config = std::move(cfg["adios2"]); - if( m_config.json().contains( "schema" ) ) + if (m_config.json().contains("schema")) { - m_schema = - m_config[ "schema" ].json().get< ADIOS2Schema::schema_t >(); + m_schema = m_config["schema"].json().get(); } - if( m_config.json().contains( "use_span_based_put" ) ) + if (m_config.json().contains("use_span_based_put")) { m_useSpanBasedPutByDefault = - m_config[ "use_span_based_put" ].json().get< bool >() - ? UseSpan::Yes - : UseSpan::No; + m_config["use_span_based_put"].json().get() ? UseSpan::Yes + : UseSpan::No; } - auto engineConfig = config( ADIOS2Defaults::str_engine ); - if( !engineConfig.json().is_null() ) + auto engineConfig = config(ADIOS2Defaults::str_engine); + if (!engineConfig.json().is_null()) { auto engineTypeConfig = - config( ADIOS2Defaults::str_type, engineConfig ).json(); - if( !engineTypeConfig.is_null() ) + config(ADIOS2Defaults::str_type, engineConfig).json(); + if (!engineTypeConfig.is_null()) { // convert to string m_engineType = engineTypeConfig; @@ -153,83 +145,86 @@ ADIOS2IOHandlerImpl::init( nlohmann::json cfg ) m_engineType.begin(), m_engineType.end(), m_engineType.begin(), - []( unsigned char c ) { return std::tolower( c ); } ); + [](unsigned char c) { return std::tolower(c); }); } } auto operators = getOperators(); - if( operators ) + if (operators) { - defaultOperators = std::move( operators.get() ); + defaultOperators = std::move(operators.get()); } } // environment-variable based configuration - m_schema = auxiliary::getEnvNum( "OPENPMD2_ADIOS2_SCHEMA", m_schema ); + m_schema = auxiliary::getEnvNum("OPENPMD2_ADIOS2_SCHEMA", m_schema); } -auxiliary::Option< std::vector< ADIOS2IOHandlerImpl::ParameterizedOperator > > -ADIOS2IOHandlerImpl::getOperators( auxiliary::TracingJSON cfg ) +auxiliary::Option > +ADIOS2IOHandlerImpl::getOperators(auxiliary::TracingJSON cfg) { - using ret_t = auxiliary::Option< std::vector< ParameterizedOperator > >; - std::vector< ParameterizedOperator > res; - if( !cfg.json().contains( "dataset" ) ) + using ret_t = auxiliary::Option >; + std::vector res; + if (!cfg.json().contains("dataset")) { return ret_t(); } - auto datasetConfig = cfg[ "dataset" ]; - if( !datasetConfig.json().contains( "operators" ) ) + auto datasetConfig = cfg["dataset"]; + if (!datasetConfig.json().contains("operators")) { return ret_t(); } - auto _operators = datasetConfig[ "operators" ]; - nlohmann::json const & operators = _operators.json(); - for( auto operatorIterator = operators.begin(); + auto _operators = datasetConfig["operators"]; + nlohmann::json const 
&operators = _operators.json(); + for (auto operatorIterator = operators.begin(); operatorIterator != operators.end(); - ++operatorIterator ) + ++operatorIterator) { - nlohmann::json const & op = operatorIterator.value(); - std::string const & type = op[ "type" ]; + nlohmann::json const &op = operatorIterator.value(); + std::string const &type = op["type"]; adios2::Params adiosParams; - if( op.contains( "parameters" ) ) + if (op.contains("parameters")) { - nlohmann::json const & params = op[ "parameters" ]; - for( auto paramIterator = params.begin(); + nlohmann::json const ¶ms = op["parameters"]; + for (auto paramIterator = params.begin(); paramIterator != params.end(); - ++paramIterator ) + ++paramIterator) { - adiosParams[ paramIterator.key() ] = - paramIterator.value().get< std::string >(); + adiosParams[paramIterator.key()] = + paramIterator.value().get(); } } - auxiliary::Option< adios2::Operator > adiosOperator = - getCompressionOperator( type ); - if( adiosOperator ) + auxiliary::Option adiosOperator = + getCompressionOperator(type); + if (adiosOperator) { - res.emplace_back( ParameterizedOperator{ - adiosOperator.get(), std::move( adiosParams ) } ); + res.emplace_back(ParameterizedOperator{ + adiosOperator.get(), std::move(adiosParams)}); } } _operators.declareFullyRead(); - return auxiliary::makeOption( std::move( res ) ); + return auxiliary::makeOption(std::move(res)); } -auxiliary::Option< std::vector< ADIOS2IOHandlerImpl::ParameterizedOperator > > +auxiliary::Option > ADIOS2IOHandlerImpl::getOperators() { - return getOperators( m_config ); + return getOperators(m_config); } -std::string -ADIOS2IOHandlerImpl::fileSuffix() const +std::string ADIOS2IOHandlerImpl::fileSuffix() const { // SST engine adds its suffix unconditionally // so we don't add it - static std::map< std::string, std::string > endings{ - { "sst", "" }, { "staging", "" }, { "bp4", ".bp" }, - { "bp3", ".bp" }, { "file", ".bp" }, { "hdf5", ".h5" }, - { "nullcore", ".nullcore" }, { "ssc", ".ssc" } - }; - auto it = endings.find( m_engineType ); - if( it != endings.end() ) + static std::map endings{ + {"sst", ""}, + {"staging", ""}, + {"bp4", ".bp"}, + {"bp3", ".bp"}, + {"file", ".bp"}, + {"hdf5", ".h5"}, + {"nullcore", ".nullcore"}, + {"ssc", ".ssc"}}; + auto it = endings.find(m_engineType); + if (it != endings.end()) { return it->second; } @@ -239,138 +234,137 @@ ADIOS2IOHandlerImpl::fileSuffix() const } } -std::future< void > -ADIOS2IOHandlerImpl::flush() +std::future +ADIOS2IOHandlerImpl::flush(internal::FlushParams const &flushParams) { auto res = AbstractIOHandlerImpl::flush(); - for ( auto & p : m_fileData ) + for (auto &p : m_fileData) { - if ( m_dirty.find( p.first ) != m_dirty.end( ) ) + if (m_dirty.find(p.first) != m_dirty.end()) { - p.second->flush( m_handler->m_flushLevel, /* writeAttributes = */ false ); + p.second->flush( + flushParams.flushLevel, /* writeAttributes = */ false); } else { - p.second->drop( ); + p.second->drop(); } } return res; } void ADIOS2IOHandlerImpl::createFile( - Writable * writable, - Parameter< Operation::CREATE_FILE > const & parameters ) + Writable *writable, Parameter const ¶meters) { - VERIFY_ALWAYS( m_handler->m_backendAccess != Access::READ_ONLY, - "[ADIOS2] Creating a file in read-only mode is not possible." 
); + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[ADIOS2] Creating a file in read-only mode is not possible."); - if ( !writable->written ) + if (!writable->written) { std::string name = parameters.name; - std::string suffix( fileSuffix() ); - if( !auxiliary::ends_with( name, suffix ) ) + std::string suffix(fileSuffix()); + if (!auxiliary::ends_with(name, suffix)) { name += suffix; } - auto res_pair = getPossiblyExisting( name ); - InvalidatableFile shared_name = InvalidatableFile( name ); + auto res_pair = getPossiblyExisting(name); + InvalidatableFile shared_name = InvalidatableFile(name); VERIFY_ALWAYS( - !( m_handler->m_backendAccess == Access::READ_WRITE && - ( !std::get< PE_NewlyCreated >( res_pair ) || - auxiliary::file_exists( fullPath( - std::get< PE_InvalidatableFile >( res_pair ) ) ) ) ), - "[ADIOS2] Can only overwrite existing file in CREATE mode." ); + !(m_handler->m_backendAccess == Access::READ_WRITE && + (!std::get(res_pair) || + auxiliary::file_exists( + fullPath(std::get(res_pair))))), + "[ADIOS2] Can only overwrite existing file in CREATE mode."); - if ( !std::get< PE_NewlyCreated >( res_pair ) ) + if (!std::get(res_pair)) { - auto file = std::get< PE_InvalidatableFile >( res_pair ); - m_dirty.erase( file ); - dropFileData( file ); - file.invalidate( ); + auto file = std::get(res_pair); + m_dirty.erase(file); + dropFileData(file); + file.invalidate(); } - std::string const dir( m_handler->directory ); - if ( !auxiliary::directory_exists( dir ) ) + std::string const dir(m_handler->directory); + if (!auxiliary::directory_exists(dir)) { - auto success = auxiliary::create_directories( dir ); - VERIFY( success, "[ADIOS2] Could not create directory." ); + auto success = auxiliary::create_directories(dir); + VERIFY(success, "[ADIOS2] Could not create directory."); } m_iterationEncoding = parameters.encoding; - associateWithFile( writable, shared_name ); - this->m_dirty.emplace( shared_name ); - getFileData( shared_name, IfFileNotOpen::OpenImplicitly ).m_mode = + associateWithFile(writable, shared_name); + this->m_dirty.emplace(shared_name); + getFileData(shared_name, IfFileNotOpen::OpenImplicitly).m_mode = adios2::Mode::Write; // WORKAROUND // ADIOS2 does not yet implement ReadWrite Mode writable->written = true; - writable->abstractFilePosition = - std::make_shared< ADIOS2FilePosition >( ); + writable->abstractFilePosition = std::make_shared(); // enforce opening the file // lazy opening is deathly in parallel situations - getFileData( shared_name, IfFileNotOpen::OpenImplicitly ); + getFileData(shared_name, IfFileNotOpen::OpenImplicitly); } } void ADIOS2IOHandlerImpl::createPath( - Writable * writable, - const Parameter< Operation::CREATE_PATH > & parameters ) + Writable *writable, const Parameter ¶meters) { std::string path; - refreshFileFromParent( writable, /* preferParentFile = */ true ); + refreshFileFromParent(writable, /* preferParentFile = */ true); /* Sanitize path */ - if ( !auxiliary::starts_with( parameters.path, '/' ) ) + if (!auxiliary::starts_with(parameters.path, '/')) { - path = filePositionToString( setAndGetFilePosition( writable ) ) + "/" + - auxiliary::removeSlashes( parameters.path ); + path = filePositionToString(setAndGetFilePosition(writable)) + "/" + + auxiliary::removeSlashes(parameters.path); } else { - path = "/" + auxiliary::removeSlashes( parameters.path ); + path = "/" + auxiliary::removeSlashes(parameters.path); } /* ADIOS has no concept for explicitly creating paths. 
* They are implicitly created with the paths of variables/attributes. */ writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS2FilePosition >( - path, ADIOS2FilePosition::GD::GROUP ); + writable->abstractFilePosition = std::make_shared( + path, ADIOS2FilePosition::GD::GROUP); } void ADIOS2IOHandlerImpl::createDataset( - Writable * writable, - const Parameter< Operation::CREATE_DATASET > & parameters ) + Writable *writable, const Parameter ¶meters) { - if ( m_handler->m_backendAccess == Access::READ_ONLY ) + if (m_handler->m_backendAccess == Access::READ_ONLY) { - throw std::runtime_error( "[ADIOS2] Creating a dataset in a file opened as read " - "only is not possible." ); + throw std::runtime_error( + "[ADIOS2] Creating a dataset in a file opened as read " + "only is not possible."); } - if ( !writable->written ) + if (!writable->written) { /* Sanitize name */ - std::string name = auxiliary::removeSlashes( parameters.name ); + std::string name = auxiliary::removeSlashes(parameters.name); auto const file = - refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto filePos = setAndGetFilePosition( writable, name ); + refreshFileFromParent(writable, /* preferParentFile = */ false); + auto filePos = setAndGetFilePosition(writable, name); filePos->gd = ADIOS2FilePosition::GD::DATASET; - auto const varName = nameOfVariable( writable ); + auto const varName = nameOfVariable(writable); - std::vector< ParameterizedOperator > operators; - nlohmann::json options = nlohmann::json::parse( parameters.options ); - if( options.contains( "adios2" ) ) + std::vector operators; + nlohmann::json options = nlohmann::json::parse(parameters.options); + if (options.contains("adios2")) { - auxiliary::TracingJSON datasetConfig( options[ "adios2" ] ); - auto datasetOperators = getOperators( datasetConfig ); + auxiliary::TracingJSON datasetConfig(options["adios2"]); + auto datasetOperators = getOperators(datasetConfig); - operators = datasetOperators ? std::move( datasetOperators.get() ) + operators = datasetOperators ? 
std::move(datasetOperators.get()) : defaultOperators; auto shadow = datasetConfig.invertShadow(); - if( shadow.size() > 0 ) + if (shadow.size() > 0) { std::cerr << "Warning: parts of the JSON configuration for " "ADIOS2 dataset '" @@ -383,31 +377,32 @@ void ADIOS2IOHandlerImpl::createDataset( operators = defaultOperators; } - if( !parameters.compression.empty() ) + if (!parameters.compression.empty()) { - auxiliary::Option< adios2::Operator > adiosOperator = - getCompressionOperator( parameters.compression ); - if( adiosOperator ) + auxiliary::Option adiosOperator = + getCompressionOperator(parameters.compression); + if (adiosOperator) { - operators.push_back( ParameterizedOperator{ - adiosOperator.get(), adios2::Params() } ); + operators.push_back(ParameterizedOperator{ + adiosOperator.get(), adios2::Params()}); } } // cast from openPMD::Extent to adios2::Dims - adios2::Dims const shape( parameters.extent.begin(), parameters.extent.end() ); + adios2::Dims const shape( + parameters.extent.begin(), parameters.extent.end()); - auto & fileData = getFileData( file, IfFileNotOpen::ThrowError ); + auto &fileData = getFileData(file, IfFileNotOpen::ThrowError); switchAdios2VariableType( parameters.dtype, detail::VariableDefiner(), fileData.m_IO, varName, operators, - shape ); + shape); fileData.invalidateVariablesMap(); writable->written = true; - m_dirty.emplace( file ); + m_dirty.emplace(file); } } @@ -415,94 +410,85 @@ namespace detail { struct DatasetExtender { - template< typename T, typename... Args > - void - operator()( - adios2::IO & IO, - std::string const & variable, - Extent const & newShape ) + template + void operator()( + adios2::IO &IO, std::string const &variable, Extent const &newShape) { - auto var = IO.InquireVariable< T >( variable ); - if( !var ) + auto var = IO.InquireVariable(variable); + if (!var) { throw std::runtime_error( "[ADIOS2] Unable to retrieve variable for resizing: '" + - variable + "'." ); + variable + "'."); } adios2::Dims dims; - dims.reserve( newShape.size() ); - for( auto ext : newShape ) + dims.reserve(newShape.size()); + for (auto ext : newShape) { - dims.push_back( ext ); + dims.push_back(ext); } - var.SetShape( dims ); + var.SetShape(dims); } std::string errorMsg = "ADIOS2: extendDataset()"; }; } // namespace detail -void -ADIOS2IOHandlerImpl::extendDataset( - Writable * writable, - const Parameter< Operation::EXTEND_DATASET > & parameters ) +void ADIOS2IOHandlerImpl::extendDataset( + Writable *writable, const Parameter ¶meters) { VERIFY_ALWAYS( m_handler->m_backendAccess != Access::READ_ONLY, - "[ADIOS2] Cannot extend datasets in read-only mode." 
); - setAndGetFilePosition( writable ); - auto file = - refreshFileFromParent( writable, /* preferParentFile = */ false ); - std::string name = nameOfVariable( writable ); - auto & filedata = getFileData( file, IfFileNotOpen::ThrowError ); + "[ADIOS2] Cannot extend datasets in read-only mode."); + setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + std::string name = nameOfVariable(writable); + auto &filedata = getFileData(file, IfFileNotOpen::ThrowError); static detail::DatasetExtender de; - Datatype dt = detail::fromADIOS2Type( filedata.m_IO.VariableType( name ) ); - switchAdios2VariableType( dt, de, filedata.m_IO, name, parameters.extent ); + Datatype dt = detail::fromADIOS2Type(filedata.m_IO.VariableType(name)); + switchAdios2VariableType(dt, de, filedata.m_IO, name, parameters.extent); } -void -ADIOS2IOHandlerImpl::openFile( - Writable * writable, - const Parameter< Operation::OPEN_FILE > & parameters ) +void ADIOS2IOHandlerImpl::openFile( + Writable *writable, const Parameter ¶meters) { - if ( !auxiliary::directory_exists( m_handler->directory ) ) + if (!auxiliary::directory_exists(m_handler->directory)) { - throw no_such_file_error( "[ADIOS2] Supplied directory is not valid: " + - m_handler->directory ); + throw no_such_file_error( + "[ADIOS2] Supplied directory is not valid: " + + m_handler->directory); } std::string name = parameters.name; - std::string suffix( fileSuffix() ); - if( !auxiliary::ends_with( name, suffix ) ) + std::string suffix(fileSuffix()); + if (!auxiliary::ends_with(name, suffix)) { name += suffix; } - auto file = std::get< PE_InvalidatableFile >( getPossiblyExisting( name ) ); + auto file = std::get(getPossiblyExisting(name)); - associateWithFile( writable, file ); + associateWithFile(writable, file); writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS2FilePosition >( ); + writable->abstractFilePosition = std::make_shared(); m_iterationEncoding = parameters.encoding; // enforce opening the file // lazy opening is deathly in parallel situations - getFileData( file, IfFileNotOpen::OpenImplicitly ); + getFileData(file, IfFileNotOpen::OpenImplicitly); } -void -ADIOS2IOHandlerImpl::closeFile( - Writable * writable, - Parameter< Operation::CLOSE_FILE > const & ) +void ADIOS2IOHandlerImpl::closeFile( + Writable *writable, Parameter const &) { - auto fileIterator = m_files.find( writable ); - if ( fileIterator != m_files.end( ) ) + auto fileIterator = m_files.find(writable); + if (fileIterator != m_files.end()) { // do not invalidate the file // it still exists, it is just not open - auto it = m_fileData.find( fileIterator->second ); - if ( it != m_fileData.end( ) ) + auto it = m_fileData.find(fileIterator->second); + if (it != m_fileData.end()) { /* * No need to finalize unconditionally, destructor will take care @@ -510,253 +496,240 @@ ADIOS2IOHandlerImpl::closeFile( */ it->second->flush( FlushLevel::UserFlush, - []( detail::BufferedActions & ba, adios2::Engine & ) { + [](detail::BufferedActions &ba, adios2::Engine &) { ba.finalize(); }, /* writeAttributes = */ true, - /* flushUnconditionally = */ false ); - m_fileData.erase( it ); + /* flushUnconditionally = */ false); + m_fileData.erase(it); } } } void ADIOS2IOHandlerImpl::openPath( - Writable * writable, const Parameter< Operation::OPEN_PATH > & parameters ) + Writable *writable, const Parameter ¶meters) { /* Sanitize path */ - refreshFileFromParent( writable, /* preferParentFile = */ true ); + 
refreshFileFromParent(writable, /* preferParentFile = */ true);
std::string prefix =
- filePositionToString( setAndGetFilePosition( writable->parent ) );
- std::string suffix = auxiliary::removeSlashes( parameters.path );
- std::string infix = suffix.empty() || auxiliary::ends_with( prefix, '/' )
- ? ""
- : "/";
+ filePositionToString(setAndGetFilePosition(writable->parent));
+ std::string suffix = auxiliary::removeSlashes(parameters.path);
+ std::string infix =
+ suffix.empty() || auxiliary::ends_with(prefix, '/') ? "" : "/";
/* ADIOS has no concept for explicitly creating paths.
* They are implicitly created with the paths of variables/attributes.
*/
- writable->abstractFilePosition = std::make_shared< ADIOS2FilePosition >(
- prefix + infix + suffix, ADIOS2FilePosition::GD::GROUP );
+ writable->abstractFilePosition = std::make_shared<ADIOS2FilePosition>(
+ prefix + infix + suffix, ADIOS2FilePosition::GD::GROUP);
writable->written = true;
}
void ADIOS2IOHandlerImpl::openDataset(
- Writable * writable, Parameter< Operation::OPEN_DATASET > & parameters )
+ Writable *writable, Parameter<Operation::OPEN_DATASET> &parameters)
{
- auto name = auxiliary::removeSlashes( parameters.name );
+ auto name = auxiliary::removeSlashes(parameters.name);
writable->abstractFilePosition.reset();
- auto pos = setAndGetFilePosition( writable, name );
+ auto pos = setAndGetFilePosition(writable, name);
pos->gd = ADIOS2FilePosition::GD::DATASET;
- auto file =
- refreshFileFromParent( writable, /* preferParentFile = */ false );
- auto varName = nameOfVariable( writable );
+ auto file = refreshFileFromParent(writable, /* preferParentFile = */ false);
+ auto varName = nameOfVariable(writable);
*parameters.dtype =
- detail::fromADIOS2Type( getFileData( file, IfFileNotOpen::ThrowError )
- .m_IO.VariableType( varName ) );
+ detail::fromADIOS2Type(getFileData(file, IfFileNotOpen::ThrowError)
+ .m_IO.VariableType(varName));
switchAdios2VariableType(
*parameters.dtype,
- detail::DatasetOpener( this ),
+ detail::DatasetOpener(this),
file,
varName,
- parameters );
+ parameters);
writable->written = true;
}
void ADIOS2IOHandlerImpl::deleteFile(
- Writable *, const Parameter< Operation::DELETE_FILE > & )
+ Writable *, const Parameter<Operation::DELETE_FILE> &)
{
- throw std::runtime_error( "[ADIOS2] Backend does not support deletion." );
+ throw std::runtime_error("[ADIOS2] Backend does not support deletion.");
}
void ADIOS2IOHandlerImpl::deletePath(
- Writable *, const Parameter< Operation::DELETE_PATH > & )
+ Writable *, const Parameter<Operation::DELETE_PATH> &)
{
- throw std::runtime_error( "[ADIOS2] Backend does not support deletion." );
+ throw std::runtime_error("[ADIOS2] Backend does not support deletion.");
}
-void
-ADIOS2IOHandlerImpl::deleteDataset(
- Writable *,
- const Parameter< Operation::DELETE_DATASET > & )
+void ADIOS2IOHandlerImpl::deleteDataset(
+ Writable *, const Parameter<Operation::DELETE_DATASET> &)
{
// call filedata.invalidateVariablesMap
- throw std::runtime_error( "[ADIOS2] Backend does not support deletion." );
+ throw std::runtime_error("[ADIOS2] Backend does not support deletion.");
}
void ADIOS2IOHandlerImpl::deleteAttribute(
- Writable *, const Parameter< Operation::DELETE_ATT > & )
+ Writable *, const Parameter<Operation::DELETE_ATT> &)
{
// call filedata.invalidateAttributesMap
- throw std::runtime_error( "[ADIOS2] Backend does not support deletion."
); + throw std::runtime_error("[ADIOS2] Backend does not support deletion."); } void ADIOS2IOHandlerImpl::writeDataset( - Writable * writable, - const Parameter< Operation::WRITE_DATASET > & parameters ) + Writable *writable, const Parameter ¶meters) { VERIFY_ALWAYS( m_handler->m_backendAccess != Access::READ_ONLY, - "[ADIOS2] Cannot write data in read-only mode." ); - setAndGetFilePosition( writable ); - auto file = - refreshFileFromParent( writable, /* preferParentFile = */ false ); - detail::BufferedActions & ba = - getFileData( file, IfFileNotOpen::ThrowError ); + "[ADIOS2] Cannot write data in read-only mode."); + setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + detail::BufferedActions &ba = getFileData(file, IfFileNotOpen::ThrowError); detail::BufferedPut bp; - bp.name = nameOfVariable( writable ); + bp.name = nameOfVariable(writable); bp.param = parameters; - ba.enqueue( std::move( bp ) ); - m_dirty.emplace( std::move( file ) ); + ba.enqueue(std::move(bp)); + m_dirty.emplace(std::move(file)); writable->written = true; // TODO erst nach dem Schreiben? } void ADIOS2IOHandlerImpl::writeAttribute( - Writable * writable, const Parameter< Operation::WRITE_ATT > & parameters ) + Writable *writable, const Parameter ¶meters) { - switch( attributeLayout() ) - { - case AttributeLayout::ByAdiosAttributes: - switchType( - parameters.dtype, - detail::OldAttributeWriter(), - this, - writable, - parameters ); - break; - case AttributeLayout::ByAdiosVariables: { - VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, - "[ADIOS2] Cannot write attribute in read-only mode." ); - auto pos = setAndGetFilePosition( writable ); - auto file = refreshFileFromParent( - writable, /* preferParentFile = */ false ); - auto fullName = nameOfAttribute( writable, parameters.name ); - auto prefix = filePositionToString( pos ); - - auto & filedata = getFileData( file, IfFileNotOpen::ThrowError ); - filedata.invalidateAttributesMap(); - m_dirty.emplace( std::move( file ) ); - - // this intentionally overwrites previous writes - auto & bufferedWrite = filedata.m_attributeWrites[ fullName ]; - bufferedWrite.name = fullName; - bufferedWrite.dtype = parameters.dtype; - bufferedWrite.resource = parameters.resource; - break; - } - default: - throw std::runtime_error( "Unreachable!" 
); + switch (attributeLayout()) + { + case AttributeLayout::ByAdiosAttributes: + switchType( + parameters.dtype, + detail::OldAttributeWriter(), + this, + writable, + parameters); + break; + case AttributeLayout::ByAdiosVariables: { + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[ADIOS2] Cannot write attribute in read-only mode."); + auto pos = setAndGetFilePosition(writable); + auto file = + refreshFileFromParent(writable, /* preferParentFile = */ false); + auto fullName = nameOfAttribute(writable, parameters.name); + auto prefix = filePositionToString(pos); + + auto &filedata = getFileData(file, IfFileNotOpen::ThrowError); + filedata.invalidateAttributesMap(); + m_dirty.emplace(std::move(file)); + + // this intentionally overwrites previous writes + auto &bufferedWrite = filedata.m_attributeWrites[fullName]; + bufferedWrite.name = fullName; + bufferedWrite.dtype = parameters.dtype; + bufferedWrite.resource = parameters.resource; + break; + } + default: + throw std::runtime_error("Unreachable!"); } } void ADIOS2IOHandlerImpl::readDataset( - Writable * writable, Parameter< Operation::READ_DATASET > & parameters ) + Writable *writable, Parameter ¶meters) { - setAndGetFilePosition( writable ); - auto file = - refreshFileFromParent( writable, /* preferParentFile = */ false ); - detail::BufferedActions & ba = - getFileData( file, IfFileNotOpen::ThrowError ); + setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + detail::BufferedActions &ba = getFileData(file, IfFileNotOpen::ThrowError); detail::BufferedGet bg; - bg.name = nameOfVariable( writable ); + bg.name = nameOfVariable(writable); bg.param = parameters; - ba.enqueue( std::move( bg ) ); - m_dirty.emplace( std::move( file ) ); + ba.enqueue(std::move(bg)); + m_dirty.emplace(std::move(file)); } namespace detail { -struct GetSpan -{ - template< typename T, typename... Args > - void operator()( - ADIOS2IOHandlerImpl * impl, - Parameter< Operation::GET_BUFFER_VIEW > & params, - detail::BufferedActions & ba, - std::string const & varName ) - { - auto & IO = ba.m_IO; - auto & engine = ba.getEngine(); - adios2::Variable< T > variable = impl->verifyDataset< T >( - params.offset, params.extent, IO, varName ); - adios2::Dims offset( params.offset.begin(), params.offset.end() ); - adios2::Dims extent( params.extent.begin(), params.extent.end() ); - variable.SetSelection( { std::move( offset ), std::move( extent ) } ); - typename adios2::Variable< T >::Span span = engine.Put( variable ); - params.out->backendManagedBuffer = true; - /* - * SIC! - * Do not emplace span.data() yet. - * Only call span.data() as soon as the user needs the pointer - * (will always be propagated to the backend with parameters.update - * = true). - * This avoids repeated resizing of ADIOS2 internal buffers if calling - * multiple spans. 
- */ - // params.out->ptr = span.data(); - unsigned nextIndex; - if( ba.m_updateSpans.empty() ) - { - nextIndex = 0; - } - else - { - nextIndex = ba.m_updateSpans.rbegin()->first + 1; + struct GetSpan + { + template + void operator()( + ADIOS2IOHandlerImpl *impl, + Parameter ¶ms, + detail::BufferedActions &ba, + std::string const &varName) + { + auto &IO = ba.m_IO; + auto &engine = ba.getEngine(); + adios2::Variable variable = impl->verifyDataset( + params.offset, params.extent, IO, varName); + adios2::Dims offset(params.offset.begin(), params.offset.end()); + adios2::Dims extent(params.extent.begin(), params.extent.end()); + variable.SetSelection({std::move(offset), std::move(extent)}); + typename adios2::Variable::Span span = engine.Put(variable); + params.out->backendManagedBuffer = true; + /* + * SIC! + * Do not emplace span.data() yet. + * Only call span.data() as soon as the user needs the pointer + * (will always be propagated to the backend with parameters.update + * = true). + * This avoids repeated resizing of ADIOS2 internal buffers if + * calling multiple spans. + */ + // params.out->ptr = span.data(); + unsigned nextIndex; + if (ba.m_updateSpans.empty()) + { + nextIndex = 0; + } + else + { + nextIndex = ba.m_updateSpans.rbegin()->first + 1; + } + params.out->viewIndex = nextIndex; + std::unique_ptr updateSpan{ + new UpdateSpan{std::move(span)}}; + ba.m_updateSpans.emplace_hint( + ba.m_updateSpans.end(), nextIndex, std::move(updateSpan)); } - params.out->viewIndex = nextIndex; - std::unique_ptr< I_UpdateSpan > updateSpan{ - new UpdateSpan< T >{ std::move( span ) } }; - ba.m_updateSpans.emplace_hint( - ba.m_updateSpans.end(), nextIndex, std::move( updateSpan ) ); - } - std::string errorMsg = "ADIOS2: getBufferView()"; -}; + std::string errorMsg = "ADIOS2: getBufferView()"; + }; -struct HasOperators -{ - template< typename T > - bool operator()( std::string const & name, adios2::IO & IO ) const + struct HasOperators { - adios2::Variable< T > variable = IO.InquireVariable< T >( name ); - if( !variable ) + template + bool operator()(std::string const &name, adios2::IO &IO) const { - return false; + adios2::Variable variable = IO.InquireVariable(name); + if (!variable) + { + return false; + } + return !variable.Operations().empty(); } - return !variable.Operations().empty(); - } - std::string errorMsg = "ADIOS2: getBufferView()"; -}; + std::string errorMsg = "ADIOS2: getBufferView()"; + }; } // namespace detail -void -ADIOS2IOHandlerImpl::getBufferView( - Writable * writable, - Parameter< Operation::GET_BUFFER_VIEW > & parameters ) +void ADIOS2IOHandlerImpl::getBufferView( + Writable *writable, Parameter ¶meters) { // @todo check access mode - if( m_engineType != "bp4" ) + if (m_engineType != "bp4") { parameters.out->backendManagedBuffer = false; return; } - setAndGetFilePosition( writable ); - auto file = refreshFileFromParent( writable, /* preferParentFile = */ false ); - detail::BufferedActions & ba = - getFileData( file, IfFileNotOpen::ThrowError ); + setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + detail::BufferedActions &ba = getFileData(file, IfFileNotOpen::ThrowError); - std::string name = nameOfVariable( writable ); - switch( m_useSpanBasedPutByDefault ) + std::string name = nameOfVariable(writable); + switch (m_useSpanBasedPutByDefault) { case UseSpan::No: parameters.out->backendManagedBuffer = false; return; - case UseSpan::Auto: - { + case UseSpan::Auto: { detail::HasOperators hasOperators; - if( 
switchAdios2VariableType( - parameters.dtype, hasOperators, name, ba.m_IO ) ) + if (switchAdios2VariableType( + parameters.dtype, hasOperators, name, ba.m_IO)) { parameters.out->backendManagedBuffer = false; return; @@ -767,79 +740,75 @@ ADIOS2IOHandlerImpl::getBufferView( break; } - if( parameters.update ) + if (parameters.update) { detail::I_UpdateSpan &updater = - *ba.m_updateSpans.at( parameters.out->viewIndex ); + *ba.m_updateSpans.at(parameters.out->viewIndex); parameters.out->ptr = updater.update(); parameters.out->backendManagedBuffer = true; } else { static detail::GetSpan gs; - switchAdios2VariableType( parameters.dtype, gs, this, parameters, ba, name ); + switchAdios2VariableType( + parameters.dtype, gs, this, parameters, ba, name); } } namespace detail { -template< typename T > -UpdateSpan< T >::UpdateSpan( adios2::detail::Span< T > span_in ) : - span( std::move( span_in ) ) -{ -} + template + UpdateSpan::UpdateSpan(adios2::detail::Span span_in) + : span(std::move(span_in)) + {} -template< typename T > -void *UpdateSpan< T >::update() -{ - return span.data(); -} + template + void *UpdateSpan::update() + { + return span.data(); + } } // namespace detail void ADIOS2IOHandlerImpl::readAttribute( - Writable * writable, Parameter< Operation::READ_ATT > & parameters ) + Writable *writable, Parameter ¶meters) { - auto file = refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto pos = setAndGetFilePosition( writable ); - detail::BufferedActions & ba = - getFileData( file, IfFileNotOpen::ThrowError ); - switch( attributeLayout() ) + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + auto pos = setAndGetFilePosition(writable); + detail::BufferedActions &ba = getFileData(file, IfFileNotOpen::ThrowError); + switch (attributeLayout()) { using AL = AttributeLayout; - case AL::ByAdiosAttributes: - { - detail::OldBufferedAttributeRead bar; - bar.name = nameOfAttribute( writable, parameters.name ); - bar.param = parameters; - ba.enqueue( std::move( bar ) ); - break; - } - case AL::ByAdiosVariables: - { - detail::BufferedAttributeRead bar; - bar.name = nameOfAttribute( writable, parameters.name ); - bar.param = parameters; - ba.m_attributeReads.push_back( std::move( bar ) ); - break; - } - default: - throw std::runtime_error( "Unreachable!" 
); + case AL::ByAdiosAttributes: { + detail::OldBufferedAttributeRead bar; + bar.name = nameOfAttribute(writable, parameters.name); + bar.param = parameters; + ba.enqueue(std::move(bar)); + break; } - m_dirty.emplace( std::move( file ) ); + case AL::ByAdiosVariables: { + detail::BufferedAttributeRead bar; + bar.name = nameOfAttribute(writable, parameters.name); + bar.param = parameters; + ba.m_attributeReads.push_back(std::move(bar)); + break; + } + default: + throw std::runtime_error("Unreachable!"); + } + m_dirty.emplace(std::move(file)); } void ADIOS2IOHandlerImpl::listPaths( - Writable * writable, Parameter< Operation::LIST_PATHS > & parameters ) + Writable *writable, Parameter ¶meters) { VERIFY_ALWAYS( writable->written, "[ADIOS2] Internal error: Writable not marked written during path " - "listing" ); - auto file = - refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto pos = setAndGetFilePosition( writable ); - std::string myName = filePositionToString( pos ); - if ( !auxiliary::ends_with( myName, '/' ) ) + "listing"); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + auto pos = setAndGetFilePosition(writable); + std::string myName = filePositionToString(pos); + if (!auxiliary::ends_with(myName, '/')) { myName = myName + '/'; } @@ -848,10 +817,10 @@ void ADIOS2IOHandlerImpl::listPaths( * since ADIOS does not have a concept of paths, restore them * from variables and attributes. */ - auto & fileData = getFileData( file, IfFileNotOpen::ThrowError ); + auto &fileData = getFileData(file, IfFileNotOpen::ThrowError); fileData.requireActiveStep(); - std::unordered_set< std::string > subdirs; + std::unordered_set subdirs; /* * When reading an attribute, we cannot distinguish * whether its containing "folder" is a group or a @@ -864,101 +833,99 @@ void ADIOS2IOHandlerImpl::listPaths( * from variables – attributes don't even need to be * inspected. 
*/ - std::vector< std::string > delete_me; + std::vector delete_me; - switch( attributeLayout() ) + switch (attributeLayout()) { using AL = AttributeLayout; - case AL::ByAdiosVariables: + case AL::ByAdiosVariables: { + std::vector vars = + fileData.availableVariablesPrefixed(myName); + for (auto var : vars) { - std::vector< std::string > vars = - fileData.availableVariablesPrefixed( myName ); - for( auto var : vars ) + // since current Writable is a group and no dataset, + // var == "__data__" is not possible + if (auxiliary::ends_with(var, "/__data__")) { - // since current Writable is a group and no dataset, - // var == "__data__" is not possible - if( auxiliary::ends_with( var, "/__data__" ) ) + // here be datasets + var = auxiliary::replace_last(var, "/__data__", ""); + auto firstSlash = var.find_first_of('/'); + if (firstSlash != std::string::npos) { - // here be datasets - var = auxiliary::replace_last( var, "/__data__", "" ); - auto firstSlash = var.find_first_of( '/' ); - if( firstSlash != std::string::npos ) - { - var = var.substr( 0, firstSlash ); - subdirs.emplace( std::move( var ) ); - } - else - { // var is a dataset at the current level - delete_me.push_back( std::move( var ) ); - } + var = var.substr(0, firstSlash); + subdirs.emplace(std::move(var)); } else + { // var is a dataset at the current level + delete_me.push_back(std::move(var)); + } + } + else + { + // here be attributes + auto firstSlash = var.find_first_of('/'); + if (firstSlash != std::string::npos) { - // here be attributes - auto firstSlash = var.find_first_of( '/' ); - if( firstSlash != std::string::npos ) - { - var = var.substr( 0, firstSlash ); - subdirs.emplace( std::move( var ) ); - } + var = var.substr(0, firstSlash); + subdirs.emplace(std::move(var)); } } - break; } - case AL::ByAdiosAttributes: + break; + } + case AL::ByAdiosAttributes: { + std::vector vars = + fileData.availableVariablesPrefixed(myName); + for (auto var : vars) { - std::vector< std::string > vars = - fileData.availableVariablesPrefixed( myName ); - for( auto var : vars ) + auto firstSlash = var.find_first_of('/'); + if (firstSlash != std::string::npos) { - auto firstSlash = var.find_first_of( '/' ); - if( firstSlash != std::string::npos ) - { - var = var.substr( 0, firstSlash ); - subdirs.emplace( std::move( var ) ); - } - else - { // var is a dataset at the current level - delete_me.push_back( std::move( var ) ); - } + var = var.substr(0, firstSlash); + subdirs.emplace(std::move(var)); + } + else + { // var is a dataset at the current level + delete_me.push_back(std::move(var)); } - std::vector< std::string > attributes = - fileData.availableAttributesPrefixed( myName ); - for( auto attr : attributes ) + } + std::vector attributes = + fileData.availableAttributesPrefixed(myName); + for (auto attr : attributes) + { + auto firstSlash = attr.find_first_of('/'); + if (firstSlash != std::string::npos) { - auto firstSlash = attr.find_first_of( '/' ); - if( firstSlash != std::string::npos ) - { - attr = attr.substr( 0, firstSlash ); - subdirs.emplace( std::move( attr ) ); - } + attr = attr.substr(0, firstSlash); + subdirs.emplace(std::move(attr)); } - break; } + break; + } } - for ( auto & d : delete_me ) + for (auto &d : delete_me) { - subdirs.erase( d ); + subdirs.erase(d); } - for ( auto & path : subdirs ) + for (auto &path : subdirs) { - parameters.paths->emplace_back( std::move( path ) ); + parameters.paths->emplace_back(std::move(path)); } } void ADIOS2IOHandlerImpl::listDatasets( - Writable * writable, Parameter< 
Operation::LIST_DATASETS > & parameters ) + Writable *writable, Parameter ¶meters) { VERIFY_ALWAYS( writable->written, "[ADIOS2] Internal error: Writable not marked written during path " - "listing" ); - auto file = refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto pos = setAndGetFilePosition( writable ); + "listing"); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + auto pos = setAndGetFilePosition(writable); // adios2::Engine & engine = getEngine( file ); - std::string myName = filePositionToString( pos ); - if ( !auxiliary::ends_with( myName, '/' ) ) + std::string myName = filePositionToString(pos); + if (!auxiliary::ends_with(myName, '/')) { myName = myName + '/'; } @@ -968,153 +935,147 @@ void ADIOS2IOHandlerImpl::listDatasets( * from variables and attributes. */ - auto & fileData = getFileData( file, IfFileNotOpen::ThrowError ); + auto &fileData = getFileData(file, IfFileNotOpen::ThrowError); fileData.requireActiveStep(); - std::unordered_set< std::string > subdirs; - for( auto var : fileData.availableVariablesPrefixed( myName ) ) + std::unordered_set subdirs; + for (auto var : fileData.availableVariablesPrefixed(myName)) { - if( attributeLayout() == AttributeLayout::ByAdiosVariables ) + if (attributeLayout() == AttributeLayout::ByAdiosVariables) { // since current Writable is a group and no dataset, // var == "__data__" is not possible - if( !auxiliary::ends_with( var, "/__data__" ) ) + if (!auxiliary::ends_with(var, "/__data__")) { continue; } // variable is now definitely a dataset, let's strip the suffix - var = auxiliary::replace_last( var, "/__data__", "" ); + var = auxiliary::replace_last(var, "/__data__", ""); } // if string still contains a slash, variable is a dataset below the // current group // we only want datasets contained directly within the current group // let's ensure that - auto firstSlash = var.find_first_of( '/' ); - if( firstSlash == std::string::npos ) + auto firstSlash = var.find_first_of('/'); + if (firstSlash == std::string::npos) { - subdirs.emplace( std::move( var ) ); + subdirs.emplace(std::move(var)); } } - for( auto & dataset : subdirs ) + for (auto &dataset : subdirs) { - parameters.datasets->emplace_back( std::move( dataset ) ); + parameters.datasets->emplace_back(std::move(dataset)); } } void ADIOS2IOHandlerImpl::listAttributes( - Writable * writable, Parameter< Operation::LIST_ATTS > & parameters ) + Writable *writable, Parameter ¶meters) { VERIFY_ALWAYS( writable->written, "[ADIOS2] Internal error: Writable not marked " - "written during attribute writing" ); - auto file = refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto pos = setAndGetFilePosition( writable ); - auto attributePrefix = filePositionToString( pos ); - if ( attributePrefix == "/" ) + "written during attribute writing"); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + auto pos = setAndGetFilePosition(writable); + auto attributePrefix = filePositionToString(pos); + if (attributePrefix == "/") { attributePrefix = ""; } - auto & ba = getFileData( file, IfFileNotOpen::ThrowError ); + auto &ba = getFileData(file, IfFileNotOpen::ThrowError); ba.requireActiveStep(); // make sure that the attributes are present - std::vector< std::string > attrs; - switch( attributeLayout() ) + std::vector attrs; + switch (attributeLayout()) { using AL = AttributeLayout; - case AL::ByAdiosAttributes: - attrs = ba.availableAttributesPrefixed( attributePrefix ); - break; - case 
AL::ByAdiosVariables: - attrs = ba.availableVariablesPrefixed( attributePrefix ); - break; + case AL::ByAdiosAttributes: + attrs = ba.availableAttributesPrefixed(attributePrefix); + break; + case AL::ByAdiosVariables: + attrs = ba.availableVariablesPrefixed(attributePrefix); + break; } - for( auto & rawAttr : attrs ) + for (auto &rawAttr : attrs) { - if( attributeLayout() == AttributeLayout::ByAdiosVariables && - ( auxiliary::ends_with( rawAttr, "/__data__" ) || - rawAttr == "__data__" ) ) + if (attributeLayout() == AttributeLayout::ByAdiosVariables && + (auxiliary::ends_with(rawAttr, "/__data__") || + rawAttr == "__data__")) { continue; } - auto attr = auxiliary::removeSlashes( rawAttr ); - if( attr.find_last_of( '/' ) == std::string::npos ) + auto attr = auxiliary::removeSlashes(rawAttr); + if (attr.find_last_of('/') == std::string::npos) { - parameters.attributes->push_back( std::move( attr ) ); + parameters.attributes->push_back(std::move(attr)); } } } -void -ADIOS2IOHandlerImpl::advance( - Writable * writable, - Parameter< Operation::ADVANCE > & parameters ) +void ADIOS2IOHandlerImpl::advance( + Writable *writable, Parameter ¶meters) { - auto file = m_files[ writable ]; - auto & ba = getFileData( file, IfFileNotOpen::ThrowError ); - *parameters.status = ba.advance( parameters.mode ); + auto file = m_files[writable]; + auto &ba = getFileData(file, IfFileNotOpen::ThrowError); + *parameters.status = ba.advance(parameters.mode); } -void -ADIOS2IOHandlerImpl::closePath( - Writable * writable, - Parameter< Operation::CLOSE_PATH > const & ) +void ADIOS2IOHandlerImpl::closePath( + Writable *writable, Parameter const &) { VERIFY_ALWAYS( writable->written, - "[ADIOS2] Cannot close a path that has not been written yet." ); - if( m_handler->m_backendAccess == Access::READ_ONLY ) + "[ADIOS2] Cannot close a path that has not been written yet."); + if (m_handler->m_backendAccess == Access::READ_ONLY) { // nothing to do return; } - auto file = refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto & fileData = getFileData( file, IfFileNotOpen::ThrowError ); - if( !fileData.optimizeAttributesStreaming ) + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + auto &fileData = getFileData(file, IfFileNotOpen::ThrowError); + if (!fileData.optimizeAttributesStreaming) { return; } - auto position = setAndGetFilePosition( writable ); - auto const positionString = filePositionToString( position ); + auto position = setAndGetFilePosition(writable); + auto const positionString = filePositionToString(position); VERIFY( - !auxiliary::ends_with( positionString, '/' ), + !auxiliary::ends_with(positionString, '/'), "[ADIOS2] Position string has unexpected format. This is a bug " - "in the openPMD API." 
); + "in the openPMD API."); - for( auto const & attr : - fileData.availableAttributesPrefixed( positionString ) ) + for (auto const &attr : + fileData.availableAttributesPrefixed(positionString)) { - fileData.m_IO.RemoveAttribute( positionString + '/' + attr ); + fileData.m_IO.RemoveAttribute(positionString + '/' + attr); } } void ADIOS2IOHandlerImpl::availableChunks( - Writable * writable, Parameter< Operation::AVAILABLE_CHUNKS > & parameters ) + Writable *writable, Parameter ¶meters) { - setAndGetFilePosition( writable ); - auto file = refreshFileFromParent( writable, /* preferParentFile = */ false ); - detail::BufferedActions & ba = - getFileData( file, IfFileNotOpen::ThrowError ); - std::string varName = nameOfVariable( writable ); + setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + detail::BufferedActions &ba = getFileData(file, IfFileNotOpen::ThrowError); + std::string varName = nameOfVariable(writable); auto engine = ba.getEngine(); // make sure that data are present - auto datatype = detail::fromADIOS2Type( ba.m_IO.VariableType( varName ) ); + auto datatype = detail::fromADIOS2Type(ba.m_IO.VariableType(varName)); static detail::RetrieveBlocksInfo rbi; switchAdios2VariableType( - datatype, rbi, parameters, ba.m_IO, engine, varName ); + datatype, rbi, parameters, ba.m_IO, engine, varName); } -adios2::Mode -ADIOS2IOHandlerImpl::adios2AccessMode( std::string const & fullPath ) +adios2::Mode ADIOS2IOHandlerImpl::adios2AccessMode(std::string const &fullPath) { - switch ( m_handler->m_backendAccess ) + switch (m_handler->m_backendAccess) { case Access::CREATE: return adios2::Mode::Write; case Access::READ_ONLY: return adios2::Mode::Read; case Access::READ_WRITE: - if( auxiliary::directory_exists( fullPath ) || - auxiliary::file_exists( fullPath ) ) + if (auxiliary::directory_exists(fullPath) || + auxiliary::file_exists(fullPath)) { std::cerr << "ADIOS2 does currently not yet implement ReadWrite " "(Append) mode. 
" @@ -1135,135 +1096,128 @@ ADIOS2IOHandlerImpl::adios2AccessMode( std::string const & fullPath ) auxiliary::TracingJSON ADIOS2IOHandlerImpl::nullvalue = nlohmann::json(); -std::string -ADIOS2IOHandlerImpl::filePositionToString( - std::shared_ptr< ADIOS2FilePosition > filepos ) +std::string ADIOS2IOHandlerImpl::filePositionToString( + std::shared_ptr filepos) { return filepos->location; } -std::shared_ptr< ADIOS2FilePosition > ADIOS2IOHandlerImpl::extendFilePosition( - std::shared_ptr< ADIOS2FilePosition > const & oldPos, std::string s ) +std::shared_ptr ADIOS2IOHandlerImpl::extendFilePosition( + std::shared_ptr const &oldPos, std::string s) { - auto path = filePositionToString( oldPos ); - if ( !auxiliary::ends_with( path, '/' ) && - !auxiliary::starts_with( s, '/' ) ) + auto path = filePositionToString(oldPos); + if (!auxiliary::ends_with(path, '/') && !auxiliary::starts_with(s, '/')) { path = path + "/"; } - else if ( auxiliary::ends_with( path, '/' ) && - auxiliary::starts_with( s, '/' ) ) + else if (auxiliary::ends_with(path, '/') && auxiliary::starts_with(s, '/')) { - path = auxiliary::replace_last( path, "/", "" ); + path = auxiliary::replace_last(path, "/", ""); } - return std::make_shared< ADIOS2FilePosition >( path + std::move( s ), - oldPos->gd ); + return std::make_shared( + path + std::move(s), oldPos->gd); } -auxiliary::Option< adios2::Operator > -ADIOS2IOHandlerImpl::getCompressionOperator( std::string const & compression ) +auxiliary::Option +ADIOS2IOHandlerImpl::getCompressionOperator(std::string const &compression) { adios2::Operator res; - auto it = m_operators.find( compression ); - if ( it == m_operators.end( ) ) + auto it = m_operators.find(compression); + if (it == m_operators.end()) { - try { - res = m_ADIOS.DefineOperator( compression, compression ); + try + { + res = m_ADIOS.DefineOperator(compression, compression); } - catch ( std::invalid_argument const & e ) + catch (std::invalid_argument const &e) { std::cerr << "Warning: ADIOS2 backend does not support compression " "method " << compression << ". Continuing without compression." - << "\nOriginal error: " << e.what() - << std::endl; - return auxiliary::Option< adios2::Operator >(); + << "\nOriginal error: " << e.what() << std::endl; + return auxiliary::Option(); } - catch(std::string const & s) + catch (std::string const &s) { std::cerr << "Warning: ADIOS2 backend does not support compression " "method " << compression << ". Continuing without compression." 
- << "\nOriginal error: " << s - << std::endl; - return auxiliary::Option< adios2::Operator >(); + << "\nOriginal error: " << s << std::endl; + return auxiliary::Option(); } - m_operators.emplace( compression, res ); + m_operators.emplace(compression, res); } else { res = it->second; } - return auxiliary::makeOption( adios2::Operator( res ) ); + return auxiliary::makeOption(adios2::Operator(res)); } -std::string -ADIOS2IOHandlerImpl::nameOfVariable( Writable * writable ) +std::string ADIOS2IOHandlerImpl::nameOfVariable(Writable *writable) { - auto filepos = setAndGetFilePosition( writable ); - auto res = filePositionToString( filepos ); - if( attributeLayout() == AttributeLayout::ByAdiosAttributes ) + auto filepos = setAndGetFilePosition(writable); + auto res = filePositionToString(filepos); + if (attributeLayout() == AttributeLayout::ByAdiosAttributes) { return res; } - switch( filepos->gd ) + switch (filepos->gd) { - case ADIOS2FilePosition::GD::GROUP: - return res; - case ADIOS2FilePosition::GD::DATASET: - if( auxiliary::ends_with( res, '/' ) ) - { - return res + "__data__"; - } - else - { - // By convention, this path should always be taken - // But let's be safe - return res + "/__data__"; - } - default: - throw std::runtime_error( "[ADIOS2IOHandlerImpl] Unreachable!" ); + case ADIOS2FilePosition::GD::GROUP: + return res; + case ADIOS2FilePosition::GD::DATASET: + if (auxiliary::ends_with(res, '/')) + { + return res + "__data__"; + } + else + { + // By convention, this path should always be taken + // But let's be safe + return res + "/__data__"; + } + default: + throw std::runtime_error("[ADIOS2IOHandlerImpl] Unreachable!"); } } -std::string ADIOS2IOHandlerImpl::nameOfAttribute( Writable * writable, - std::string attribute ) +std::string +ADIOS2IOHandlerImpl::nameOfAttribute(Writable *writable, std::string attribute) { - auto pos = setAndGetFilePosition( writable ); + auto pos = setAndGetFilePosition(writable); return filePositionToString( - extendFilePosition( pos, auxiliary::removeSlashes( attribute ) ) ); + extendFilePosition(pos, auxiliary::removeSlashes(attribute))); } -ADIOS2FilePosition::GD -ADIOS2IOHandlerImpl::groupOrDataset( Writable * writable ) +ADIOS2FilePosition::GD ADIOS2IOHandlerImpl::groupOrDataset(Writable *writable) { - return setAndGetFilePosition( writable )->gd; + return setAndGetFilePosition(writable)->gd; } detail::BufferedActions & -ADIOS2IOHandlerImpl::getFileData( InvalidatableFile file, IfFileNotOpen flag ) +ADIOS2IOHandlerImpl::getFileData(InvalidatableFile file, IfFileNotOpen flag) { VERIFY_ALWAYS( file.valid(), "[ADIOS2] Cannot retrieve file data for a file that has " - "been overwritten or deleted." ) - auto it = m_fileData.find( file ); - if( it == m_fileData.end() ) + "been overwritten or deleted.") + auto it = m_fileData.find(file); + if (it == m_fileData.end()) { - switch( flag ) + switch (flag) { case IfFileNotOpen::OpenImplicitly: { auto res = m_fileData.emplace( - std::move( file ), - std::make_unique< detail::BufferedActions >( *this, file ) ); + std::move(file), + std::make_unique(*this, file)); return *res.first->second; } case IfFileNotOpen::ThrowError: throw std::runtime_error( "[ADIOS2] Requested file has not been opened yet: " + - ( file.fileState ? file.fileState->name - : "Unknown file name" ) ); + (file.fileState ? 
file.fileState->name : "Unknown file name")); } } else @@ -1272,104 +1226,105 @@ ADIOS2IOHandlerImpl::getFileData( InvalidatableFile file, IfFileNotOpen flag ) } } -void ADIOS2IOHandlerImpl::dropFileData( InvalidatableFile file ) +void ADIOS2IOHandlerImpl::dropFileData(InvalidatableFile file) { - auto it = m_fileData.find( file ); - if ( it != m_fileData.end( ) ) + auto it = m_fileData.find(file); + if (it != m_fileData.end()) { - it->second->drop( ); - m_fileData.erase( it ); + it->second->drop(); + m_fileData.erase(it); } } -template < typename T > -adios2::Variable< T > -ADIOS2IOHandlerImpl::verifyDataset( Offset const & offset, - Extent const & extent, adios2::IO & IO, - std::string const & varName ) +template +adios2::Variable ADIOS2IOHandlerImpl::verifyDataset( + Offset const &offset, + Extent const &extent, + adios2::IO &IO, + std::string const &varName) { { - auto requiredType = adios2::GetType< T >( ); - auto actualType = IO.VariableType( varName ); + auto requiredType = adios2::GetType(); + auto actualType = IO.VariableType(varName); std::stringstream errorMessage; errorMessage << "[ADIOS2] Trying to access a dataset with wrong type (trying to " "access dataset with type " - << determineDatatype< T >() << ", but has type " - << detail::fromADIOS2Type( actualType, false ) << ")"; - VERIFY_ALWAYS( requiredType == actualType, errorMessage.str() ); + << determineDatatype() << ", but has type " + << detail::fromADIOS2Type(actualType, false) << ")"; + VERIFY_ALWAYS(requiredType == actualType, errorMessage.str()); } - adios2::Variable< T > var = IO.InquireVariable< T >( varName ); - VERIFY_ALWAYS( var.operator bool( ), - "[ADIOS2] Internal error: Failed opening ADIOS2 variable." ) + adios2::Variable var = IO.InquireVariable(varName); + VERIFY_ALWAYS( + var.operator bool(), + "[ADIOS2] Internal error: Failed opening ADIOS2 variable.") // TODO leave this check to ADIOS? - adios2::Dims shape = var.Shape( ); - auto actualDim = shape.size( ); + adios2::Dims shape = var.Shape(); + auto actualDim = shape.size(); { - auto requiredDim = extent.size( ); - VERIFY_ALWAYS( requiredDim == actualDim, - "[ADIOS2] Trying to access a dataset with wrong dimensionality " - "(trying to access dataset with dimensionality " + - std::to_string( requiredDim ) + - ", but has dimensionality " + - std::to_string( actualDim ) + ")" ) + auto requiredDim = extent.size(); + VERIFY_ALWAYS( + requiredDim == actualDim, + "[ADIOS2] Trying to access a dataset with wrong dimensionality " + "(trying to access dataset with dimensionality " + + std::to_string(requiredDim) + ", but has dimensionality " + + std::to_string(actualDim) + ")") } - for ( unsigned int i = 0; i < actualDim; i++ ) + for (unsigned int i = 0; i < actualDim; i++) { - VERIFY_ALWAYS( offset[i] + extent[i] <= shape[i], - "[ADIOS2] Dataset access out of bounds." 
) + VERIFY_ALWAYS( + offset[i] + extent[i] <= shape[i], + "[ADIOS2] Dataset access out of bounds.") } - var.SetSelection({ - adios2::Dims(offset.begin(), offset.end()), - adios2::Dims(extent.begin(), extent.end()) - }); + var.SetSelection( + {adios2::Dims(offset.begin(), offset.end()), + adios2::Dims(extent.begin(), extent.end())}); return var; } namespace detail { - DatasetReader::DatasetReader( openPMD::ADIOS2IOHandlerImpl * impl ) - : m_impl{impl} - { - } + DatasetReader::DatasetReader(openPMD::ADIOS2IOHandlerImpl *impl) + : m_impl{impl} + {} - template < typename T> - void - DatasetReader::operator( )( detail::BufferedGet & bp, adios2::IO & IO, - adios2::Engine & engine, - std::string const & fileName ) + template + void DatasetReader::operator()( + detail::BufferedGet &bp, + adios2::IO &IO, + adios2::Engine &engine, + std::string const &fileName) { - adios2::Variable< T > var = m_impl->verifyDataset< T >( - bp.param.offset, bp.param.extent, IO, bp.name ); - if ( !var ) + adios2::Variable var = m_impl->verifyDataset( + bp.param.offset, bp.param.extent, IO, bp.name); + if (!var) { throw std::runtime_error( - "[ADIOS2] Failed retrieving ADIOS2 Variable with name '" + bp.name + - "' from file " + fileName + "." ); + "[ADIOS2] Failed retrieving ADIOS2 Variable with name '" + + bp.name + "' from file " + fileName + "."); } - auto ptr = std::static_pointer_cast< T >( bp.param.data ).get( ); - engine.Get( var, ptr ); + auto ptr = std::static_pointer_cast(bp.param.data).get(); + engine.Get(var, ptr); } - template< typename T > - Datatype - OldAttributeReader::operator()( - adios2::IO & IO, + template + Datatype OldAttributeReader::operator()( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { /* * If we store an attribute of boolean type, we store an additional * attribute prefixed with '__is_boolean__' to indicate this information * that would otherwise be lost. Check whether this has been done. */ - using rep = AttributeTypes< bool >::rep; + using rep = AttributeTypes::rep; if #if __cplusplus >= 201703L constexpr #endif - ( std::is_same< T, rep >::value ) + (std::is_same::value) { std::string metaAttr = ADIOS2Defaults::str_isBooleanOldLayout + name; @@ -1382,38 +1337,35 @@ namespace detail auto type = attributeInfo( IO, ADIOS2Defaults::str_isBooleanOldLayout + name, - /* verbose = */ false ); - if( type == determineDatatype< rep >() ) + /* verbose = */ false); + if (type == determineDatatype()) { - auto attr = IO.InquireAttribute< rep >( metaAttr ); - if( attr.Data().size() == 1 && attr.Data()[ 0 ] == 1 ) + auto attr = IO.InquireAttribute(metaAttr); + if (attr.Data().size() == 1 && attr.Data()[0] == 1) { - AttributeTypes< bool >::oldReadAttribute( - IO, name, resource ); - return determineDatatype< bool >(); + AttributeTypes::oldReadAttribute(IO, name, resource); + return determineDatatype(); } } } - AttributeTypes< T >::oldReadAttribute( IO, name, resource ); - return determineDatatype< T >(); + AttributeTypes::oldReadAttribute(IO, name, resource); + return determineDatatype(); } - template< int n, typename... Params > - Datatype - OldAttributeReader::operator()( Params &&... ) + template + Datatype OldAttributeReader::operator()(Params &&...) { throw std::runtime_error( "[ADIOS2] Internal error: Unknown datatype while " - "trying to read an attribute." 
); + "trying to read an attribute."); } - template< typename T > - Datatype - AttributeReader::operator()( - adios2::IO & IO, - detail::PreloadAdiosAttributes const & preloadedAttributes, + template + Datatype AttributeReader::operator()( + adios2::IO &IO, + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { /* * If we store an attribute of boolean type, we store an additional @@ -1423,9 +1375,9 @@ namespace detail using rep = AttributeTypes::rep; if #if __cplusplus >= 201703L - constexpr + constexpr #endif - ( std::is_same< T, rep >::value ) + (std::is_same::value) { std::string metaAttr = ADIOS2Defaults::str_isBooleanNewLayout + name; @@ -1438,70 +1390,69 @@ namespace detail auto type = attributeInfo( IO, ADIOS2Defaults::str_isBooleanNewLayout + name, - /* verbose = */ false ); - if( type == determineDatatype< rep >() ) + /* verbose = */ false); + if (type == determineDatatype()) { - auto attr = IO.InquireAttribute< rep >( metaAttr ); + auto attr = IO.InquireAttribute(metaAttr); if (attr.Data().size() == 1 && attr.Data()[0] == 1) { - AttributeTypes< bool >::readAttribute( - preloadedAttributes, name, resource ); - return determineDatatype< bool >(); + AttributeTypes::readAttribute( + preloadedAttributes, name, resource); + return determineDatatype(); } } } - AttributeTypes< T >::readAttribute( - preloadedAttributes, name, resource ); - return determineDatatype< T >(); + AttributeTypes::readAttribute(preloadedAttributes, name, resource); + return determineDatatype(); } - template < int n, typename... Params > - Datatype AttributeReader::operator( )( Params &&... ) + template + Datatype AttributeReader::operator()(Params &&...) { - throw std::runtime_error( "[ADIOS2] Internal error: Unknown datatype while " - "trying to read an attribute." ); + throw std::runtime_error( + "[ADIOS2] Internal error: Unknown datatype while " + "trying to read an attribute."); } - template< typename T > + template void OldAttributeWriter::operator()( - ADIOS2IOHandlerImpl * impl, - Writable * writable, - const Parameter< Operation::WRITE_ATT > & parameters ) + ADIOS2IOHandlerImpl *impl, + Writable *writable, + const Parameter ¶meters) { VERIFY_ALWAYS( impl->m_handler->m_backendAccess != Access::READ_ONLY, - "[ADIOS2] Cannot write attribute in read-only mode." 
); - auto pos = impl->setAndGetFilePosition( writable ); - auto file = impl->refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto fullName = impl->nameOfAttribute( writable, parameters.name ); - auto prefix = impl->filePositionToString( pos ); - - auto & filedata = impl->getFileData( - file, ADIOS2IOHandlerImpl::IfFileNotOpen::ThrowError ); + "[ADIOS2] Cannot write attribute in read-only mode."); + auto pos = impl->setAndGetFilePosition(writable); + auto file = impl->refreshFileFromParent( + writable, /* preferParentFile = */ false); + auto fullName = impl->nameOfAttribute(writable, parameters.name); + auto prefix = impl->filePositionToString(pos); + + auto &filedata = impl->getFileData( + file, ADIOS2IOHandlerImpl::IfFileNotOpen::ThrowError); filedata.invalidateAttributesMap(); adios2::IO IO = filedata.m_IO; - impl->m_dirty.emplace( std::move( file ) ); + impl->m_dirty.emplace(std::move(file)); - std::string t = IO.AttributeType( fullName ); - if ( !t.empty( ) ) // an attribute is present <=> it has a type + std::string t = IO.AttributeType(fullName); + if (!t.empty()) // an attribute is present <=> it has a type { // don't overwrite attributes if they are equivalent // overwriting is only legal within the same step - auto attributeModifiable = [ &filedata, &fullName ]() { - auto it = filedata.uncommittedAttributes.find( fullName ); + auto attributeModifiable = [&filedata, &fullName]() { + auto it = filedata.uncommittedAttributes.find(fullName); return it != filedata.uncommittedAttributes.end(); }; - if( AttributeTypes< T >::attributeUnchanged( - IO, - fullName, - variantSrc::get< T >( parameters.resource ) ) ) + if (AttributeTypes::attributeUnchanged( + IO, fullName, variantSrc::get(parameters.resource))) { return; } - else if( attributeModifiable() ) + else if (attributeModifiable()) { - IO.RemoveAttribute( fullName ); + IO.RemoveAttribute(fullName); } else { @@ -1513,492 +1464,470 @@ namespace detail } else { - filedata.uncommittedAttributes.emplace( fullName ); + filedata.uncommittedAttributes.emplace(fullName); } - AttributeTypes< T >::oldCreateAttribute( - IO, fullName, variantSrc::get< T >( parameters.resource ) ); + AttributeTypes::oldCreateAttribute( + IO, fullName, variantSrc::get(parameters.resource)); } - template< int n, typename... Params > - void - OldAttributeWriter::operator()( Params &&... ) + template + void OldAttributeWriter::operator()(Params &&...) { throw std::runtime_error( "[ADIOS2] Internal error: Unknown datatype while " - "trying to write an attribute." ); + "trying to write an attribute."); } - template< typename T > - void - AttributeWriter::operator()( - detail::BufferedAttributeWrite & params, - BufferedActions & fileData ) + template + void AttributeWriter::operator()( + detail::BufferedAttributeWrite ¶ms, BufferedActions &fileData) { - AttributeTypes< T >::createAttribute( + AttributeTypes::createAttribute( fileData.m_IO, fileData.requireActiveStep(), params, - variantSrc::get< T >( params.resource ) ); + variantSrc::get(params.resource)); } - template < int n, typename... Params > - void AttributeWriter::operator( )( Params &&... ) + template + void AttributeWriter::operator()(Params &&...) { - throw std::runtime_error( "[ADIOS2] Internal error: Unknown datatype while " - "trying to write an attribute." 
); + throw std::runtime_error( + "[ADIOS2] Internal error: Unknown datatype while " + "trying to write an attribute."); } - DatasetOpener::DatasetOpener( ADIOS2IOHandlerImpl * impl ) : m_impl{impl} - { - } + DatasetOpener::DatasetOpener(ADIOS2IOHandlerImpl *impl) : m_impl{impl} + {} - template < typename T > - void DatasetOpener:: - operator( )( InvalidatableFile file, const std::string & varName, - Parameter< Operation::OPEN_DATASET > & parameters ) + template + void DatasetOpener::operator()( + InvalidatableFile file, + const std::string &varName, + Parameter ¶meters) { - auto & fileData = m_impl->getFileData( - file, ADIOS2IOHandlerImpl::IfFileNotOpen::ThrowError ); + auto &fileData = m_impl->getFileData( + file, ADIOS2IOHandlerImpl::IfFileNotOpen::ThrowError); fileData.requireActiveStep(); - auto & IO = fileData.m_IO; - adios2::Variable< T > var = IO.InquireVariable< T >( varName ); - if( !var ) + auto &IO = fileData.m_IO; + adios2::Variable var = IO.InquireVariable(varName); + if (!var) { throw std::runtime_error( - "[ADIOS2] Failed retrieving ADIOS2 Variable with name '" + varName + - "' from file " + *file + "." ); + "[ADIOS2] Failed retrieving ADIOS2 Variable with name '" + + varName + "' from file " + *file + "."); } // cast from adios2::Dims to openPMD::Extent auto const shape = var.Shape(); parameters.extent->clear(); - parameters.extent->reserve( shape.size() ); - std::copy( shape.begin(), shape.end(), std::back_inserter(*parameters.extent) ); + parameters.extent->reserve(shape.size()); + std::copy( + shape.begin(), shape.end(), std::back_inserter(*parameters.extent)); } - WriteDataset::WriteDataset( ADIOS2IOHandlerImpl * handlerImpl ) - : m_handlerImpl{handlerImpl} - { - } + WriteDataset::WriteDataset(ADIOS2IOHandlerImpl *handlerImpl) + : m_handlerImpl{handlerImpl} + {} - template < typename T > - void WriteDataset::operator( )( detail::BufferedPut & bp, adios2::IO & IO, - adios2::Engine & engine ) + template + void WriteDataset::operator()( + detail::BufferedPut &bp, adios2::IO &IO, adios2::Engine &engine) { - VERIFY_ALWAYS( m_handlerImpl->m_handler->m_backendAccess != - Access::READ_ONLY, - "[ADIOS2] Cannot write data in read-only mode." ); + VERIFY_ALWAYS( + m_handlerImpl->m_handler->m_backendAccess != Access::READ_ONLY, + "[ADIOS2] Cannot write data in read-only mode."); - auto ptr = std::static_pointer_cast< const T >( bp.param.data ).get( ); + auto ptr = std::static_pointer_cast(bp.param.data).get(); - adios2::Variable< T > var = m_handlerImpl->verifyDataset< T >( - bp.param.offset, bp.param.extent, IO, bp.name ); + adios2::Variable var = m_handlerImpl->verifyDataset( + bp.param.offset, bp.param.extent, IO, bp.name); - engine.Put( var, ptr ); + engine.Put(var, ptr); } - template < int n, typename... Params > - void WriteDataset::operator( )( Params &&... ) + template + void WriteDataset::operator()(Params &&...) { - throw std::runtime_error( "[ADIOS2] WRITE_DATASET: Invalid datatype." 
); + throw std::runtime_error("[ADIOS2] WRITE_DATASET: Invalid datatype."); } - template < typename T > - void VariableDefiner::operator( )( - adios2::IO & IO, - std::string const & name, - std::vector< ADIOS2IOHandlerImpl::ParameterizedOperator > const & - compressions, - adios2::Dims const & shape, - adios2::Dims const & start, - adios2::Dims const & count, - bool const constantDims ) + template + void VariableDefiner::operator()( + adios2::IO &IO, + std::string const &name, + std::vector const + &compressions, + adios2::Dims const &shape, + adios2::Dims const &start, + adios2::Dims const &count, + bool const constantDims) { /* * Step/Variable-based iteration layout: * The variable may already be defined from a previous step, * so check if it's already here. */ - adios2::Variable< T > var = IO.InquireVariable< T >( name ); - if( !var ) + adios2::Variable var = IO.InquireVariable(name); + if (!var) { - var = IO.DefineVariable< T >( - name, shape, start, count, constantDims ); + var = IO.DefineVariable(name, shape, start, count, constantDims); } else { - var.SetShape( shape ); - if( count.size() > 0 ) + var.SetShape(shape); + if (count.size() > 0) { - var.SetSelection( { start, count } ); + var.SetSelection({start, count}); } // don't add compression operators multiple times return; } - if( !var ) + if (!var) { throw std::runtime_error( - "[ADIOS2] Internal error: Could not create Variable '" + name + "'." ); + "[ADIOS2] Internal error: Could not create Variable '" + name + + "'."); } - for( auto const & compression : compressions ) + for (auto const &compression : compressions) { - if( compression.op ) + if (compression.op) { - var.AddOperation( compression.op, compression.params ); + var.AddOperation(compression.op, compression.params); } } } - template < typename T > - void RetrieveBlocksInfo::operator( )( - Parameter< Operation::AVAILABLE_CHUNKS > & params, - adios2::IO & IO, - adios2::Engine & engine, - std::string const & varName ) + template + void RetrieveBlocksInfo::operator()( + Parameter ¶ms, + adios2::IO &IO, + adios2::Engine &engine, + std::string const &varName) { - auto var = IO.InquireVariable< T >( varName ); - auto blocksInfo = engine.BlocksInfo< T >( var, engine.CurrentStep() ); - auto & table = *params.chunks; - table.reserve( blocksInfo.size() ); - for( auto const & info : blocksInfo ) + auto var = IO.InquireVariable(varName); + auto blocksInfo = engine.BlocksInfo(var, engine.CurrentStep()); + auto &table = *params.chunks; + table.reserve(blocksInfo.size()); + for (auto const &info : blocksInfo) { Offset offset; Extent extent; auto size = info.Start.size(); - offset.reserve( size ); - extent.reserve( size ); - for( unsigned i = 0; i < size; ++i ) + offset.reserve(size); + extent.reserve(size); + for (unsigned i = 0; i < size; ++i) { - offset.push_back( info.Start[ i ] ); - extent.push_back( info.Count[ i ] ); + offset.push_back(info.Start[i]); + extent.push_back(info.Count[i]); } table.emplace_back( - std::move( offset ), std::move( extent ), info.WriterID ); + std::move(offset), std::move(extent), info.WriterID); } } - template < int n, typename... Args > - void RetrieveBlocksInfo::operator( )( Args&&... ) + template + void RetrieveBlocksInfo::operator()(Args &&...) 
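// Illustrative sketch (editor's addition, not part of the patch): with
// step/variable-based iteration encoding, the same ADIOS2 variable is reused
// across steps, so VariableDefiner above defines it only once and afterwards
// merely refreshes shape and selection, without re-attaching operators.
// `defineOrUpdateSketch` is a hypothetical name used only for illustration.
template <typename T>
adios2::Variable<T> defineOrUpdateSketch(
    adios2::IO &IO,
    std::string const &name,
    adios2::Dims const &shape,
    adios2::Dims const &start,
    adios2::Dims const &count)
{
    adios2::Variable<T> var = IO.InquireVariable<T>(name);
    if (!var)
    {
        // first step: create the variable; compression operators are attached
        // once, right after this call
        return IO.DefineVariable<T>(name, shape, start, count);
    }
    // later steps: the variable already exists, only refresh its geometry
    var.SetShape(shape);
    if (!count.empty())
    {
        var.SetSelection({start, count});
    }
    return var;
}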
{ // variable has not been found, so we don't fill in any blocks } - template< typename T > - void - AttributeTypes< T >::oldCreateAttribute( - adios2::IO & IO, - std::string name, - const T value ) + template + void AttributeTypes::oldCreateAttribute( + adios2::IO &IO, std::string name, const T value) { - auto attr = IO.DefineAttribute( name, value ); - if( !attr ) + auto attr = IO.DefineAttribute(name, value); + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed defining attribute '" + name + - "'." ); + "'."); } } - template< typename T > - void - AttributeTypes< T >::oldReadAttribute( - adios2::IO & IO, + template + void AttributeTypes::oldReadAttribute( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - auto attr = IO.InquireAttribute< T >( name ); - if ( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed reading attribute '" + name + - "'." ); + "'."); } - *resource = attr.Data()[ 0 ]; + *resource = attr.Data()[0]; } - template< typename T > - void - AttributeTypes< T >::createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const T value ) + template + void AttributeTypes::createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const T value) { - auto attr = IO.InquireVariable< T >( params.name ); + auto attr = IO.InquireVariable(params.name); // @todo check size - if( !attr ) + if (!attr) { // std::cout << "DATATYPE OF " << name << ": " // << IO.VariableType( name ) << std::endl; - attr = IO.DefineVariable< T >( params.name ); + attr = IO.DefineVariable(params.name); } - if( !attr ) + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed defining variable '" + - params.name + "'." ); + params.name + "'."); } - engine.Put( attr, value, adios2::Mode::Deferred ); + engine.Put(attr, value, adios2::Mode::Deferred); } - template< typename T > - void - AttributeTypes< T >::readAttribute( - detail::PreloadAdiosAttributes const & preloadedAttributes, + template + void AttributeTypes::readAttribute( + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - detail::AttributeWithShape< T > attr = - preloadedAttributes.getAttribute< T >( name ); - if( !( attr.shape.size() == 0 || - ( attr.shape.size() == 1 && attr.shape[ 0 ] == 1 ) ) ) + detail::AttributeWithShape attr = + preloadedAttributes.getAttribute(name); + if (!(attr.shape.size() == 0 || + (attr.shape.size() == 1 && attr.shape[0] == 1))) { throw std::runtime_error( "[ADIOS2] Expecting scalar ADIOS variable, got " + - std::to_string( attr.shape.size() ) + "D: " + name ); + std::to_string(attr.shape.size()) + "D: " + name); } *resource = *attr.data; } - template< typename T > - void - AttributeTypes< std::vector< T > >::oldCreateAttribute( - adios2::IO & IO, - std::string name, - const std::vector< T > & value ) + template + void AttributeTypes >::oldCreateAttribute( + adios2::IO &IO, std::string name, const std::vector &value) { - auto attr = IO.DefineAttribute( name, value.data(), value.size() ); - if( !attr ) + auto attr = IO.DefineAttribute(name, value.data(), value.size()); + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed defining attribute '" + name + - "'." 
); + "'."); } } - template< typename T > - void - AttributeTypes< std::vector< T > >::oldReadAttribute( - adios2::IO & IO, + template + void AttributeTypes >::oldReadAttribute( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - auto attr = IO.InquireAttribute< T >( name ); - if ( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { throw std::runtime_error( - "[ADIOS2] Internal error: Failed reading attribute '" + name + "'." ); + "[ADIOS2] Internal error: Failed reading attribute '" + name + + "'."); } *resource = attr.Data(); } - template < typename T > - void - AttributeTypes< std::vector< T > >::createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const std::vector< T > & value ) + template + void AttributeTypes >::createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const std::vector &value) { auto size = value.size(); - auto attr = IO.InquireVariable< T >( params.name ); + auto attr = IO.InquireVariable(params.name); // @todo check size - if( !attr ) + if (!attr) { - attr = IO.DefineVariable< T >( - params.name, { size }, { 0 }, { size } ); + attr = IO.DefineVariable(params.name, {size}, {0}, {size}); } - if( !attr ) + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed defining variable '" + - params.name + "'." ); + params.name + "'."); } - engine.Put( attr, value.data(), adios2::Mode::Deferred ); + engine.Put(attr, value.data(), adios2::Mode::Deferred); } - template< typename T > - void - AttributeTypes< std::vector< T > >::readAttribute( - detail::PreloadAdiosAttributes const & preloadedAttributes, + template + void AttributeTypes >::readAttribute( + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - detail::AttributeWithShape< T > attr = - preloadedAttributes.getAttribute< T >( name ); - if( attr.shape.size() != 1 ) + detail::AttributeWithShape attr = + preloadedAttributes.getAttribute(name); + if (attr.shape.size() != 1) { - throw std::runtime_error( "[ADIOS2] Expecting 1D ADIOS variable" ); + throw std::runtime_error("[ADIOS2] Expecting 1D ADIOS variable"); } - std::vector< T > res( attr.shape[ 0 ] ); - std::copy_n( attr.data, attr.shape[ 0 ], res.data() ); - *resource = std::move( res ); + std::vector res(attr.shape[0]); + std::copy_n(attr.data, attr.shape[0], res.data()); + *resource = std::move(res); } - void - AttributeTypes< std::vector< std::string > >::oldCreateAttribute( - adios2::IO & IO, - std::string name, - const std::vector< std::string > & value ) + void AttributeTypes >::oldCreateAttribute( + adios2::IO &IO, std::string name, const std::vector &value) { - auto attr = IO.DefineAttribute( name, value.data(), value.size() ); - if( !attr ) + auto attr = IO.DefineAttribute(name, value.data(), value.size()); + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed defining attribute '" + name + - "'." 
); + "'."); } } - void - AttributeTypes< std::vector< std::string > >::oldReadAttribute( - adios2::IO & IO, + void AttributeTypes >::oldReadAttribute( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - auto attr = IO.InquireAttribute< std::string >( name ); - if ( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed reading attribute '" + name + - "'." ); + "'."); } *resource = attr.Data(); } - void - AttributeTypes< std::vector< std::string > >::createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const std::vector< std::string > & vec ) + void AttributeTypes >::createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const std::vector &vec) { size_t width = 0; - for( auto const & str : vec ) + for (auto const &str : vec) { - width = std::max( width, str.size() ); + width = std::max(width, str.size()); } ++width; // null delimiter size_t const height = vec.size(); - auto attr = IO.InquireVariable< char >( params.name ); + auto attr = IO.InquireVariable(params.name); // @todo check size - if( !attr ) + if (!attr) { - attr = IO.DefineVariable< char >( - params.name, { height, width }, { 0, 0 }, { height, width } ); + attr = IO.DefineVariable( + params.name, {height, width}, {0, 0}, {height, width}); } - if( !attr ) + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed defining variable '" + - params.name + "'." ); + params.name + "'."); } // write this thing to the params, so we don't get a use after free // due to deferred writing - params.bufferForVecString = std::vector< char >( width * height, 0 ); - for( size_t i = 0; i < height; ++i ) + params.bufferForVecString = std::vector(width * height, 0); + for (size_t i = 0; i < height; ++i) { size_t start = i * width; - std::string const & str = vec[ i ]; + std::string const &str = vec[i]; std::copy( str.begin(), str.end(), - params.bufferForVecString.begin() + start ); + params.bufferForVecString.begin() + start); } engine.Put( - attr, params.bufferForVecString.data(), adios2::Mode::Deferred ); + attr, params.bufferForVecString.data(), adios2::Mode::Deferred); } - void - AttributeTypes< std::vector< std::string > >::readAttribute( - detail::PreloadAdiosAttributes const & preloadedAttributes, + void AttributeTypes >::readAttribute( + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { /* * char_type parameter only for specifying the "template" type. 
*/ - auto loadFromDatatype = - [ &preloadedAttributes, &name, &resource ]( auto char_type ) { - using char_t = decltype( char_type ); - detail::AttributeWithShape< char_t > attr = - preloadedAttributes.getAttribute< char_t >( name ); - if( attr.shape.size() != 2 ) - { - throw std::runtime_error( - "[ADIOS2] Expecting 2D ADIOS variable" ); - } - char_t const * loadedData = attr.data; - size_t height = attr.shape[ 0 ]; - size_t width = attr.shape[ 1 ]; + auto loadFromDatatype = [&preloadedAttributes, &name, &resource]( + auto char_type) { + using char_t = decltype(char_type); + detail::AttributeWithShape attr = + preloadedAttributes.getAttribute(name); + if (attr.shape.size() != 2) + { + throw std::runtime_error( + "[ADIOS2] Expecting 2D ADIOS variable"); + } + char_t const *loadedData = attr.data; + size_t height = attr.shape[0]; + size_t width = attr.shape[1]; - std::vector< std::string > res( height ); - if( std::is_signed< char >::value == - std::is_signed< char_t >::value ) + std::vector res(height); + if (std::is_signed::value == std::is_signed::value) + { + /* + * This branch is chosen if the signedness of the + * ADIOS variable corresponds with the signedness of the + * char type on the current platform. + * In this case, the C++ standard guarantees that the + * representations for char and (un)signed char are + * identical, reinterpret_cast-ing the loadedData to + * char in order to construct our strings will be fine. + */ + for (size_t i = 0; i < height; ++i) { - /* - * This branch is chosen if the signedness of the - * ADIOS variable corresponds with the signedness of the - * char type on the current platform. - * In this case, the C++ standard guarantees that the - * representations for char and (un)signed char are - * identical, reinterpret_cast-ing the loadedData to - * char in order to construct our strings will be fine. - */ - for( size_t i = 0; i < height; ++i ) + size_t start = i * width; + char const *start_ptr = + reinterpret_cast(loadedData + start); + size_t j = 0; + while (j < width && start_ptr[j] != 0) { - size_t start = i * width; - char const * start_ptr = - reinterpret_cast< char const * >( - loadedData + start ); - size_t j = 0; - while( j < width && start_ptr[ j ] != 0 ) - { - ++j; - } - std::string & str = res[ i ]; - str.append( start_ptr, start_ptr + j ); + ++j; } + std::string &str = res[i]; + str.append(start_ptr, start_ptr + j); } - else + } + else + { + /* + * This branch is chosen if the signedness of the + * ADIOS variable is different from the signedness of the + * char type on the current platform. + * In this case, we play it safe, and explicitly convert + * the loadedData to char pointwise. + */ + std::vector converted(width); + for (size_t i = 0; i < height; ++i) { - /* - * This branch is chosen if the signedness of the - * ADIOS variable is different from the signedness of the - * char type on the current platform. - * In this case, we play it safe, and explicitly convert - * the loadedData to char pointwise. 
- */ - std::vector< char > converted( width ); - for( size_t i = 0; i < height; ++i ) + size_t start = i * width; + auto const *start_ptr = loadedData + start; + size_t j = 0; + while (j < width && start_ptr[j] != 0) { - size_t start = i * width; - auto const * start_ptr = loadedData + start; - size_t j = 0; - while( j < width && start_ptr[ j ] != 0 ) - { - converted[ j ] = start_ptr[ j ]; - ++j; - } - std::string & str = res[ i ]; - str.append( converted.data(), converted.data() + j ); + converted[j] = start_ptr[j]; + ++j; } + std::string &str = res[i]; + str.append(converted.data(), converted.data() + j); } + } - *resource = res; - }; + *resource = res; + }; /* * If writing char variables in ADIOS2, they might become either int8_t * or uint8_t on disk depending on the platform. * So allow reading from both types. */ - switch( preloadedAttributes.attributeType( name ) ) + switch (preloadedAttributes.attributeType(name)) { /* * Workaround for two bugs at once: @@ -2008,166 +1937,154 @@ namespace detail */ case Datatype::CHAR: { using schar_t = signed char; - loadFromDatatype( schar_t{} ); + loadFromDatatype(schar_t{}); break; } case Datatype::UCHAR: { using uchar_t = unsigned char; - loadFromDatatype( uchar_t{} ); + loadFromDatatype(uchar_t{}); break; } default: { throw std::runtime_error( "[ADIOS2] Expecting 2D ADIOS variable of " - "type signed or unsigned char." ); + "type signed or unsigned char."); } } } - template< typename T, size_t n > - void - AttributeTypes< std::array< T, n > >::oldCreateAttribute( - adios2::IO & IO, - std::string name, - const std::array< T, n > & value ) + template + void AttributeTypes >::oldCreateAttribute( + adios2::IO &IO, std::string name, const std::array &value) { - auto attr = IO.DefineAttribute( name, value.data(), n ); - if( !attr ) + auto attr = IO.DefineAttribute(name, value.data(), n); + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed defining attribute '" + name + - "'." ); + "'."); } } - template< typename T, size_t n > - void - AttributeTypes< std::array< T, n > >::oldReadAttribute( - adios2::IO & IO, + template + void AttributeTypes >::oldReadAttribute( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - auto attr = IO.InquireAttribute< T >( name ); - if ( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { throw std::runtime_error( - "[ADIOS2] Internal error: Failed reading attribute '" + name + "'." 
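// Illustrative sketch (editor's addition, not part of the patch): the
// std::vector<std::string> specializations above store a string list as a 2D
// char variable of extent {height, width}, where width is the longest string
// plus one null delimiter and shorter rows are zero-padded; the signedness-aware
// reader above unpacks exactly this layout row by row. `packStringsSketch` is a
// hypothetical helper written only for illustration.
std::vector<char>
packStringsSketch(std::vector<std::string> const &vec, size_t &width)
{
    width = 0;
    for (auto const &str : vec)
    {
        width = std::max(width, str.size());
    }
    ++width; // one extra column for the null delimiter
    std::vector<char> buffer(width * vec.size(), 0); // zero-initialized padding
    for (size_t i = 0; i < vec.size(); ++i)
    {
        std::copy(vec[i].begin(), vec[i].end(), buffer.begin() + i * width);
    }
    return buffer; // row i holds vec[i], null-terminated within its row
}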
); + "[ADIOS2] Internal error: Failed reading attribute '" + name + + "'."); } - auto data = attr.Data( ); - std::array< T, n > res; - for ( size_t i = 0; i < n; i++ ) + auto data = attr.Data(); + std::array res; + for (size_t i = 0; i < n; i++) { res[i] = data[i]; } *resource = res; } - template< typename T, size_t n > - void - AttributeTypes< std::array< T, n > >::createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const std::array< T, n > & value ) + template + void AttributeTypes >::createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const std::array &value) { - auto attr = IO.InquireVariable< T >( params.name ); + auto attr = IO.InquireVariable(params.name); // @todo check size - if( !attr ) + if (!attr) { - attr = IO.DefineVariable< T >( params.name, { n }, { 0 }, { n } ); + attr = IO.DefineVariable(params.name, {n}, {0}, {n}); } - if( !attr ) + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed defining variable '" + - params.name + "'." ); + params.name + "'."); } - engine.Put( attr, value.data(), adios2::Mode::Deferred ); + engine.Put(attr, value.data(), adios2::Mode::Deferred); } - template< typename T, size_t n > - void - AttributeTypes< std::array< T, n > >::readAttribute( - detail::PreloadAdiosAttributes const & preloadedAttributes, + template + void AttributeTypes >::readAttribute( + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - detail::AttributeWithShape< T > attr = - preloadedAttributes.getAttribute< T >( name ); - if( attr.shape.size() != 1 || attr.shape[ 0 ] != n ) + detail::AttributeWithShape attr = + preloadedAttributes.getAttribute(name); + if (attr.shape.size() != 1 || attr.shape[0] != n) { throw std::runtime_error( "[ADIOS2] Expecting 1D ADIOS variable of extent " + - std::to_string( n ) ); + std::to_string(n)); } - std::array< T, n > res; - std::copy_n( attr.data, n, res.data() ); - *resource = std::move( res ); + std::array res; + std::copy_n(attr.data, n, res.data()); + *resource = std::move(res); } - void - AttributeTypes< bool >::oldCreateAttribute( - adios2::IO & IO, - std::string name, - const bool value ) + void AttributeTypes::oldCreateAttribute( + adios2::IO &IO, std::string name, const bool value) { - IO.DefineAttribute< bool_representation >( - ADIOS2Defaults::str_isBooleanOldLayout + name, 1 ); - AttributeTypes< bool_representation >::oldCreateAttribute( - IO, name, toRep( value ) ); + IO.DefineAttribute( + ADIOS2Defaults::str_isBooleanOldLayout + name, 1); + AttributeTypes::oldCreateAttribute( + IO, name, toRep(value)); } - void - AttributeTypes< bool >::oldReadAttribute( - adios2::IO & IO, + void AttributeTypes::oldReadAttribute( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - auto attr = IO.InquireAttribute< rep >( name ); - if ( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed reading attribute '" + name + - "'." 
); + "'."); } - *resource = fromRep( attr.Data()[ 0 ] ); + *resource = fromRep(attr.Data()[0]); } - - void - AttributeTypes< bool >::createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const bool value ) + void AttributeTypes::createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const bool value) { - IO.DefineAttribute< bool_representation >( - ADIOS2Defaults::str_isBooleanNewLayout + params.name, 1 ); - AttributeTypes< bool_representation >::createAttribute( - IO, engine, params, toRep( value ) ); + IO.DefineAttribute( + ADIOS2Defaults::str_isBooleanNewLayout + params.name, 1); + AttributeTypes::createAttribute( + IO, engine, params, toRep(value)); } - void - AttributeTypes< bool >::readAttribute( - detail::PreloadAdiosAttributes const & preloadedAttributes, + void AttributeTypes::readAttribute( + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - detail::AttributeWithShape< rep > attr = - preloadedAttributes.getAttribute< rep >( name ); - if( !( attr.shape.size() == 0 || - ( attr.shape.size() == 1 && attr.shape[ 0 ] == 1 ) ) ) + detail::AttributeWithShape attr = + preloadedAttributes.getAttribute(name); + if (!(attr.shape.size() == 0 || + (attr.shape.size() == 1 && attr.shape[0] == 1))) { throw std::runtime_error( "[ADIOS2] Expecting scalar ADIOS variable, got " + - std::to_string( attr.shape.size() ) + "D: " + name ); + std::to_string(attr.shape.size()) + "D: " + name); } - *resource = fromRep( *attr.data ); + *resource = fromRep(*attr.data); } - void BufferedGet::run( BufferedActions & ba ) + void BufferedGet::run(BufferedActions &ba) { switchAdios2VariableType( param.dtype, @@ -2175,47 +2092,44 @@ namespace detail *this, ba.m_IO, ba.getEngine(), - ba.m_file ); + ba.m_file); } - void - BufferedPut::run( BufferedActions & ba ) + void BufferedPut::run(BufferedActions &ba) { switchAdios2VariableType( - param.dtype, ba.m_writeDataset, *this, ba.m_IO, ba.getEngine() ); + param.dtype, ba.m_writeDataset, *this, ba.m_IO, ba.getEngine()); } - void - OldBufferedAttributeRead::run( BufferedActions & ba ) + void OldBufferedAttributeRead::run(BufferedActions &ba) { - auto type = attributeInfo( ba.m_IO, name, /* verbose = */ true ); + auto type = attributeInfo(ba.m_IO, name, /* verbose = */ true); - if( type == Datatype::UNDEFINED ) + if (type == Datatype::UNDEFINED) { throw std::runtime_error( "[ADIOS2] Requested attribute (" + name + - ") not found in backend." ); + ") not found in backend."); } Datatype ret = switchType( - type, detail::OldAttributeReader{}, ba.m_IO, name, param.resource ); + type, detail::OldAttributeReader{}, ba.m_IO, name, param.resource); *param.dtype = ret; } - void - BufferedAttributeRead::run( BufferedActions & ba ) + void BufferedAttributeRead::run(BufferedActions &ba) { auto type = attributeInfo( ba.m_IO, name, /* verbose = */ true, - VariableOrAttribute::Variable ); + VariableOrAttribute::Variable); - if( type == Datatype::UNDEFINED ) + if (type == Datatype::UNDEFINED) { throw std::runtime_error( "[ADIOS2] Requested attribute (" + name + - ") not found in backend." 
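// Illustrative sketch (editor's addition, not part of the patch): the
// toRep()/fromRep() calls above translate between C++ bool and the integer
// representation type that is actually written to ADIOS2. The concrete
// bool_representation typedef lives elsewhere in this backend; it is assumed
// here to behave like an unsigned char, and the names below are hypothetical.
using bool_rep_sketch = unsigned char;
constexpr bool_rep_sketch toRepSketch(bool b)
{
    return b ? 1 : 0;
}
constexpr bool fromRepSketch(bool_rep_sketch r)
{
    return r != 0;
}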
); + ") not found in backend."); } Datatype ret = switchType( @@ -2224,34 +2138,34 @@ namespace detail ba.m_IO, ba.preloadAttributes, name, - param.resource ); + param.resource); *param.dtype = ret; } - void - BufferedAttributeWrite::run( BufferedActions & fileData ) + void BufferedAttributeWrite::run(BufferedActions &fileData) { - switchType( dtype, detail::AttributeWriter(), *this, fileData ); + switchType(dtype, detail::AttributeWriter(), *this, fileData); } BufferedActions::BufferedActions( - ADIOS2IOHandlerImpl & impl, InvalidatableFile file ) - : m_file( impl.fullPath( std::move( file ) ) ) - , m_IOName( std::to_string( impl.nameCounter++ ) ) - , m_ADIOS( impl.m_ADIOS ) - , m_IO( impl.m_ADIOS.DeclareIO( m_IOName ) ) - , m_mode( impl.adios2AccessMode( m_file ) ) - , m_writeDataset( &impl ) - , m_readDataset( &impl ) + ADIOS2IOHandlerImpl &impl, InvalidatableFile file) + : m_file(impl.fullPath(std::move(file))) + , m_IOName(std::to_string(impl.nameCounter++)) + , m_ADIOS(impl.m_ADIOS) + , m_IO(impl.m_ADIOS.DeclareIO(m_IOName)) + , m_mode(impl.adios2AccessMode(m_file)) + , m_writeDataset(&impl) + , m_readDataset(&impl) , m_attributeReader() - , m_impl( &impl ) - , m_engineType( impl.m_engineType ) + , m_impl(&impl) + , m_engineType(impl.m_engineType) { - if( !m_IO ) + if (!m_IO) { throw std::runtime_error( - "[ADIOS2] Internal error: Failed declaring ADIOS2 IO object for file " + - m_file ); + "[ADIOS2] Internal error: Failed declaring ADIOS2 IO object " + "for file " + + m_file); } else { @@ -2266,7 +2180,7 @@ namespace detail void BufferedActions::finalize() { - if( finalized ) + if (finalized) { return; } @@ -2275,49 +2189,46 @@ namespace detail // (attributes are written upon closing a step or a file // which users might never do) bool needToWriteAttributes = !m_attributeWrites.empty(); - if( ( needToWriteAttributes || !m_engine ) && - m_mode != adios2::Mode::Read ) + if ((needToWriteAttributes || !m_engine) && + m_mode != adios2::Mode::Read) { - auto & engine = getEngine(); - if( needToWriteAttributes ) + auto &engine = getEngine(); + if (needToWriteAttributes) { - for( auto & pair : m_attributeWrites ) + for (auto &pair : m_attributeWrites) { - pair.second.run( *this ); + pair.second.run(*this); } engine.PerformPuts(); } } - if( m_engine ) + if (m_engine) { - auto & engine = m_engine.get(); + auto &engine = m_engine.get(); // might have been closed previously - if( engine ) + if (engine) { - if( streamStatus == StreamStatus::DuringStep ) + if (streamStatus == StreamStatus::DuringStep) { engine.EndStep(); } engine.Close(); - m_ADIOS.RemoveIO( m_IOName ); + m_ADIOS.RemoveIO(m_IOName); } } finalized = true; } - void - BufferedActions::configure_IO( ADIOS2IOHandlerImpl & impl ) + void BufferedActions::configure_IO(ADIOS2IOHandlerImpl &impl) { - ( void )impl; - static std::set< std::string > streamingEngines = { - "sst", "insitumpi", "inline", "staging", "nullcore", "ssc" - }; - static std::set< std::string > fileEngines = { - "bp4", "bp3", "hdf5", "file" - }; + (void)impl; + static std::set streamingEngines = { + "sst", "insitumpi", "inline", "staging", "nullcore", "ssc"}; + static std::set fileEngines = { + "bp4", "bp3", "hdf5", "file"}; // step/variable-based iteration encoding requires the new schema - if( m_impl->m_iterationEncoding == IterationEncoding::variableBased ) + if (m_impl->m_iterationEncoding == IterationEncoding::variableBased) { m_impl->m_schema = ADIOS2Schema::schema_2021_02_09; } @@ -2326,17 +2237,17 @@ namespace detail bool isStreaming = false; { // allow 
overriding through environment variable - m_engineType = auxiliary::getEnvString( - "OPENPMD_ADIOS2_ENGINE", m_engineType ); + m_engineType = + auxiliary::getEnvString("OPENPMD_ADIOS2_ENGINE", m_engineType); std::transform( m_engineType.begin(), m_engineType.end(), m_engineType.begin(), - []( unsigned char c ) { return std::tolower( c ); } ); + [](unsigned char c) { return std::tolower(c); }); impl.m_engineType = this->m_engineType; - m_IO.SetEngine( m_engineType ); - auto it = streamingEngines.find( m_engineType ); - if( it != streamingEngines.end() ) + m_IO.SetEngine(m_engineType); + auto it = streamingEngines.find(m_engineType); + if (it != streamingEngines.end()) { isStreaming = true; optimizeAttributesStreaming = @@ -2345,10 +2256,10 @@ namespace detail } else { - it = fileEngines.find( m_engineType ); - if( it != fileEngines.end() ) + it = fileEngines.find(m_engineType); + if (it != fileEngines.end()) { - switch( m_mode ) + switch (m_mode) { case adios2::Mode::Read: /* @@ -2366,7 +2277,7 @@ namespace detail * Default for old layout is no steps. * Default for new layout is to use steps. */ - switch( schema() ) + switch (schema()) { case SupportedSchema::s_0000_00_00: streamStatus = StreamStatus::NoStream; @@ -2377,7 +2288,7 @@ namespace detail } break; default: - throw std::runtime_error( "Unreachable!" ); + throw std::runtime_error("Unreachable!"); } optimizeAttributesStreaming = false; } @@ -2386,108 +2297,104 @@ namespace detail throw std::runtime_error( "[ADIOS2IOHandler] Unknown engine type. Please choose " "one out of " - "[sst, staging, bp4, bp3, hdf5, file, null]" ); + "[sst, staging, bp4, bp3, hdf5, file, null]"); // not listing unsupported engines } } } // set engine parameters - std::set< std::string > alreadyConfigured; - auto engineConfig = impl.config( ADIOS2Defaults::str_engine ); - if( !engineConfig.json().is_null() ) + std::set alreadyConfigured; + auto engineConfig = impl.config(ADIOS2Defaults::str_engine); + if (!engineConfig.json().is_null()) { - auto params = - impl.config( ADIOS2Defaults::str_params, engineConfig ); + auto params = impl.config(ADIOS2Defaults::str_params, engineConfig); params.declareFullyRead(); - if( params.json().is_object() ) + if (params.json().is_object()) { - for( auto it = params.json().begin(); it != params.json().end(); - it++ ) + for (auto it = params.json().begin(); it != params.json().end(); + it++) { - m_IO.SetParameter( it.key(), it.value() ); - alreadyConfigured.emplace( it.key() ); + m_IO.SetParameter(it.key(), it.value()); + alreadyConfigured.emplace(it.key()); } } auto _useAdiosSteps = - impl.config( ADIOS2Defaults::str_usesteps, engineConfig ); - if( !_useAdiosSteps.json().is_null() && - m_mode != adios2::Mode::Read ) + impl.config(ADIOS2Defaults::str_usesteps, engineConfig); + if (!_useAdiosSteps.json().is_null() && + m_mode != adios2::Mode::Read) { bool tmp = _useAdiosSteps.json(); - if( isStreaming && !bool( tmp ) ) + if (isStreaming && !bool(tmp)) { throw std::runtime_error( - "Cannot switch off steps for streaming engines." ); + "Cannot switch off steps for streaming engines."); } - streamStatus = bool( tmp ) ? StreamStatus::OutsideOfStep - : StreamStatus::NoStream; + streamStatus = bool(tmp) ? 
StreamStatus::OutsideOfStep + : StreamStatus::NoStream; } } auto shadow = impl.m_config.invertShadow(); - if( shadow.size() > 0 ) + if (shadow.size() > 0) { std::cerr << "Warning: parts of the JSON configuration for ADIOS2 " "remain unused:\n" << shadow << std::endl; } - auto notYetConfigured = - [&alreadyConfigured]( std::string const & param ) { - auto it = alreadyConfigured.find( param ); - return it == alreadyConfigured.end(); - }; + auto notYetConfigured = [&alreadyConfigured](std::string const ¶m) { + auto it = alreadyConfigured.find(param); + return it == alreadyConfigured.end(); + }; // read parameters from environment - if( notYetConfigured( "CollectiveMetadata" ) ) + if (notYetConfigured("CollectiveMetadata")) { - if( 1 == - auxiliary::getEnvNum( "OPENPMD_ADIOS2_HAVE_METADATA_FILE", 1 ) ) + if (1 == + auxiliary::getEnvNum("OPENPMD_ADIOS2_HAVE_METADATA_FILE", 1)) { - m_IO.SetParameter( "CollectiveMetadata", "On" ); + m_IO.SetParameter("CollectiveMetadata", "On"); } else { - m_IO.SetParameter( "CollectiveMetadata", "Off" ); + m_IO.SetParameter("CollectiveMetadata", "Off"); } } - if( notYetConfigured( "Profile" ) ) + if (notYetConfigured("Profile")) { - if( 1 == - auxiliary::getEnvNum( - "OPENPMD_ADIOS2_HAVE_PROFILING", 1 ) && - notYetConfigured( "Profile" ) ) + if (1 == auxiliary::getEnvNum("OPENPMD_ADIOS2_HAVE_PROFILING", 1) && + notYetConfigured("Profile")) { - m_IO.SetParameter( "Profile", "On" ); + m_IO.SetParameter("Profile", "On"); } else { - m_IO.SetParameter( "Profile", "Off" ); + m_IO.SetParameter("Profile", "Off"); } } #if openPMD_HAVE_MPI { auto num_substreams = - auxiliary::getEnvNum( "OPENPMD_ADIOS2_NUM_SUBSTREAMS", 0 ); - if( notYetConfigured( "SubStreams" ) && 0 != num_substreams ) + auxiliary::getEnvNum("OPENPMD_ADIOS2_NUM_SUBSTREAMS", 0); + if (notYetConfigured("SubStreams") && 0 != num_substreams) { - m_IO.SetParameter( - "SubStreams", std::to_string( num_substreams ) ); + m_IO.SetParameter("SubStreams", std::to_string(num_substreams)); } } -# endif - if( notYetConfigured( "StatsLevel" ) ) +#endif + if (notYetConfigured("StatsLevel")) { /* * Switch those off by default since they are expensive to compute - * and to enable it, set the JSON option "StatsLevel" or the environment - * variable "OPENPMD_ADIOS2_STATS_LEVEL" be positive. + * and to enable it, set the JSON option "StatsLevel" or the + * environment variable "OPENPMD_ADIOS2_STATS_LEVEL" be positive. * The ADIOS2 default was "1" (on). */ - auto stats_level = auxiliary::getEnvNum( "OPENPMD_ADIOS2_STATS_LEVEL", 0 ); - m_IO.SetParameter( "StatsLevel", std::to_string( stats_level ) ); + auto stats_level = + auxiliary::getEnvNum("OPENPMD_ADIOS2_STATS_LEVEL", 0); + m_IO.SetParameter("StatsLevel", std::to_string(stats_level)); } - if( m_engineType == "sst" && notYetConfigured( "QueueLimit" ) ) + if (m_engineType == "sst" && notYetConfigured("QueueLimit")) { /* * By default, the SST engine of ADIOS2 does not set a limit on its @@ -2506,7 +2413,7 @@ namespace detail * keeping pipeline parallelism a default without running the risk * of using unbound memory. 
*/ - m_IO.SetParameter( "QueueLimit", "2" ); + m_IO.SetParameter("QueueLimit", "2"); } // We need to open the engine now already to inquire configuration @@ -2514,58 +2421,58 @@ namespace detail getEngine(); } - adios2::Engine & BufferedActions::getEngine() + adios2::Engine &BufferedActions::getEngine() { - if( !m_engine ) + if (!m_engine) { - switch( m_mode ) + switch (m_mode) { case adios2::Mode::Write: { // usesSteps attribute only written upon ::advance() // this makes sure that the attribute is only put in case // the streaming API was used. - m_IO.DefineAttribute< ADIOS2Schema::schema_t >( - ADIOS2Defaults::str_adios2Schema, m_impl->m_schema ); + m_IO.DefineAttribute( + ADIOS2Defaults::str_adios2Schema, m_impl->m_schema); m_engine = auxiliary::makeOption( - adios2::Engine( m_IO.Open( m_file, m_mode ) ) ); + adios2::Engine(m_IO.Open(m_file, m_mode))); break; } case adios2::Mode::Read: { m_engine = auxiliary::makeOption( - adios2::Engine( m_IO.Open( m_file, m_mode ) ) ); + adios2::Engine(m_IO.Open(m_file, m_mode))); // decide attribute layout // in streaming mode, this needs to be done after opening // a step // in file-based mode, we do it before - auto layoutVersion = [ IO{ m_IO } ]() mutable { - auto attr = IO.InquireAttribute< ADIOS2Schema::schema_t >( - ADIOS2Defaults::str_adios2Schema ); - if( !attr ) + auto layoutVersion = [IO{m_IO}]() mutable { + auto attr = IO.InquireAttribute( + ADIOS2Defaults::str_adios2Schema); + if (!attr) { return ADIOS2Schema::schema_0000_00_00; } else { - return attr.Data()[ 0 ]; + return attr.Data()[0]; } }; // decide streaming mode - switch( streamStatus ) + switch (streamStatus) { case StreamStatus::Undecided: { m_impl->m_schema = layoutVersion(); - auto attr = m_IO.InquireAttribute< bool_representation >( - ADIOS2Defaults::str_usesstepsAttribute ); - if( attr && attr.Data()[ 0 ] == 1 ) + auto attr = m_IO.InquireAttribute( + ADIOS2Defaults::str_usesstepsAttribute); + if (attr && attr.Data()[0] == 1) { - if( delayOpeningTheFirstStep ) + if (delayOpeningTheFirstStep) { streamStatus = StreamStatus::Parsing; } else { - if( m_engine.get().BeginStep() != - adios2::StepStatus::OK ) + if (m_engine.get().BeginStep() != + adios2::StepStatus::OK) { throw std::runtime_error( "[ADIOS2] Unexpected step status when " @@ -2581,7 +2488,7 @@ namespace detail break; } case StreamStatus::OutsideOfStep: - if( m_engine.get().BeginStep() != adios2::StepStatus::OK ) + if (m_engine.get().BeginStep() != adios2::StepStatus::OK) { throw std::runtime_error( "[ADIOS2] Unexpected step status when " @@ -2591,87 +2498,85 @@ namespace detail streamStatus = StreamStatus::DuringStep; break; default: - throw std::runtime_error( "[ADIOS2] Control flow error!" ); + throw std::runtime_error("[ADIOS2] Control flow error!"); } - if( attributeLayout() == AttributeLayout::ByAdiosVariables ) + if (attributeLayout() == AttributeLayout::ByAdiosVariables) { - preloadAttributes.preloadAttributes( m_IO, m_engine.get() ); + preloadAttributes.preloadAttributes(m_IO, m_engine.get()); } break; } default: - throw std::runtime_error( - "[ADIOS2] Invalid ADIOS access mode" ); + throw std::runtime_error("[ADIOS2] Invalid ADIOS access mode"); } - if( !m_engine.get() ) + if (!m_engine.get()) { - throw std::runtime_error( "[ADIOS2] Failed opening Engine." 
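// Illustrative sketch (editor's addition, not part of the patch): configure_IO()
// above takes the engine type and its key/value parameters from the handler's
// JSON options (ADIOS2Defaults::str_engine / str_params / str_usesteps) or from
// the OPENPMD_ADIOS2_ENGINE environment variable. The object below shows what
// such options can look like, assuming those defaults map to the documented
// openPMD-api keys "engine", "parameters" and "usesteps"; the parameter names
// shown ("QueueLimit", "Profile", "StatsLevel") are ADIOS2 engine parameters
// that this very function also sets from environment variables.
nlohmann::json exampleOptionsSketch = {
    {"adios2",
     {{"engine",
       {{"type", "sst"},
        {"usesteps", true},
        {"parameters",
         {{"QueueLimit", "2"}, {"Profile", "Off"}, {"StatsLevel", "0"}}}}}}};
// every entry under "parameters" ends up in m_IO.SetParameter(key, value)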
);
+                throw std::runtime_error("[ADIOS2] Failed opening Engine.");
             }
         }
         return m_engine.get();
     }

-    adios2::Engine & BufferedActions::requireActiveStep( )
+    adios2::Engine &BufferedActions::requireActiveStep()
     {
-        adios2::Engine & eng = getEngine();
-        if( streamStatus == StreamStatus::OutsideOfStep )
+        adios2::Engine &eng = getEngine();
+        if (streamStatus == StreamStatus::OutsideOfStep)
         {
             m_lastStepStatus = eng.BeginStep();
-            if( m_mode == adios2::Mode::Read &&
-                attributeLayout() == AttributeLayout::ByAdiosVariables )
+            if (m_mode == adios2::Mode::Read &&
+                attributeLayout() == AttributeLayout::ByAdiosVariables)
             {
-                preloadAttributes.preloadAttributes( m_IO, m_engine.get() );
+                preloadAttributes.preloadAttributes(m_IO, m_engine.get());
             }
             streamStatus = StreamStatus::DuringStep;
         }
         return eng;
     }

-    template < typename BA > void BufferedActions::enqueue( BA && ba )
+    template <typename BA>
+    void BufferedActions::enqueue(BA &&ba)
     {
-        enqueue< BA >( std::forward< BA >( ba ), m_buffer );
+        enqueue<BA>(std::forward<BA>(ba), m_buffer);
     }

-    template < typename BA > void BufferedActions::enqueue(
-        BA && ba,
-        decltype( m_buffer ) & buffer )
+    template <typename BA>
+    void BufferedActions::enqueue(BA &&ba, decltype(m_buffer) &buffer)
     {
-        using _BA = typename std::remove_reference< BA >::type;
-        buffer.emplace_back( std::unique_ptr< BufferedAction >(
-            new _BA( std::forward< BA >( ba ) ) ) );
+        using _BA = typename std::remove_reference<BA>::type;
+        buffer.emplace_back(
+            std::unique_ptr<BufferedAction>(new _BA(std::forward<BA>(ba))));
     }

-    template< typename F >
-    void
-    BufferedActions::flush(
+    template <typename F>
+    void BufferedActions::flush(
         FlushLevel level,
-        F && performPutGets,
+        F &&performPutGets,
         bool writeAttributes,
-        bool flushUnconditionally )
+        bool flushUnconditionally)
     {
-        if( streamStatus == StreamStatus::StreamOver )
+        if (streamStatus == StreamStatus::StreamOver)
         {
-            if( flushUnconditionally )
+            if (flushUnconditionally)
             {
                 throw std::runtime_error(
-                    "[ADIOS2] Cannot access engine since stream is over." );
+                    "[ADIOS2] Cannot access engine since stream is over.");
             }
             return;
         }
-        auto & eng = getEngine();
+        auto &eng = getEngine();
         /*
          * Only open a new step if it is necessary.
*/ - if( streamStatus == StreamStatus::OutsideOfStep ) + if (streamStatus == StreamStatus::OutsideOfStep) { - if( m_buffer.empty() && - ( !writeAttributes || m_attributeWrites.empty() ) && - m_attributeReads.empty() ) + if (m_buffer.empty() && + (!writeAttributes || m_attributeWrites.empty()) && + m_attributeReads.empty()) { - if( flushUnconditionally ) + if (flushUnconditionally) { - performPutGets( *this, eng ); + performPutGets(*this, eng); } return; } @@ -2680,225 +2585,215 @@ namespace detail requireActiveStep(); } } - for( auto & ba : m_buffer ) + for (auto &ba : m_buffer) { - ba->run( *this ); + ba->run(*this); } - if( writeAttributes ) + if (writeAttributes) { - for( auto & pair : m_attributeWrites ) + for (auto &pair : m_attributeWrites) { - pair.second.run( *this ); + pair.second.run(*this); } } - if( this->m_mode == adios2::Mode::Read ) + if (this->m_mode == adios2::Mode::Read) { level = FlushLevel::UserFlush; } - switch( level ) + switch (level) { - case FlushLevel::UserFlush: - performPutGets( *this, eng ); - m_updateSpans.clear(); - m_buffer.clear(); - m_alreadyEnqueued.clear(); - if( writeAttributes ) - { - m_attributeWrites.clear(); - } + case FlushLevel::UserFlush: + performPutGets(*this, eng); + m_updateSpans.clear(); + m_buffer.clear(); + m_alreadyEnqueued.clear(); + if (writeAttributes) + { + m_attributeWrites.clear(); + } - for( BufferedAttributeRead & task : m_attributeReads ) - { - task.run( *this ); - } - m_attributeReads.clear(); - break; + for (BufferedAttributeRead &task : m_attributeReads) + { + task.run(*this); + } + m_attributeReads.clear(); + break; - case FlushLevel::InternalFlush: - case FlushLevel::SkeletonOnly: - /* - * Tasks have been given to ADIOS2, but we don't flush them - * yet. So, move everything to m_alreadyEnqueued to avoid - * use-after-free. - */ - for( auto & task : m_buffer ) - { - m_alreadyEnqueued.emplace_back( std::move( task ) ); - } - if( writeAttributes ) + case FlushLevel::InternalFlush: + case FlushLevel::SkeletonOnly: + case FlushLevel::CreateOrOpenFiles: + /* + * Tasks have been given to ADIOS2, but we don't flush them + * yet. So, move everything to m_alreadyEnqueued to avoid + * use-after-free. + */ + for (auto &task : m_buffer) + { + m_alreadyEnqueued.emplace_back(std::move(task)); + } + if (writeAttributes) + { + for (auto &task : m_attributeWrites) { - for( auto & task : m_attributeWrites ) - { - m_alreadyEnqueued.emplace_back( - std::unique_ptr< BufferedAction >{ - new BufferedAttributeWrite{ - std::move( task.second ) } } ); - } - m_attributeWrites.clear(); + m_alreadyEnqueued.emplace_back( + std::unique_ptr{ + new BufferedAttributeWrite{ + std::move(task.second)}}); } - m_buffer.clear(); - break; + m_attributeWrites.clear(); + } + m_buffer.clear(); + break; } } - void - BufferedActions::flush( FlushLevel level, bool writeAttributes ) + void BufferedActions::flush(FlushLevel level, bool writeAttributes) { flush( level, - []( BufferedActions & ba, adios2::Engine & eng ) { - switch( ba.m_mode ) + [](BufferedActions &ba, adios2::Engine &eng) { + switch (ba.m_mode) { - case adios2::Mode::Write: - eng.PerformPuts(); - break; - case adios2::Mode::Read: - eng.PerformGets(); - break; - case adios2::Mode::Append: - // TODO order? - eng.PerformGets(); - eng.PerformPuts(); - break; - default: - break; + case adios2::Mode::Write: + eng.PerformPuts(); + break; + case adios2::Mode::Read: + eng.PerformGets(); + break; + case adios2::Mode::Append: + // TODO order? 
+ eng.PerformGets(); + eng.PerformPuts(); + break; + default: + break; } }, writeAttributes, - /* flushUnconditionally = */ false ); + /* flushUnconditionally = */ false); } - AdvanceStatus - BufferedActions::advance( AdvanceMode mode ) + AdvanceStatus BufferedActions::advance(AdvanceMode mode) { - if( streamStatus == StreamStatus::Undecided ) + if (streamStatus == StreamStatus::Undecided) { // stream status gets decided on upon opening an engine getEngine(); } // sic! no else - if( streamStatus == StreamStatus::NoStream ) + if (streamStatus == StreamStatus::NoStream) { - m_IO.DefineAttribute< bool_representation >( - ADIOS2Defaults::str_usesstepsAttribute, 0 ); - flush( FlushLevel::UserFlush, /* writeAttributes = */ false ); + m_IO.DefineAttribute( + ADIOS2Defaults::str_usesstepsAttribute, 0); + flush(FlushLevel::UserFlush, /* writeAttributes = */ false); return AdvanceStatus::OK; } - m_IO.DefineAttribute< bool_representation >( - ADIOS2Defaults::str_usesstepsAttribute, 1 ); - switch( mode ) + m_IO.DefineAttribute( + ADIOS2Defaults::str_usesstepsAttribute, 1); + switch (mode) { - case AdvanceMode::ENDSTEP: + case AdvanceMode::ENDSTEP: { + /* + * Advance mode write: + * Close the current step, defer opening the new step + * until one is actually needed: + * (1) The engine is accessed in BufferedActions::flush + * (2) A new step is opened before the currently active step + * has seen an access. See the following lines: open the + * step just to skip it again. + */ + if (streamStatus == StreamStatus::OutsideOfStep) { - /* - * Advance mode write: - * Close the current step, defer opening the new step - * until one is actually needed: - * (1) The engine is accessed in BufferedActions::flush - * (2) A new step is opened before the currently active step - * has seen an access. See the following lines: open the - * step just to skip it again. 
- */ - if( streamStatus == StreamStatus::OutsideOfStep ) + if (getEngine().BeginStep() != adios2::StepStatus::OK) { - if( getEngine().BeginStep() != adios2::StepStatus::OK ) - { - throw std::runtime_error( - "[ADIOS2] Trying to close a step that cannot be " - "opened."); - } + throw std::runtime_error( + "[ADIOS2] Trying to close a step that cannot be " + "opened."); } - flush( - FlushLevel::UserFlush, - []( BufferedActions &, adios2::Engine & eng ) - { eng.EndStep(); }, - /* writeAttributes = */ true, - /* flushUnconditionally = */ true ); - uncommittedAttributes.clear(); - m_updateSpans.clear(); - streamStatus = StreamStatus::OutsideOfStep; - return AdvanceStatus::OK; } - case AdvanceMode::BEGINSTEP: - { - adios2::StepStatus adiosStatus = m_lastStepStatus; + flush( + FlushLevel::UserFlush, + [](BufferedActions &, adios2::Engine &eng) { eng.EndStep(); }, + /* writeAttributes = */ true, + /* flushUnconditionally = */ true); + uncommittedAttributes.clear(); + m_updateSpans.clear(); + streamStatus = StreamStatus::OutsideOfStep; + return AdvanceStatus::OK; + } + case AdvanceMode::BEGINSTEP: { + adios2::StepStatus adiosStatus = m_lastStepStatus; - // Step might have been opened implicitly already - // by requireActiveStep() - // In that case, streamStatus is DuringStep and Adios - // return status is stored in m_lastStepStatus - if( streamStatus != StreamStatus::DuringStep ) - { - flush( - FlushLevel::UserFlush, - [ &adiosStatus ]( - BufferedActions &, adios2::Engine & engine ) { - adiosStatus = engine.BeginStep(); - }, - /* writeAttributes = */ false, - /* flushUnconditionally = */ true ); - if( adiosStatus == adios2::StepStatus::OK && - m_mode == adios2::Mode::Read && - attributeLayout() == AttributeLayout::ByAdiosVariables ) - { - preloadAttributes.preloadAttributes( - m_IO, m_engine.get() ); - } - } - AdvanceStatus res = AdvanceStatus::OK; - switch( adiosStatus ) + // Step might have been opened implicitly already + // by requireActiveStep() + // In that case, streamStatus is DuringStep and Adios + // return status is stored in m_lastStepStatus + if (streamStatus != StreamStatus::DuringStep) + { + flush( + FlushLevel::UserFlush, + [&adiosStatus](BufferedActions &, adios2::Engine &engine) { + adiosStatus = engine.BeginStep(); + }, + /* writeAttributes = */ false, + /* flushUnconditionally = */ true); + if (adiosStatus == adios2::StepStatus::OK && + m_mode == adios2::Mode::Read && + attributeLayout() == AttributeLayout::ByAdiosVariables) { - case adios2::StepStatus::EndOfStream: - streamStatus = StreamStatus::StreamOver; - res = AdvanceStatus::OVER; - break; - case adios2::StepStatus::OK: - streamStatus = StreamStatus::DuringStep; - res = AdvanceStatus::OK; - break; - case adios2::StepStatus::NotReady: - case adios2::StepStatus::OtherError: - throw std::runtime_error( - "[ADIOS2] Unexpected step status." 
); + preloadAttributes.preloadAttributes(m_IO, m_engine.get()); } - invalidateAttributesMap(); - invalidateVariablesMap(); - return res; } + AdvanceStatus res = AdvanceStatus::OK; + switch (adiosStatus) + { + case adios2::StepStatus::EndOfStream: + streamStatus = StreamStatus::StreamOver; + res = AdvanceStatus::OVER; + break; + case adios2::StepStatus::OK: + streamStatus = StreamStatus::DuringStep; + res = AdvanceStatus::OK; + break; + case adios2::StepStatus::NotReady: + case adios2::StepStatus::OtherError: + throw std::runtime_error("[ADIOS2] Unexpected step status."); + } + invalidateAttributesMap(); + invalidateVariablesMap(); + return res; + } } throw std::runtime_error( "Internal error: Advance mode should be explicitly" - " chosen by the front-end." ); + " chosen by the front-end."); } - void BufferedActions::drop( ) + void BufferedActions::drop() { m_buffer.clear(); } - static std::vector< std::string > - availableAttributesOrVariablesPrefixed( - std::string const & prefix, - BufferedActions::AttributeMap_t const & ( - BufferedActions::*getBasicMap )(), - BufferedActions & ba ) + static std::vector availableAttributesOrVariablesPrefixed( + std::string const &prefix, + BufferedActions::AttributeMap_t const &( + BufferedActions::*getBasicMap)(), + BufferedActions &ba) { - std::string var = auxiliary::ends_with( prefix, '/' ) ? prefix - : prefix + '/'; - BufferedActions::AttributeMap_t const & attributes = - ( ba.*getBasicMap )(); - std::vector< std::string > ret; - for( auto it = attributes.lower_bound( prefix ); it != attributes.end(); - ++it ) + std::string var = + auxiliary::ends_with(prefix, '/') ? prefix : prefix + '/'; + BufferedActions::AttributeMap_t const &attributes = (ba.*getBasicMap)(); + std::vector ret; + for (auto it = attributes.lower_bound(prefix); it != attributes.end(); + ++it) { - if( auxiliary::starts_with( it->first, var ) ) + if (auxiliary::starts_with(it->first, var)) { - ret.emplace_back( - auxiliary::replace_first( it->first, var, "" ) ); + ret.emplace_back(auxiliary::replace_first(it->first, var, "")); } else { @@ -2908,126 +2803,105 @@ namespace detail return ret; } - std::vector< std::string > - BufferedActions::availableAttributesPrefixed( std::string const & prefix ) + std::vector + BufferedActions::availableAttributesPrefixed(std::string const &prefix) { return availableAttributesOrVariablesPrefixed( - prefix, - &BufferedActions::availableAttributes, - *this ); + prefix, &BufferedActions::availableAttributes, *this); } - std::vector< std::string > - BufferedActions::availableVariablesPrefixed( std::string const & prefix ) + std::vector + BufferedActions::availableVariablesPrefixed(std::string const &prefix) { return availableAttributesOrVariablesPrefixed( - prefix, - &BufferedActions::availableVariables, - *this ); + prefix, &BufferedActions::availableVariables, *this); } - void - BufferedActions::invalidateAttributesMap() + void BufferedActions::invalidateAttributesMap() { - m_availableAttributes = auxiliary::Option< AttributeMap_t >(); + m_availableAttributes = auxiliary::Option(); } BufferedActions::AttributeMap_t const & BufferedActions::availableAttributes() { - if( m_availableAttributes ) + if (m_availableAttributes) { return m_availableAttributes.get(); } else { m_availableAttributes = - auxiliary::makeOption( m_IO.AvailableAttributes() ); + auxiliary::makeOption(m_IO.AvailableAttributes()); return m_availableAttributes.get(); } } - void - BufferedActions::invalidateVariablesMap() + void BufferedActions::invalidateVariablesMap() { - 
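// Illustrative sketch (editor's addition, not part of the patch): the switch in
// advance() above maps ADIOS2 step statuses onto openPMD's AdvanceStatus and
// treats the remaining statuses as errors. `toAdvanceStatusSketch` is a
// hypothetical helper that condenses just that mapping.
inline AdvanceStatus toAdvanceStatusSketch(adios2::StepStatus adiosStatus)
{
    switch (adiosStatus)
    {
    case adios2::StepStatus::EndOfStream:
        return AdvanceStatus::OVER; // the stream has no further steps
    case adios2::StepStatus::OK:
        return AdvanceStatus::OK;
    case adios2::StepStatus::NotReady:
    case adios2::StepStatus::OtherError:
    default:
        throw std::runtime_error("[ADIOS2] Unexpected step status.");
    }
}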
m_availableVariables = auxiliary::Option< AttributeMap_t >(); + m_availableVariables = auxiliary::Option(); } - BufferedActions::AttributeMap_t const & - BufferedActions::availableVariables() + BufferedActions::AttributeMap_t const &BufferedActions::availableVariables() { - if( m_availableVariables ) + if (m_availableVariables) { return m_availableVariables.get(); } else { m_availableVariables = - auxiliary::makeOption( m_IO.AvailableVariables() ); + auxiliary::makeOption(m_IO.AvailableVariables()); return m_availableVariables.get(); } } } // namespace detail -# if openPMD_HAVE_MPI +#if openPMD_HAVE_MPI ADIOS2IOHandler::ADIOS2IOHandler( std::string path, openPMD::Access at, MPI_Comm comm, nlohmann::json options, - std::string engineType ) - : AbstractIOHandler( std::move( path ), at, comm ) - , m_impl{ this, comm, std::move( options ), std::move( engineType ) } -{ -} + std::string engineType) + : AbstractIOHandler(std::move(path), at, comm) + , m_impl{this, comm, std::move(options), std::move(engineType)} +{} #endif ADIOS2IOHandler::ADIOS2IOHandler( - std::string path, - Access at, - nlohmann::json options, - std::string engineType ) - : AbstractIOHandler( std::move( path ), at ) - , m_impl{ this, std::move( options ), std::move( engineType ) } -{ -} + std::string path, Access at, nlohmann::json options, std::string engineType) + : AbstractIOHandler(std::move(path), at) + , m_impl{this, std::move(options), std::move(engineType)} +{} -std::future< void > -ADIOS2IOHandler::flush() +std::future +ADIOS2IOHandler::flush(internal::FlushParams const &flushParams) { - return m_impl.flush(); + return m_impl.flush(flushParams); } #else // openPMD_HAVE_ADIOS2 -# if openPMD_HAVE_MPI +#if openPMD_HAVE_MPI ADIOS2IOHandler::ADIOS2IOHandler( - std::string path, - Access at, - MPI_Comm comm, - nlohmann::json, - std::string ) - : AbstractIOHandler( std::move( path ), at, comm ) -{ -} + std::string path, Access at, MPI_Comm comm, nlohmann::json, std::string) + : AbstractIOHandler(std::move(path), at, comm) +{} -# endif // openPMD_HAVE_MPI +#endif // openPMD_HAVE_MPI ADIOS2IOHandler::ADIOS2IOHandler( - std::string path, - Access at, - nlohmann::json, - std::string ) - : AbstractIOHandler( std::move( path ), at ) -{ -} + std::string path, Access at, nlohmann::json, std::string) + : AbstractIOHandler(std::move(path), at) +{} -std::future< void > ADIOS2IOHandler::flush( ) +std::future ADIOS2IOHandler::flush(internal::FlushParams const &) { - return std::future< void >( ); + return std::future(); } #endif diff --git a/src/IO/ADIOS/ADIOS2PreloadAttributes.cpp b/src/IO/ADIOS/ADIOS2PreloadAttributes.cpp index b55427a3b1..6b83b0ca5c 100644 --- a/src/IO/ADIOS/ADIOS2PreloadAttributes.cpp +++ b/src/IO/ADIOS/ADIOS2PreloadAttributes.cpp @@ -42,16 +42,14 @@ namespace detail { struct GetAlignment { - template< typename T > - constexpr size_t - operator()() const + template + constexpr size_t operator()() const { return alignof(T); } - template< unsigned long, typename... Args > - constexpr size_t - operator()( Args &&... ) const + template + constexpr size_t operator()(Args &&...) const { return alignof(std::max_align_t); } @@ -59,16 +57,14 @@ namespace detail struct GetSize { - template< typename T > - constexpr size_t - operator()() const + template + constexpr size_t operator()() const { return sizeof(T); } - template< unsigned long, typename... Args > - constexpr size_t - operator()( Args &&... ) const + template + constexpr size_t operator()(Args &&...) 
const { return 0; } @@ -76,30 +72,29 @@ namespace detail struct ScheduleLoad { - template< typename T > - void - operator()( - adios2::IO & IO, - adios2::Engine & engine, - std::string const & name, - char * buffer, - PreloadAdiosAttributes::AttributeLocation & location ) + template + void operator()( + adios2::IO &IO, + adios2::Engine &engine, + std::string const &name, + char *buffer, + PreloadAdiosAttributes::AttributeLocation &location) { - adios2::Variable< T > var = IO.InquireVariable< T >( name ); - if( !var ) + adios2::Variable var = IO.InquireVariable(name); + if (!var) { throw std::runtime_error( - "[ADIOS2] Variable not found: " + name ); + "[ADIOS2] Variable not found: " + name); } - adios2::Dims const & shape = location.shape; - adios2::Dims offset( shape.size(), 0 ); - if( shape.size() > 0 ) + adios2::Dims const &shape = location.shape; + adios2::Dims offset(shape.size(), 0); + if (shape.size() > 0) { - var.SetSelection( { offset, shape } ); + var.SetSelection({offset, shape}); } - T * dest = reinterpret_cast< T * >( buffer ); + T *dest = reinterpret_cast(buffer); size_t numItems = 1; - for( auto extent : shape ) + for (auto extent : shape) { numItems *= extent; } @@ -108,12 +103,12 @@ namespace detail * in a loop instead. * https://developercommunity.visualstudio.com/t/c-placement-new-is-incorrectly-compiled/206439 */ - for( size_t i = 0; i < numItems; ++i ) + for (size_t i = 0; i < numItems; ++i) { - new( dest + i ) T(); + new (dest + i) T(); } location.destroy = buffer; - engine.Get( var, dest, adios2::Mode::Deferred ); + engine.Get(var, dest, adios2::Mode::Deferred); } std::string errorMsg = "ADIOS2"; @@ -121,22 +116,20 @@ namespace detail struct VariableShape { - template< typename T > - adios2::Dims - operator()( adios2::IO & IO, std::string const & name ) + template + adios2::Dims operator()(adios2::IO &IO, std::string const &name) { - auto var = IO.InquireVariable< T >( name ); - if( !var ) + auto var = IO.InquireVariable(name); + if (!var) { throw std::runtime_error( - "[ADIOS2] Variable not found: " + name ); + "[ADIOS2] Variable not found: " + name); } return var.Shape(); } - template< unsigned long n, typename... Args > - adios2::Dims - operator()( Args &&... ) + template + adios2::Dims operator()(Args &&...) { return {}; } @@ -144,47 +137,44 @@ namespace detail struct AttributeLocationDestroy { - template< typename T > - void operator()( char *ptr, size_t numItems ) + template + void operator()(char *ptr, size_t numItems) { - T *destroy = reinterpret_cast< T * >( ptr ); - for( size_t i = 0; i < numItems; ++i ) + T *destroy = reinterpret_cast(ptr); + for (size_t i = 0; i < numItems; ++i) { - destroy[ i ].~T(); + destroy[i].~T(); } } - template< unsigned long n, typename... Args > - void operator()( Args &&... ) - { - } + template + void operator()(Args &&...) 
+ {} }; } // namespace using AttributeLocation = PreloadAdiosAttributes::AttributeLocation; AttributeLocation::AttributeLocation( - adios2::Dims shape_in, size_t offset_in, Datatype dt_in ) - : shape( std::move( shape_in ) ), offset( offset_in ), dt( dt_in ) - { - } + adios2::Dims shape_in, size_t offset_in, Datatype dt_in) + : shape(std::move(shape_in)), offset(offset_in), dt(dt_in) + {} - AttributeLocation::AttributeLocation( AttributeLocation && other ) - : shape{ std::move( other.shape ) } - , offset{ std::move( other.offset ) } - , dt{ std::move( other.dt ) } - , destroy{ std::move( other.destroy ) } + AttributeLocation::AttributeLocation(AttributeLocation &&other) + : shape{std::move(other.shape)} + , offset{std::move(other.offset)} + , dt{std::move(other.dt)} + , destroy{std::move(other.destroy)} { other.destroy = nullptr; } - AttributeLocation & - AttributeLocation::operator=( AttributeLocation && other ) + AttributeLocation &AttributeLocation::operator=(AttributeLocation &&other) { - this->shape = std::move( other.shape ); - this->offset = std::move( other.offset ); - this->dt = std::move( other.dt ); - this->destroy = std::move( other.destroy ); + this->shape = std::move(other.shape); + this->offset = std::move(other.offset); + this->dt = std::move(other.dt); + this->destroy = std::move(other.destroy); other.destroy = nullptr; return *this; } @@ -195,88 +185,85 @@ namespace detail * If the object has been moved from, this may be empty. * Or else, if no custom destructor has been emplaced. */ - if( destroy ) + if (destroy) { size_t length = 1; - for( auto ext : shape ) + for (auto ext : shape) { length *= ext; } static AttributeLocationDestroy ald; - switchAdios2AttributeType( dt, ald, destroy, length ); + switchAdios2AttributeType(dt, ald, destroy, length); } } - void - PreloadAdiosAttributes::preloadAttributes( - adios2::IO & IO, - adios2::Engine & engine ) + void PreloadAdiosAttributes::preloadAttributes( + adios2::IO &IO, adios2::Engine &engine) { m_offsets.clear(); - std::map< Datatype, std::vector< std::string > > attributesByType; - auto addAttribute = - [ &attributesByType ]( Datatype dt, std::string name ) { - constexpr size_t reserve = 10; - auto it = attributesByType.find( dt ); - if( it == attributesByType.end() ) - { - it = attributesByType.emplace_hint( - it, dt, std::vector< std::string >() ); - it->second.reserve( reserve ); - } - it->second.push_back( std::move( name ) ); - }; + std::map > attributesByType; + auto addAttribute = [&attributesByType](Datatype dt, std::string name) { + constexpr size_t reserve = 10; + auto it = attributesByType.find(dt); + if (it == attributesByType.end()) + { + it = attributesByType.emplace_hint( + it, dt, std::vector()); + it->second.reserve(reserve); + } + it->second.push_back(std::move(name)); + }; // PHASE 1: collect names of available attributes by ADIOS datatype - for( auto & variable : IO.AvailableVariables() ) + for (auto &variable : IO.AvailableVariables()) { - if( auxiliary::ends_with( variable.first, "/__data__" ) ) + if (auxiliary::ends_with(variable.first, "/__data__")) { continue; } // this will give us basic types only, no fancy vectors or similar - Datatype dt = fromADIOS2Type( IO.VariableType( variable.first ) ); - addAttribute( dt, std::move( variable.first ) ); + Datatype dt = fromADIOS2Type(IO.VariableType(variable.first)); + addAttribute(dt, std::move(variable.first)); } // PHASE 2: get offsets for attributes in buffer - std::map< Datatype, size_t > offsets; + std::map offsets; size_t currentOffset = 0; 
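In PHASE 2 below, attribute values are grouped by datatype and packed into one raw byte buffer; before a new type is started, `currentOffset` is advanced to the next multiple of that type's alignment so that the placement-new construction in PHASE 3 happens at properly aligned addresses. A minimal sketch of that padding step, with a hypothetical helper name:

```cpp
#include <cstddef>

// Advance a byte offset to the next multiple of `alignment` (no-op if the
// offset is already aligned). Hypothetical helper mirroring the padding
// applied to currentOffset in the loop below.
constexpr std::size_t padToAlignment(std::size_t offset, std::size_t alignment)
{
    return offset % alignment == 0
        ? offset
        : offset + (alignment - offset % alignment);
}

static_assert(padToAlignment(13, 8) == 16, "13 is padded up to 16");
static_assert(padToAlignment(16, 8) == 16, "aligned offsets stay unchanged");
```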
GetAlignment switchAlignment; GetSize switchSize; VariableShape switchShape; - for( auto & pair : attributesByType ) + for (auto &pair : attributesByType) { - size_t alignment = switchAdios2AttributeType( - pair.first, switchAlignment ); - size_t size = switchAdios2AttributeType( pair.first, switchSize ); + size_t alignment = + switchAdios2AttributeType(pair.first, switchAlignment); + size_t size = switchAdios2AttributeType(pair.first, switchSize); // go to next offset with valid alignment size_t modulus = currentOffset % alignment; - if( modulus > 0 ) + if (modulus > 0) { currentOffset += alignment - modulus; } - for( std::string & name : pair.second ) + for (std::string &name : pair.second) { - adios2::Dims shape = - switchAdios2AttributeType( pair.first, switchShape, IO, name ); + adios2::Dims shape = switchAdios2AttributeType( + pair.first, switchShape, IO, name); size_t elements = 1; - for( auto extent : shape ) + for (auto extent : shape) { elements *= extent; } m_offsets.emplace( std::piecewise_construct, - std::forward_as_tuple( std::move( name ) ), + std::forward_as_tuple(std::move(name)), std::forward_as_tuple( - std::move( shape ), currentOffset, pair.first ) ); + std::move(shape), currentOffset, pair.first)); currentOffset += elements * size; } } // now, currentOffset is the number of bytes that we need to allocate // PHASE 3: allocate new buffer and schedule loads - m_rawBuffer.resize( currentOffset ); + m_rawBuffer.resize(currentOffset); ScheduleLoad switchSchedule; - for( auto & pair : m_offsets ) + for (auto &pair : m_offsets) { switchAdios2AttributeType( pair.second.dt, @@ -284,16 +271,16 @@ namespace detail IO, engine, pair.first, - &m_rawBuffer[ pair.second.offset ], - pair.second ); + &m_rawBuffer[pair.second.offset], + pair.second); } } Datatype - PreloadAdiosAttributes::attributeType( std::string const & name ) const + PreloadAdiosAttributes::attributeType(std::string const &name) const { - auto it = m_offsets.find( name ); - if( it == m_offsets.end() ) + auto it = m_offsets.find(name); + if (it == m_offsets.end()) { return Datatype::UNDEFINED; } diff --git a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp index 92fef41993..3781e4070a 100644 --- a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp @@ -20,705 +20,719 @@ */ #include #include -#include #include +#include - -void -CommonADIOS1IOHandlerImpl::close(int64_t fd) +void CommonADIOS1IOHandlerImpl::close(int64_t fd) { int status; status = adios_close(fd); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to close ADIOS file (open_write)"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to close ADIOS file (open_write)"); } -void -CommonADIOS1IOHandlerImpl::close(ADIOS_FILE* f) +void CommonADIOS1IOHandlerImpl::close(ADIOS_FILE *f) { int status; status = adios_read_close(f); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to close ADIOS file (open_read)"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to close ADIOS file (open_read)"); } -void -CommonADIOS1IOHandlerImpl::flush_attribute(int64_t group, std::string const& name, Attribute const& att) +void CommonADIOS1IOHandlerImpl::flush_attribute( + int64_t group, std::string const &name, Attribute const &att) { auto dtype = att.dtype; // https://github.com/ComputationalRadiationPhysics/picongpu/pull/1756 - if( dtype == Datatype::BOOL ) + if (dtype == Datatype::BOOL) dtype = Datatype::UCHAR; int nelems = 0; - switch( 
dtype ) + switch (dtype) { using DT = Datatype; - case DT::VEC_CHAR: - nelems = att.get< std::vector< char > >().size(); - break; - case DT::VEC_SHORT: - nelems = att.get< std::vector< short > >().size(); - break; - case DT::VEC_INT: - nelems = att.get< std::vector< int > >().size(); - break; - case DT::VEC_LONG: - nelems = att.get< std::vector< long > >().size(); - break; - case DT::VEC_LONGLONG: - nelems = att.get< std::vector< long long > >().size(); - break; - case DT::VEC_UCHAR: - nelems = att.get< std::vector< unsigned char > >().size(); - break; - case DT::VEC_USHORT: - nelems = att.get< std::vector< unsigned short > >().size(); - break; - case DT::VEC_UINT: - nelems = att.get< std::vector< unsigned int > >().size(); - break; - case DT::VEC_ULONG: - nelems = att.get< std::vector< unsigned long > >().size(); - break; - case DT::VEC_ULONGLONG: - nelems = att.get< std::vector< unsigned long long > >().size(); - break; - case DT::VEC_FLOAT: - nelems = att.get< std::vector< float > >().size(); - break; - case DT::VEC_DOUBLE: - nelems = att.get< std::vector< double > >().size(); - break; - case DT::VEC_LONG_DOUBLE: - nelems = att.get< std::vector< long double > >().size(); - break; - case DT::VEC_STRING: - nelems = att.get< std::vector< std::string > >().size(); - break; - case DT::ARR_DBL_7: - nelems = 7; - break; - case DT::UNDEFINED: - case DT::DATATYPE: - throw std::runtime_error("[ADIOS1] Unknown Attribute datatype (ADIOS1 Attribute flush)"); - default: - nelems = 1; + case DT::VEC_CHAR: + nelems = att.get >().size(); + break; + case DT::VEC_SHORT: + nelems = att.get >().size(); + break; + case DT::VEC_INT: + nelems = att.get >().size(); + break; + case DT::VEC_LONG: + nelems = att.get >().size(); + break; + case DT::VEC_LONGLONG: + nelems = att.get >().size(); + break; + case DT::VEC_UCHAR: + nelems = att.get >().size(); + break; + case DT::VEC_USHORT: + nelems = att.get >().size(); + break; + case DT::VEC_UINT: + nelems = att.get >().size(); + break; + case DT::VEC_ULONG: + nelems = att.get >().size(); + break; + case DT::VEC_ULONGLONG: + nelems = att.get >().size(); + break; + case DT::VEC_FLOAT: + nelems = att.get >().size(); + break; + case DT::VEC_DOUBLE: + nelems = att.get >().size(); + break; + case DT::VEC_LONG_DOUBLE: + nelems = att.get >().size(); + break; + case DT::VEC_STRING: + nelems = att.get >().size(); + break; + case DT::ARR_DBL_7: + nelems = 7; + break; + case DT::UNDEFINED: + case DT::DATATYPE: + throw std::runtime_error( + "[ADIOS1] Unknown Attribute datatype (ADIOS1 Attribute flush)"); + default: + nelems = 1; } auto values = auxiliary::allocatePtr(dtype, nelems); - switch( att.dtype ) + switch (att.dtype) { using DT = Datatype; - case DT::CHAR: - { - auto ptr = reinterpret_cast< char* >(values.get()); - *ptr = att.get< char >(); - break; - } - case DT::UCHAR: - { - auto ptr = reinterpret_cast< unsigned char* >(values.get()); - *ptr = att.get< unsigned char >(); - break; - } - case DT::SHORT: - { - auto ptr = reinterpret_cast< short* >(values.get()); - *ptr = att.get< short >(); - break; - } - case DT::INT: - { - auto ptr = reinterpret_cast< int* >(values.get()); - *ptr = att.get< int >(); - break; - } - case DT::LONG: - { - auto ptr = reinterpret_cast< long* >(values.get()); - *ptr = att.get< long >(); - break; - } - case DT::LONGLONG: - { - auto ptr = reinterpret_cast< long long* >(values.get()); - *ptr = att.get< long long >(); - break; - } - case DT::USHORT: - { - auto ptr = reinterpret_cast< unsigned short* >(values.get()); - *ptr = att.get< unsigned short 
>(); - break; - } - case DT::UINT: - { - auto ptr = reinterpret_cast< unsigned int* >(values.get()); - *ptr = att.get< unsigned int >(); - break; - } - case DT::ULONG: - { - auto ptr = reinterpret_cast< unsigned long* >(values.get()); - *ptr = att.get< unsigned long >(); - break; - } - case DT::ULONGLONG: - { - auto ptr = reinterpret_cast< unsigned long long* >(values.get()); - *ptr = att.get< unsigned long long >(); - break; - } - case DT::FLOAT: - { - auto ptr = reinterpret_cast< float* >(values.get()); - *ptr = att.get< float >(); - break; - } - case DT::DOUBLE: - { - auto ptr = reinterpret_cast< double* >(values.get()); - *ptr = att.get< double >(); - break; - } - case DT::LONG_DOUBLE: - { - auto ptr = reinterpret_cast< long double* >(values.get()); - *ptr = att.get< long double >(); - break; - } - case DT::CFLOAT: - { - auto ptr = reinterpret_cast< std::complex< float >* >(values.get()); - *ptr = att.get< std::complex< float > >(); - break; - } - case DT::CDOUBLE: - { - auto ptr = reinterpret_cast< std::complex< double >* >(values.get()); - *ptr = att.get< std::complex< double > >(); - break; - } - case DT::CLONG_DOUBLE: - { - throw std::runtime_error("[ADIOS1] Unknown Attribute datatype (CLONG_DOUBLE)"); - break; - } - case DT::STRING: - { - auto const & v = att.get< std::string >(); - values = auxiliary::allocatePtr(Datatype::CHAR, v.length() + 1u); - strcpy((char*)values.get(), v.c_str()); - break; - } - case DT::VEC_CHAR: - { - auto ptr = reinterpret_cast< char* >(values.get()); - auto const& vec = att.get< std::vector< char > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_SHORT: - { - auto ptr = reinterpret_cast< short* >(values.get()); - auto const& vec = att.get< std::vector< short > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_INT: - { - auto ptr = reinterpret_cast< int* >(values.get()); - auto const& vec = att.get< std::vector< int > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_LONG: - { - auto ptr = reinterpret_cast< long* >(values.get()); - auto const& vec = att.get< std::vector< long > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_LONGLONG: - { - auto ptr = reinterpret_cast< long long* >(values.get()); - auto const& vec = att.get< std::vector< long long > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_UCHAR: - { - auto ptr = reinterpret_cast< unsigned char* >(values.get()); - auto const& vec = att.get< std::vector< unsigned char > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_USHORT: - { - auto ptr = reinterpret_cast< unsigned short* >(values.get()); - auto const& vec = att.get< std::vector< unsigned short > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_UINT: - { - auto ptr = reinterpret_cast< unsigned int* >(values.get()); - auto const& vec = att.get< std::vector< unsigned int > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_ULONG: - { - auto ptr = reinterpret_cast< unsigned long* >(values.get()); - auto const& vec = att.get< std::vector< unsigned long > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_ULONGLONG: - { - auto ptr = reinterpret_cast< unsigned long long* >(values.get()); - auto const& vec = att.get< std::vector< unsigned long long 
> >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_FLOAT: - { - auto ptr = reinterpret_cast< float* >(values.get()); - auto const& vec = att.get< std::vector< float > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_DOUBLE: - { - auto ptr = reinterpret_cast< double* >(values.get()); - auto const& vec = att.get< std::vector< double > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_LONG_DOUBLE: - { - auto ptr = reinterpret_cast< long double* >(values.get()); - auto const& vec = att.get< std::vector< long double > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - /* not supported by ADIOS 1.13.1: - * https://github.com/ornladios/ADIOS/issues/212 - */ - case DT::VEC_CFLOAT: - case DT::VEC_CDOUBLE: - case DT::VEC_CLONG_DOUBLE: - { - throw std::runtime_error("[ADIOS1] Arrays of complex attributes are not supported"); - break; - } - case DT::VEC_STRING: - { - auto ptr = reinterpret_cast< char** >(values.get()); - auto const& vec = att.get< std::vector< std::string > >(); - for( size_t i = 0; i < vec.size(); ++i ) - { - size_t size = vec[i].size() + 1; - ptr[i] = new char[size]; - strncpy(ptr[i], vec[i].c_str(), size); - } - break; - } - case DT::ARR_DBL_7: - { - auto ptr = reinterpret_cast< double* >(values.get()); - auto const& arr = att.get< std::array< double, 7> >(); - for( size_t i = 0; i < 7; ++i ) - ptr[i] = arr[i]; - break; - } - case DT::BOOL: + case DT::CHAR: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::UCHAR: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::SHORT: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::INT: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::LONG: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::LONGLONG: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::USHORT: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::UINT: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::ULONG: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::ULONGLONG: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::FLOAT: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::DOUBLE: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::LONG_DOUBLE: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::CFLOAT: { + auto ptr = reinterpret_cast *>(values.get()); + *ptr = att.get >(); + break; + } + case DT::CDOUBLE: { + auto ptr = reinterpret_cast *>(values.get()); + *ptr = att.get >(); + break; + } + case DT::CLONG_DOUBLE: { + throw std::runtime_error( + "[ADIOS1] Unknown Attribute datatype (CLONG_DOUBLE)"); + break; + } + case DT::STRING: { + auto const &v = att.get(); + values = auxiliary::allocatePtr(Datatype::CHAR, v.length() + 1u); + strcpy((char *)values.get(), v.c_str()); + break; + } + case DT::VEC_CHAR: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_SHORT: { + auto ptr = 
reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_INT: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_LONG: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_LONGLONG: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_UCHAR: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_USHORT: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_UINT: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_ULONG: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_ULONGLONG: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_FLOAT: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_DOUBLE: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_LONG_DOUBLE: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + /* not supported by ADIOS 1.13.1: + * https://github.com/ornladios/ADIOS/issues/212 + */ + case DT::VEC_CFLOAT: + case DT::VEC_CDOUBLE: + case DT::VEC_CLONG_DOUBLE: { + throw std::runtime_error( + "[ADIOS1] Arrays of complex attributes are not supported"); + break; + } + case DT::VEC_STRING: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get >(); + for (size_t i = 0; i < vec.size(); ++i) { - auto ptr = reinterpret_cast< unsigned char* >(values.get()); - *ptr = static_cast< unsigned char >(att.get< bool >()); - break; + size_t size = vec[i].size() + 1; + ptr[i] = new char[size]; + strncpy(ptr[i], vec[i].c_str(), size); } - case DT::UNDEFINED: - case DT::DATATYPE: - throw std::runtime_error("[ADIOS1] Unknown Attribute datatype (ADIOS1 Attribute flush)"); - default: - throw std::runtime_error("[ADIOS1] Datatype not implemented in ADIOS IO"); + break; + } + case DT::ARR_DBL_7: { + auto ptr = reinterpret_cast(values.get()); + auto const &arr = att.get >(); + for (size_t i = 0; i < 7; ++i) + ptr[i] = arr[i]; + break; + } + case DT::BOOL: { + auto ptr = reinterpret_cast(values.get()); + *ptr = static_cast(att.get()); + break; + } + case DT::UNDEFINED: + case DT::DATATYPE: + throw std::runtime_error( + "[ADIOS1] Unknown Attribute datatype (ADIOS1 Attribute flush)"); + default: + throw std::runtime_error( + "[ADIOS1] Datatype not implemented in ADIOS IO"); } int status; - status = adios_define_attribute_byvalue(group, - 
name.c_str(), - "", - getBP1DataType(att.dtype), - nelems, - values.get()); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to define ADIOS attribute by value"); - - if( att.dtype == Datatype::VEC_STRING ) + status = adios_define_attribute_byvalue( + group, + name.c_str(), + "", + getBP1DataType(att.dtype), + nelems, + values.get()); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to define ADIOS attribute by value"); + + if (att.dtype == Datatype::VEC_STRING) { - auto ptr = reinterpret_cast< char** >(values.get()); - for( int i = 0; i < nelems; ++i ) + auto ptr = reinterpret_cast(values.get()); + for (int i = 0; i < nelems; ++i) delete[] ptr[i]; } } -void -CommonADIOS1IOHandlerImpl::createFile(Writable* writable, - Parameter< Operation::CREATE_FILE > const& parameters) +void CommonADIOS1IOHandlerImpl::createFile( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[ADIOS1] Creating a file in read-only mode is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[ADIOS1] Creating a file in read-only mode is not possible."); - if( !writable->written ) + if (!writable->written) { - if( !auxiliary::directory_exists(m_handler->directory) ) + if (!auxiliary::directory_exists(m_handler->directory)) { bool success = auxiliary::create_directories(m_handler->directory); - VERIFY(success, "[ADIOS1] Internal error: Failed to create directories during ADIOS file creation"); + VERIFY( + success, + "[ADIOS1] Internal error: Failed to create directories during " + "ADIOS file creation"); } std::string name = m_handler->directory + parameters.name; - if( !auxiliary::ends_with(name, ".bp") ) + if (!auxiliary::ends_with(name, ".bp")) name += ".bp"; writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS1FilePosition >("/"); + writable->abstractFilePosition = + std::make_shared("/"); - m_filePaths[writable] = std::make_shared< std::string >(name); + m_filePaths[writable] = std::make_shared(name); /* our control flow allows for more than one open file handle - * if multiple files are opened with the same group, data might be lost */ + * if multiple files are opened with the same group, data might be lost + */ - /* defer actually opening the file handle until the first Operation::WRITE_DATASET occurs */ + /* defer actually opening the file handle until the first + * Operation::WRITE_DATASET occurs */ m_existsOnDisk[m_filePaths[writable]] = false; GetFileHandle(writable); } } -void -CommonADIOS1IOHandlerImpl::createPath(Writable* writable, - Parameter< Operation::CREATE_PATH > const& parameters) +void CommonADIOS1IOHandlerImpl::createPath( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[ADIOS1] Creating a path in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[ADIOS1] Creating a path in a file opened as read only is not " + "possible."); - if( !writable->written ) + if (!writable->written) { /* Sanitize path */ std::string path = parameters.path; - if( auxiliary::starts_with(path, '/') ) + if (auxiliary::starts_with(path, '/')) path = auxiliary::replace_first(path, "/", ""); - if( !auxiliary::ends_with(path, '/') ) + if (!auxiliary::ends_with(path, '/')) path += '/'; /* ADIOS has no concept for explicitly creating paths. 
- * They are implicitly created with the paths of variables/attributes. */ + * They are implicitly created with the paths of variables/attributes. + */ writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS1FilePosition >(path); + writable->abstractFilePosition = + std::make_shared(path); - Writable* position; - if( writable->parent ) + Writable *position; + if (writable->parent) position = writable->parent; else - position = writable; /* root does not have a parent but might still have to be written */ + position = writable; /* root does not have a parent but might still + have to be written */ auto res = m_filePaths.find(position); m_filePaths[writable] = res->second; } } -void -CommonADIOS1IOHandlerImpl::createDataset(Writable* writable, - Parameter< Operation::CREATE_DATASET > const& parameters) +void CommonADIOS1IOHandlerImpl::createDataset( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[ADIOS1] Creating a dataset in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[ADIOS1] Creating a dataset in a file opened as read only is not " + "possible."); - if( !writable->written ) + if (!writable->written) { - /* ADIOS variable definitions require the file to be (re-)opened to take effect/not cause errors */ + /* ADIOS variable definitions require the file to be (re-)opened to take + * effect/not cause errors */ auto res = m_filePaths.find(writable); - if( res == m_filePaths.end() ) + if (res == m_filePaths.end()) res = m_filePaths.find(writable->parent); int64_t group = m_groups[res->second]; /* Sanitize name */ std::string name = parameters.name; - if( auxiliary::starts_with(name, '/') ) + if (auxiliary::starts_with(name, '/')) name = auxiliary::replace_first(name, "/", ""); - if( auxiliary::ends_with(name, '/') ) + if (auxiliary::ends_with(name, '/')) name = auxiliary::replace_last(name, "/", ""); std::string path = concrete_bp1_file_position(writable) + name; size_t ndims = parameters.extent.size(); - std::vector< std::string > chunkSize(ndims, ""); - std::vector< std::string > chunkOffset(ndims, ""); + std::vector chunkSize(ndims, ""); + std::vector chunkOffset(ndims, ""); int64_t id; - for( size_t i = 0; i < ndims; ++i ) + for (size_t i = 0; i < ndims; ++i) { chunkSize[i] = "/tmp" + path + "_chunkSize" + std::to_string(i); - id = adios_define_var(group, chunkSize[i].c_str(), "", adios_unsigned_long, "", "", ""); - VERIFY(id != 0, "[ADIOS1] Internal error: Failed to define ADIOS variable during Dataset creation"); + id = adios_define_var( + group, + chunkSize[i].c_str(), + "", + adios_unsigned_long, + "", + "", + ""); + VERIFY( + id != 0, + "[ADIOS1] Internal error: Failed to define ADIOS variable " + "during Dataset creation"); chunkOffset[i] = "/tmp" + path + "_chunkOffset" + std::to_string(i); - id = adios_define_var(group, chunkOffset[i].c_str(), "", adios_unsigned_long, "", "", ""); - VERIFY(id != 0, "[ADIOS1] Internal error: Failed to define ADIOS variable during Dataset creation"); + id = adios_define_var( + group, + chunkOffset[i].c_str(), + "", + adios_unsigned_long, + "", + "", + ""); + VERIFY( + id != 0, + "[ADIOS1] Internal error: Failed to define ADIOS variable " + "during Dataset creation"); } std::string chunkSizeParam = auxiliary::join(chunkSize, ","); std::string globalSize = getBP1Extent(parameters.extent); std::string chunkOffsetParam = auxiliary::join(chunkOffset, ","); 
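`adios_define_var` takes the local dimensions, global dimensions, and local offsets as strings (comma-separated lists of values or variable names), which is why the per-dimension helper variables defined above are joined by name before the call. A minimal stand-in for the `auxiliary::join` step, with a made-up dataset name:

```cpp
#include <numeric>
#include <string>
#include <vector>

// Join strings with "," -- e.g. {"/tmp/E_x_chunkSize0", "/tmp/E_x_chunkSize1"}
// becomes "/tmp/E_x_chunkSize0,/tmp/E_x_chunkSize1".
std::string joinWithCommas(std::vector<std::string> const &parts)
{
    if (parts.empty())
        return std::string();
    return std::accumulate(
        parts.begin() + 1,
        parts.end(),
        parts.front(),
        [](std::string acc, std::string const &next) {
            return std::move(acc) + "," + next;
        });
}
```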
- id = adios_define_var(group, - path.c_str(), - "", - getBP1DataType(parameters.dtype), - chunkSizeParam.c_str(), - globalSize.c_str(), - chunkOffsetParam.c_str()); - VERIFY(id != 0, "[ADIOS1] Internal error: Failed to define ADIOS variable during Dataset creation"); - - if( !parameters.compression.empty() ) - std::cerr << "Custom compression not compatible with ADIOS1 backend. Use transform instead." + id = adios_define_var( + group, + path.c_str(), + "", + getBP1DataType(parameters.dtype), + chunkSizeParam.c_str(), + globalSize.c_str(), + chunkOffsetParam.c_str()); + VERIFY( + id != 0, + "[ADIOS1] Internal error: Failed to define ADIOS variable during " + "Dataset creation"); + + if (!parameters.compression.empty()) + std::cerr << "Custom compression not compatible with ADIOS1 " + "backend. Use transform instead." << std::endl; - if( !parameters.transform.empty() ) + if (!parameters.transform.empty()) { int status; status = adios_set_transform(id, parameters.transform.c_str()); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to set ADIOS transform during Dataset cretaion"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to set ADIOS transform during " + "Dataset cretaion"); } writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS1FilePosition >(name); + writable->abstractFilePosition = + std::make_shared(name); m_filePaths[writable] = res->second; } } -void -CommonADIOS1IOHandlerImpl::extendDataset(Writable*, - Parameter< Operation::EXTEND_DATASET > const&) +void CommonADIOS1IOHandlerImpl::extendDataset( + Writable *, Parameter const &) { - throw std::runtime_error("[ADIOS1] Dataset extension not implemented in ADIOS backend"); + throw std::runtime_error( + "[ADIOS1] Dataset extension not implemented in ADIOS backend"); } -void -CommonADIOS1IOHandlerImpl::openFile(Writable* writable, - Parameter< Operation::OPEN_FILE > const& parameters) +void CommonADIOS1IOHandlerImpl::openFile( + Writable *writable, Parameter const ¶meters) { - if( !auxiliary::directory_exists(m_handler->directory) ) - throw no_such_file_error("[ADIOS1] Supplied directory is not valid: " + m_handler->directory); + if (!auxiliary::directory_exists(m_handler->directory)) + throw no_such_file_error( + "[ADIOS1] Supplied directory is not valid: " + + m_handler->directory); std::string name = m_handler->directory + parameters.name; - if( !auxiliary::ends_with(name, ".bp") ) + if (!auxiliary::ends_with(name, ".bp")) name += ".bp"; - std::shared_ptr< std::string > filePath; - auto it = std::find_if(m_filePaths.begin(), m_filePaths.end(), - [name](std::unordered_map< Writable*, std::shared_ptr< std::string > >::value_type const& entry){ return *entry.second == name; }); - if( it == m_filePaths.end() ) - filePath = std::make_shared< std::string >(name); + std::shared_ptr filePath; + auto it = std::find_if( + m_filePaths.begin(), + m_filePaths.end(), + [name](std::unordered_map >:: + value_type const &entry) { return *entry.second == name; }); + if (it == m_filePaths.end()) + filePath = std::make_shared(name); else filePath = it->second; - if( m_handler->m_backendAccess == Access::CREATE ) + if (m_handler->m_backendAccess == Access::CREATE) { // called at Series::flush for iterations that has been flushed before - // this is to make sure to point the Series.m_writer points to this iteration - // so when call Series.flushAttribute(), the attributes can be flushed to the iteration level file. 
+ // this is to make sure to point the Series.m_writer points to this + // iteration so when call Series.flushAttribute(), the attributes can be + // flushed to the iteration level file. m_filePaths[writable] = filePath; writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS1FilePosition >("/"); + writable->abstractFilePosition = + std::make_shared("/"); return; - } + } /* close the handle that corresponds to the file we want to open */ - if( m_openWriteFileHandles.find(filePath) != m_openWriteFileHandles.end() ) + if (m_openWriteFileHandles.find(filePath) != m_openWriteFileHandles.end()) { close(m_openWriteFileHandles[filePath]); m_openWriteFileHandles.erase(filePath); } - if( m_groups.find(filePath) == m_groups.end() ) + if (m_groups.find(filePath) == m_groups.end()) m_groups[filePath] = initialize_group(name); - if( m_openReadFileHandles.find(filePath) == m_openReadFileHandles.end() ) + if (m_openReadFileHandles.find(filePath) == m_openReadFileHandles.end()) { - ADIOS_FILE* f = open_read(name); + ADIOS_FILE *f = open_read(name); m_openReadFileHandles[filePath] = f; } writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS1FilePosition >("/"); + writable->abstractFilePosition = std::make_shared("/"); m_filePaths[writable] = filePath; m_existsOnDisk[filePath] = true; } -void -CommonADIOS1IOHandlerImpl::closeFile( - Writable * writable, - Parameter< Operation::CLOSE_FILE > const & ) +void CommonADIOS1IOHandlerImpl::closeFile( + Writable *writable, Parameter const &) { - auto myFile = m_filePaths.find( writable ); - if( myFile == m_filePaths.end() ) + auto myFile = m_filePaths.find(writable); + if (myFile == m_filePaths.end()) { return; } // finish write operations - auto myGroup = m_groups.find( myFile->second ); - if( myGroup != m_groups.end() ) + auto myGroup = m_groups.find(myFile->second); + if (myGroup != m_groups.end()) { - auto attributeWrites = m_attributeWrites.find( myGroup->second ); - if( this->m_handler->m_backendAccess != Access::READ_ONLY && - attributeWrites != m_attributeWrites.end() ) + auto attributeWrites = m_attributeWrites.find(myGroup->second); + if (this->m_handler->m_backendAccess != Access::READ_ONLY && + attributeWrites != m_attributeWrites.end()) { - for( auto & att : attributeWrites->second ) + for (auto &att : attributeWrites->second) { - flush_attribute( myGroup->second, att.first, att.second ); + flush_attribute(myGroup->second, att.first, att.second); } - m_attributeWrites.erase( attributeWrites ); + m_attributeWrites.erase(attributeWrites); } - m_groups.erase( myGroup ); + m_groups.erase(myGroup); } - auto handle_write = m_openWriteFileHandles.find( myFile->second ); - if( handle_write != m_openWriteFileHandles.end() ) + auto handle_write = m_openWriteFileHandles.find(myFile->second); + if (handle_write != m_openWriteFileHandles.end()) { - close( handle_write->second ); - m_openWriteFileHandles.erase( handle_write ); + close(handle_write->second); + m_openWriteFileHandles.erase(handle_write); } // finish read operations - auto handle_read = m_openReadFileHandles.find( myFile->second ); - if( handle_read != m_openReadFileHandles.end() ) + auto handle_read = m_openReadFileHandles.find(myFile->second); + if (handle_read != m_openReadFileHandles.end()) { - auto scheduled = m_scheduledReads.find( handle_read->second ); - if( scheduled != m_scheduledReads.end() ) + auto scheduled = m_scheduledReads.find(handle_read->second); + if (scheduled != m_scheduledReads.end()) { - auto status = 
adios_perform_reads( scheduled->first, 1 ); + auto status = adios_perform_reads(scheduled->first, 1); VERIFY( status == err_no_error, "[ADIOS1] Internal error: Failed to perform ADIOS reads during " - "dataset reading" ); + "dataset reading"); - for( auto & sel : scheduled->second ) - adios_selection_delete( sel ); - m_scheduledReads.erase( scheduled ); + for (auto &sel : scheduled->second) + adios_selection_delete(sel.selection); + m_scheduledReads.erase(scheduled); } - close( handle_read->second ); - m_openReadFileHandles.erase( handle_read ); + close(handle_read->second); + m_openReadFileHandles.erase(handle_read); } - m_existsOnDisk.erase( myFile->second ); - m_filePaths.erase( myFile ); + m_existsOnDisk.erase(myFile->second); + m_filePaths.erase(myFile); } -void -CommonADIOS1IOHandlerImpl::availableChunks( - Writable * writable, - Parameter< Operation::AVAILABLE_CHUNKS > & params ) +void CommonADIOS1IOHandlerImpl::availableChunks( + Writable *writable, Parameter ¶ms) { - ADIOS_FILE * f; - f = m_openReadFileHandles.at( m_filePaths.at( writable ) ); - std::string name = concrete_bp1_file_position( writable ); + ADIOS_FILE *f; + f = m_openReadFileHandles.at(m_filePaths.at(writable)); + std::string name = concrete_bp1_file_position(writable); VERIFY( - std::strcmp( f->path, m_filePaths.at( writable )->c_str() ) == 0, - "[ADIOS1] Internal Error: Invalid ADIOS read file handle" ); - ADIOS_VARINFO * varinfo = adios_inq_var( f, name.c_str() ); + std::strcmp(f->path, m_filePaths.at(writable)->c_str()) == 0, + "[ADIOS1] Internal Error: Invalid ADIOS read file handle"); + ADIOS_VARINFO *varinfo = adios_inq_var(f, name.c_str()); VERIFY( adios_errno == err_no_error, "[ADIOS1] Internal error: Failed to inquire ADIOS variable while " - "querying available chunks." ); - int err = adios_inq_var_blockinfo( f, varinfo ); + "querying available chunks."); + int err = adios_inq_var_blockinfo(f, varinfo); VERIFY( err == 0, "[ADIOS1] Internal error: Failed to obtain ADIOS varinfo while " - "querying available chunks." 
); - int nblocks = - varinfo->nblocks[ 0 ]; // we don't use steps, so index 0 is fine + "querying available chunks."); + int nblocks = varinfo->nblocks[0]; // we don't use steps, so index 0 is fine int ndim = varinfo->ndim; - auto & table = *params.chunks; - table.reserve( nblocks ); - for( int block = 0; block < nblocks; ++block ) + auto &table = *params.chunks; + table.reserve(nblocks); + for (int block = 0; block < nblocks; ++block) { - ADIOS_VARBLOCK & varblock = varinfo->blockinfo[ block ]; - Offset offset( ndim ); - Extent extent( ndim ); - for( int i = 0; i < ndim; ++i ) + ADIOS_VARBLOCK &varblock = varinfo->blockinfo[block]; + Offset offset(ndim); + Extent extent(ndim); + for (int i = 0; i < ndim; ++i) { - offset[ i ] = varblock.start[ i ]; - extent[ i ] = varblock.count[ i ]; + offset[i] = varblock.start[i]; + extent[i] = varblock.count[i]; } - table.emplace_back( offset, extent, int( varblock.process_id ) ); + table.emplace_back(offset, extent, int(varblock.process_id)); } - adios_free_varinfo( varinfo ); + adios_free_varinfo(varinfo); } -void -CommonADIOS1IOHandlerImpl::openPath( - Writable * writable, - Parameter< Operation::OPEN_PATH > const & parameters ) +void CommonADIOS1IOHandlerImpl::openPath( + Writable *writable, Parameter const ¶meters) { /* Sanitize path */ std::string path = parameters.path; - if( !path.empty() ) + if (!path.empty()) { - if( auxiliary::starts_with(path, '/') ) + if (auxiliary::starts_with(path, '/')) path = auxiliary::replace_first(path, "/", ""); - if( !auxiliary::ends_with(path, '/') ) + if (!auxiliary::ends_with(path, '/')) path += '/'; } writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS1FilePosition >(path); + writable->abstractFilePosition = std::make_shared(path); - auto res = writable->parent ? m_filePaths.find(writable->parent) : m_filePaths.find(writable); + auto res = writable->parent ? m_filePaths.find(writable->parent) + : m_filePaths.find(writable); m_filePaths[writable] = res->second; } -void -CommonADIOS1IOHandlerImpl::openDataset(Writable* writable, - Parameter< Operation::OPEN_DATASET >& parameters) +void CommonADIOS1IOHandlerImpl::openDataset( + Writable *writable, Parameter ¶meters) { ADIOS_FILE *f; auto res = m_filePaths.find(writable); - if( res == m_filePaths.end() ) + if (res == m_filePaths.end()) res = m_filePaths.find(writable->parent); f = m_openReadFileHandles.at(res->second); /* Sanitize name */ std::string name = parameters.name; - if( auxiliary::starts_with(name, '/') ) + if (auxiliary::starts_with(name, '/')) name = auxiliary::replace_first(name, "/", ""); std::string datasetname = writable->abstractFilePosition ? concrete_bp1_file_position(writable) : concrete_bp1_file_position(writable) + name; - ADIOS_VARINFO* vi; - vi = adios_inq_var(f, - datasetname.c_str()); + ADIOS_VARINFO *vi; + vi = adios_inq_var(f, datasetname.c_str()); std::string error_string("[ADIOS1] Internal error: "); error_string.append("Failed to inquire about ADIOS variable '") - .append(datasetname) - .append("' during dataset opening"); + .append(datasetname) + .append("' during dataset opening"); VERIFY(adios_errno == err_no_error, error_string); VERIFY(vi != nullptr, error_string); @@ -726,153 +740,165 @@ CommonADIOS1IOHandlerImpl::openDataset(Writable* writable, // note the ill-named fixed-byte adios_... 
types // https://github.com/ornladios/ADIOS/issues/187 - switch( vi->type ) + switch (vi->type) { using DT = Datatype; - case adios_byte: - dtype = DT::CHAR; - break; - case adios_short: - if( sizeof(short) == 2u ) - dtype = DT::SHORT; - else if( sizeof(int) == 2u ) - dtype = DT::INT; - else if( sizeof(long) == 2u ) - dtype = DT::LONG; - else if( sizeof(long long) == 2u ) - dtype = DT::LONGLONG; - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_short found."); - break; - case adios_integer: - if( sizeof(short) == 4u ) - dtype = DT::SHORT; - else if( sizeof(int) == 4u ) - dtype = DT::INT; - else if( sizeof(long) == 4u ) - dtype = DT::LONG; - else if( sizeof(long long) == 4u ) - dtype = DT::LONGLONG; - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_integer found."); - break; - case adios_long: - if( sizeof(short) == 8u ) - dtype = DT::SHORT; - else if( sizeof(int) == 8u ) - dtype = DT::INT; - else if( sizeof(long) == 8u ) - dtype = DT::LONG; - else if( sizeof(long long) == 8u ) - dtype = DT::LONGLONG; - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_long found."); - break; - case adios_unsigned_byte: - dtype = DT::UCHAR; - break; - case adios_unsigned_short: - if( sizeof(unsigned short) == 2u ) - dtype = DT::USHORT; - else if( sizeof(unsigned int) == 2u ) - dtype = DT::UINT; - else if( sizeof(unsigned long) == 2u ) - dtype = DT::ULONG; - else if( sizeof(unsigned long long) == 2u ) - dtype = DT::ULONGLONG; - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_short found."); - break; - case adios_unsigned_integer: - if( sizeof(unsigned short) == 4u ) - dtype = DT::USHORT; - else if( sizeof(unsigned int) == 4u ) - dtype = DT::UINT; - else if( sizeof(unsigned long) == 4u ) - dtype = DT::ULONG; - else if( sizeof(unsigned long long) == 4u ) - dtype = DT::ULONGLONG; - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_integer found."); - break; - case adios_unsigned_long: - if( sizeof(unsigned short) == 8u ) - dtype = DT::USHORT; - else if( sizeof(unsigned int) == 8u ) - dtype = DT::UINT; - else if( sizeof(unsigned long) == 8u ) - dtype = DT::ULONG; - else if( sizeof(unsigned long long) == 8u ) - dtype = DT::ULONGLONG; - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_long found."); - break; - case adios_real: - dtype = DT::FLOAT; - break; - case adios_double: - dtype = DT::DOUBLE; - break; - case adios_long_double: - dtype = DT::LONG_DOUBLE; - break; - case adios_complex: - dtype = DT::CFLOAT; - break; - case adios_double_complex: - dtype = DT::CDOUBLE; - break; - - case adios_string: - case adios_string_array: - default: - throw unsupported_data_error("[ADIOS1] Datatype not implemented for ADIOS dataset writing"); + case adios_byte: + dtype = DT::CHAR; + break; + case adios_short: + if (sizeof(short) == 2u) + dtype = DT::SHORT; + else if (sizeof(int) == 2u) + dtype = DT::INT; + else if (sizeof(long) == 2u) + dtype = DT::LONG; + else if (sizeof(long long) == 2u) + dtype = DT::LONGLONG; + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_short " + "found."); + break; + case adios_integer: + if (sizeof(short) == 4u) + dtype = DT::SHORT; + else if (sizeof(int) == 4u) + dtype = DT::INT; + else if (sizeof(long) == 4u) + dtype = DT::LONG; + else if (sizeof(long long) == 4u) + dtype = DT::LONGLONG; + else + throw 
unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_integer " + "found."); + break; + case adios_long: + if (sizeof(short) == 8u) + dtype = DT::SHORT; + else if (sizeof(int) == 8u) + dtype = DT::INT; + else if (sizeof(long) == 8u) + dtype = DT::LONG; + else if (sizeof(long long) == 8u) + dtype = DT::LONGLONG; + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_long found."); + break; + case adios_unsigned_byte: + dtype = DT::UCHAR; + break; + case adios_unsigned_short: + if (sizeof(unsigned short) == 2u) + dtype = DT::USHORT; + else if (sizeof(unsigned int) == 2u) + dtype = DT::UINT; + else if (sizeof(unsigned long) == 2u) + dtype = DT::ULONG; + else if (sizeof(unsigned long long) == 2u) + dtype = DT::ULONGLONG; + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_short found."); + break; + case adios_unsigned_integer: + if (sizeof(unsigned short) == 4u) + dtype = DT::USHORT; + else if (sizeof(unsigned int) == 4u) + dtype = DT::UINT; + else if (sizeof(unsigned long) == 4u) + dtype = DT::ULONG; + else if (sizeof(unsigned long long) == 4u) + dtype = DT::ULONGLONG; + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_integer found."); + break; + case adios_unsigned_long: + if (sizeof(unsigned short) == 8u) + dtype = DT::USHORT; + else if (sizeof(unsigned int) == 8u) + dtype = DT::UINT; + else if (sizeof(unsigned long) == 8u) + dtype = DT::ULONG; + else if (sizeof(unsigned long long) == 8u) + dtype = DT::ULONGLONG; + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_long found."); + break; + case adios_real: + dtype = DT::FLOAT; + break; + case adios_double: + dtype = DT::DOUBLE; + break; + case adios_long_double: + dtype = DT::LONG_DOUBLE; + break; + case adios_complex: + dtype = DT::CFLOAT; + break; + case adios_double_complex: + dtype = DT::CDOUBLE; + break; + + case adios_string: + case adios_string_array: + default: + throw unsupported_data_error( + "[ADIOS1] Datatype not implemented for ADIOS dataset writing"); } *parameters.dtype = dtype; Extent e; e.resize(vi->ndim); - for( int i = 0; i < vi->ndim; ++i ) + for (int i = 0; i < vi->ndim; ++i) e[i] = vi->dims[i]; *parameters.extent = e; writable->written = true; - if( !writable->abstractFilePosition ) + if (!writable->abstractFilePosition) { - writable->abstractFilePosition - = std::make_shared< ADIOS1FilePosition >(name); + writable->abstractFilePosition = + std::make_shared(name); } m_openReadFileHandles[res->second] = f; m_filePaths[writable] = res->second; } -void -CommonADIOS1IOHandlerImpl::deleteFile(Writable* writable, - Parameter< Operation::DELETE_FILE > const& parameters) +void CommonADIOS1IOHandlerImpl::deleteFile( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[ADIOS1] Deleting a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[ADIOS1] Deleting a file opened as read only is not possible."); - if( writable->written ) + if (writable->written) { auto path = m_filePaths.at(writable); - if( m_openReadFileHandles.find(path) != m_openReadFileHandles.end() ) + if (m_openReadFileHandles.find(path) != m_openReadFileHandles.end()) { close(m_openReadFileHandles.at(path)); m_openReadFileHandles.erase(path); } - if( m_openWriteFileHandles.find(path) != 
m_openWriteFileHandles.end() ) + if (m_openWriteFileHandles.find(path) != m_openWriteFileHandles.end()) { close(m_openWriteFileHandles.at(path)); m_openWriteFileHandles.erase(path); } std::string name = m_handler->directory + parameters.name; - if( !auxiliary::ends_with(name, ".bp") ) + if (!auxiliary::ends_with(name, ".bp")) name += ".bp"; - if( !auxiliary::file_exists(name) ) + if (!auxiliary::file_exists(name)) throw std::runtime_error("[ADIOS1] File does not exist: " + name); auxiliary::remove_file(name); @@ -884,52 +910,55 @@ CommonADIOS1IOHandlerImpl::deleteFile(Writable* writable, } } -void -CommonADIOS1IOHandlerImpl::deletePath(Writable*, - Parameter< Operation::DELETE_PATH > const&) +void CommonADIOS1IOHandlerImpl::deletePath( + Writable *, Parameter const &) { - throw std::runtime_error("[ADIOS1] Path deletion not implemented in ADIOS backend"); + throw std::runtime_error( + "[ADIOS1] Path deletion not implemented in ADIOS backend"); } -void -CommonADIOS1IOHandlerImpl::deleteDataset(Writable*, - Parameter< Operation::DELETE_DATASET > const&) +void CommonADIOS1IOHandlerImpl::deleteDataset( + Writable *, Parameter const &) { - throw std::runtime_error("[ADIOS1] Dataset deletion not implemented in ADIOS backend"); + throw std::runtime_error( + "[ADIOS1] Dataset deletion not implemented in ADIOS backend"); } -void -CommonADIOS1IOHandlerImpl::deleteAttribute(Writable*, - Parameter< Operation::DELETE_ATT > const&) +void CommonADIOS1IOHandlerImpl::deleteAttribute( + Writable *, Parameter const &) { - throw std::runtime_error("[ADIOS1] Attribute deletion not implemented in ADIOS backend"); + throw std::runtime_error( + "[ADIOS1] Attribute deletion not implemented in ADIOS backend"); } -int64_t CommonADIOS1IOHandlerImpl::GetFileHandle(Writable* writable) +int64_t CommonADIOS1IOHandlerImpl::GetFileHandle(Writable *writable) { auto res = m_filePaths.find(writable); - if( res == m_filePaths.end() ) + if (res == m_filePaths.end()) res = m_filePaths.find(writable->parent); int64_t fd; - if( m_openWriteFileHandles.find(res->second) == m_openWriteFileHandles.end() ) + if (m_openWriteFileHandles.find(res->second) == + m_openWriteFileHandles.end()) { - std::string name = *(res->second); + std::string name = *(res->second); m_groups[m_filePaths[writable]] = initialize_group(name); fd = open_write(writable); m_openWriteFileHandles[res->second] = fd; - } else + } + else fd = m_openWriteFileHandles.at(res->second); return fd; } -void -CommonADIOS1IOHandlerImpl::writeDataset(Writable* writable, - Parameter< Operation::WRITE_DATASET > const& parameters) +void CommonADIOS1IOHandlerImpl::writeDataset( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[ADIOS1] Writing into a dataset in a file opened as read-only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[ADIOS1] Writing into a dataset in a file opened as read-only is " + "not possible."); int64_t fd = GetFileHandle(writable); @@ -940,568 +969,651 @@ CommonADIOS1IOHandlerImpl::writeDataset(Writable* writable, std::string chunkSize; std::string chunkOffset; int status; - for( size_t i = 0; i < ndims; ++i ) + for (size_t i = 0; i < ndims; ++i) { chunkSize = "/tmp" + name + "_chunkSize" + std::to_string(i); status = adios_write(fd, chunkSize.c_str(), ¶meters.extent[i]); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to write ADIOS variable during Dataset writing"); + VERIFY( + status == 
err_no_error, + "[ADIOS1] Internal error: Failed to write ADIOS variable during " + "Dataset writing"); chunkOffset = "/tmp" + name + "_chunkOffset" + std::to_string(i); status = adios_write(fd, chunkOffset.c_str(), ¶meters.offset[i]); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to write ADIOS variable during Dataset writing"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to write ADIOS variable during " + "Dataset writing"); } - status = adios_write(fd, - name.c_str(), - parameters.data.get()); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to write ADIOS variable during Dataset writing"); + status = adios_write(fd, name.c_str(), parameters.data.get()); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to write ADIOS variable during " + "Dataset writing"); } -void -CommonADIOS1IOHandlerImpl::writeAttribute(Writable* writable, - Parameter< Operation::WRITE_ATT > const& parameters) +void CommonADIOS1IOHandlerImpl::writeAttribute( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[ADIOS1] Writing an attribute in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[ADIOS1] Writing an attribute in a file opened as read only is " + "not possible."); std::string name = concrete_bp1_file_position(writable); - if( !auxiliary::ends_with(name, '/') ) + if (!auxiliary::ends_with(name, '/')) name += '/'; name += parameters.name; auto res = m_filePaths.find(writable); - if( res == m_filePaths.end() ) + if (res == m_filePaths.end()) res = m_filePaths.find(writable->parent); GetFileHandle(writable); int64_t group = m_groups[res->second]; - auto& attributes = m_attributeWrites[group]; + auto &attributes = m_attributeWrites[group]; attributes.erase(name); attributes.emplace(name, parameters.resource); } -void -CommonADIOS1IOHandlerImpl::readDataset(Writable* writable, - Parameter< Operation::READ_DATASET >& parameters) +void CommonADIOS1IOHandlerImpl::readDataset( + Writable *writable, Parameter ¶meters) { - switch( parameters.dtype ) + switch (parameters.dtype) { using DT = Datatype; - case DT::DOUBLE: - case DT::FLOAT: - case DT::CDOUBLE: - case DT::CFLOAT: - case DT::SHORT: - case DT::INT: - case DT::LONG: - case DT::LONGLONG: - case DT::USHORT: - case DT::UINT: - case DT::ULONG: - case DT::ULONGLONG: - case DT::CHAR: - case DT::UCHAR: - case DT::BOOL: - break; - case DT::UNDEFINED: - throw std::runtime_error("[ADIOS1] Unknown Attribute datatype (ADIOS1 Dataset read)"); - case DT::DATATYPE: - throw std::runtime_error("[ADIOS1] Meta-Datatype leaked into IO"); - default: - throw std::runtime_error("[ADIOS1] Datatype not implemented in ADIOS1 IO"); + case DT::DOUBLE: + case DT::FLOAT: + case DT::CDOUBLE: + case DT::CFLOAT: + case DT::SHORT: + case DT::INT: + case DT::LONG: + case DT::LONGLONG: + case DT::USHORT: + case DT::UINT: + case DT::ULONG: + case DT::ULONGLONG: + case DT::CHAR: + case DT::UCHAR: + case DT::BOOL: + break; + case DT::UNDEFINED: + throw std::runtime_error( + "[ADIOS1] Unknown Attribute datatype (ADIOS1 Dataset read)"); + case DT::DATATYPE: + throw std::runtime_error("[ADIOS1] Meta-Datatype leaked into IO"); + default: + throw std::runtime_error( + "[ADIOS1] Datatype not implemented in ADIOS1 IO"); } - ADIOS_FILE* f; + ADIOS_FILE *f; f = m_openReadFileHandles.at(m_filePaths.at(writable)); - VERIFY(std::strcmp(f->path, 
m_filePaths.at(writable)->c_str()) == 0, - "[ADIOS1] Internal Error: Invalid ADIOS read file handle"); - - ADIOS_SELECTION* sel; - sel = adios_selection_boundingbox(parameters.extent.size(), - parameters.offset.data(), - parameters.extent.data()); - VERIFY(sel != nullptr, "[ADIOS1] Internal error: Failed to select ADIOS bounding box during dataset reading"); - VERIFY(adios_errno == err_no_error, "[ADIOS1] Internal error: Failed to select ADIOS bounding box during dataset reading"); + VERIFY( + std::strcmp(f->path, m_filePaths.at(writable)->c_str()) == 0, + "[ADIOS1] Internal Error: Invalid ADIOS read file handle"); + + ADIOS_SELECTION *sel; + sel = adios_selection_boundingbox( + parameters.extent.size(), + parameters.offset.data(), + parameters.extent.data()); + VERIFY( + sel != nullptr, + "[ADIOS1] Internal error: Failed to select ADIOS bounding box during " + "dataset reading"); + VERIFY( + adios_errno == err_no_error, + "[ADIOS1] Internal error: Failed to select ADIOS bounding box during " + "dataset reading"); std::string varname = concrete_bp1_file_position(writable); - void* data = parameters.data.get(); + void *data = parameters.data.get(); int status; - status = adios_schedule_read(f, - sel, - varname.c_str(), - 0, - 1, - data); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to schedule ADIOS read during dataset reading"); - VERIFY(adios_errno == err_no_error, "[ADIOS1] Internal error: Failed to schedule ADIOS read during dataset reading"); - - m_scheduledReads[f].push_back(sel); + status = adios_schedule_read(f, sel, varname.c_str(), 0, 1, data); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to schedule ADIOS read during dataset " + "reading"); + VERIFY( + adios_errno == err_no_error, + "[ADIOS1] Internal error: Failed to schedule ADIOS read during dataset " + "reading"); + + m_scheduledReads[f].push_back({sel, parameters.data}); } -void -CommonADIOS1IOHandlerImpl::readAttribute(Writable* writable, - Parameter< Operation::READ_ATT >& parameters) +void CommonADIOS1IOHandlerImpl::readAttribute( + Writable *writable, Parameter ¶meters) { - if( !writable->written ) - throw std::runtime_error("[ADIOS1] Internal error: Writable not marked written during attribute reading"); + if (!writable->written) + throw std::runtime_error( + "[ADIOS1] Internal error: Writable not marked written during " + "attribute reading"); - ADIOS_FILE* f; + ADIOS_FILE *f; f = m_openReadFileHandles.at(m_filePaths.at(writable)); std::string attrname = concrete_bp1_file_position(writable); - if( !auxiliary::ends_with(attrname, '/') ) + if (!auxiliary::ends_with(attrname, '/')) attrname += "/"; attrname += parameters.name; ADIOS_DATATYPES datatype = adios_unknown; int size = 0; - void* data = nullptr; + void *data = nullptr; int status; - status = adios_get_attr(f, - attrname.c_str(), - &datatype, - &size, - &data); - VERIFY(status == 0, "[ADIOS1] Internal error: Failed to get ADIOS1 attribute during attribute read"); - VERIFY(datatype != adios_unknown, "[ADIOS1] Internal error: Read unknown ADIOS1 datatype during attribute read"); + status = adios_get_attr(f, attrname.c_str(), &datatype, &size, &data); + VERIFY( + status == 0, + "[ADIOS1] Internal error: Failed to get ADIOS1 attribute during " + "attribute read"); + VERIFY( + datatype != adios_unknown, + "[ADIOS1] Internal error: Read unknown ADIOS1 datatype during " + "attribute read"); VERIFY(size != 0, "[ADIOS1] Internal error: ADIOS1 read 0-size attribute"); // size is returned in number of allocated bytes // note 
the ill-named fixed-byte adios_... types // https://github.com/ornladios/ADIOS/issues/187 - switch( datatype ) + switch (datatype) + { + case adios_byte: + break; + case adios_short: + size /= 2; + break; + case adios_integer: + size /= 4; + break; + case adios_long: + size /= 8; + break; + case adios_unsigned_byte: + break; + case adios_unsigned_short: + size /= 2; + break; + case adios_unsigned_integer: + size /= 4; + break; + case adios_unsigned_long: + size /= 8; + break; + case adios_real: + size /= 4; + break; + case adios_double: + size /= 8; + break; + case adios_long_double: + size /= sizeof(long double); + break; + case adios_complex: + size /= 8; + break; + case adios_double_complex: + size /= 16; + break; + case adios_string: + break; + case adios_string_array: + size /= sizeof(char *); + break; + + default: + throw unsupported_data_error( + "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype '" + + std::to_string(datatype) + "' in size check"); + } + + Datatype dtype; + Attribute a(0); + if (size == 1) { + switch (datatype) + { + using DT = Datatype; case adios_byte: + dtype = DT::CHAR; + a = Attribute(*reinterpret_cast(data)); break; case adios_short: - size /= 2; + if (sizeof(short) == 2u) + { + dtype = DT::SHORT; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(int) == 2u) + { + dtype = DT::INT; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(long) == 2u) + { + dtype = DT::LONG; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(long long) == 2u) + { + dtype = DT::LONGLONG; + a = Attribute(*reinterpret_cast(data)); + } + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_short " + "found."); break; case adios_integer: - size /= 4; + if (sizeof(short) == 4u) + { + dtype = DT::SHORT; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(int) == 4u) + { + dtype = DT::INT; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(long) == 4u) + { + dtype = DT::LONG; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(long long) == 4u) + { + dtype = DT::LONGLONG; + a = Attribute(*reinterpret_cast(data)); + } + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_integer " + "found."); break; case adios_long: - size /= 8; + if (sizeof(short) == 8u) + { + dtype = DT::SHORT; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(int) == 8u) + { + dtype = DT::INT; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(long) == 8u) + { + dtype = DT::LONG; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(long long) == 8u) + { + dtype = DT::LONGLONG; + a = Attribute(*reinterpret_cast(data)); + } + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_long " + "found."); break; case adios_unsigned_byte: + dtype = DT::UCHAR; + a = Attribute(*reinterpret_cast(data)); break; case adios_unsigned_short: - size /= 2; - break; - case adios_unsigned_integer: - size /= 4; - break; - case adios_unsigned_long: - size /= 8; - break; - case adios_real: - size /= 4; - break; - case adios_double: - size /= 8; - break; - case adios_long_double: - size /= sizeof(long double); - break; - case adios_complex: - size /= 8; - break; - case adios_double_complex: - size /= 16; - break; - case adios_string: - break; - case adios_string_array: - size /= sizeof(char*); - break; - - default: - throw unsupported_data_error( - "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype '" + - 
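// The size switch above normalises what adios_get_attr() reports: the `size`
// out-parameter is in bytes, so dividing by the element width of each
// fixed-width adios_* type turns it into an element count; a count of 1 then
// selects the scalar branch, anything larger the vector branch. A compact
// sketch of the same idea (an illustrative helper, not the patch's own code;
// assumes <adios_read.h> and openPMD's unsupported_data_error):
static int attributeElementCount(ADIOS_DATATYPES t, int sizeInBytes)
{
    switch (t)
    {
    case adios_byte:
    case adios_unsigned_byte:
    case adios_string:
        return sizeInBytes; // one byte per element
    case adios_short:
    case adios_unsigned_short:
        return sizeInBytes / 2;
    case adios_integer:
    case adios_unsigned_integer:
    case adios_real:
        return sizeInBytes / 4;
    case adios_long:
    case adios_unsigned_long:
    case adios_double:
    case adios_complex: // pair of 4-byte floats
        return sizeInBytes / 8;
    case adios_double_complex: // pair of 8-byte doubles
        return sizeInBytes / 16;
    case adios_long_double:
        return sizeInBytes / static_cast<int>(sizeof(long double));
    case adios_string_array:
        return sizeInBytes / static_cast<int>(sizeof(char *));
    default:
        throw unsupported_data_error(
            "[ADIOS1] unsupported attribute datatype in size check");
    }
}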
std::to_string(datatype) + "' in size check"); - } - - Datatype dtype; - Attribute a(0); - if( size == 1 ) - { - switch( datatype ) - { - using DT = Datatype; - case adios_byte: - dtype = DT::CHAR; - a = Attribute(*reinterpret_cast< char* >(data)); - break; - case adios_short: - if( sizeof(short) == 2u ) - { - dtype = DT::SHORT; - a = Attribute(*reinterpret_cast< short* >(data)); - } - else if( sizeof(int) == 2u ) - { - dtype = DT::INT; - a = Attribute(*reinterpret_cast< int* >(data)); - } - else if( sizeof(long) == 2u ) - { - dtype = DT::LONG; - a = Attribute(*reinterpret_cast< long* >(data)); - } - else if( sizeof(long long) == 2u ) - { - dtype = DT::LONGLONG; - a = Attribute(*reinterpret_cast< long long* >(data)); - } - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_short found."); - break; - case adios_integer: - if( sizeof(short) == 4u ) - { - dtype = DT::SHORT; - a = Attribute(*reinterpret_cast< short* >(data)); - } - else if( sizeof(int) == 4u ) - { - dtype = DT::INT; - a = Attribute(*reinterpret_cast< int* >(data)); - } - else if( sizeof(long) == 4u ) - { - dtype = DT::LONG; - a = Attribute(*reinterpret_cast< long* >(data)); - } - else if( sizeof(long long) == 4u ) - { - dtype = DT::LONGLONG; - a = Attribute(*reinterpret_cast< long long* >(data)); - } - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_integer found."); - break; - case adios_long: - if( sizeof(short) == 8u ) - { - dtype = DT::SHORT; - a = Attribute(*reinterpret_cast< short* >(data)); - } - else if( sizeof(int) == 8u ) - { - dtype = DT::INT; - a = Attribute(*reinterpret_cast< int* >(data)); - } - else if( sizeof(long) == 8u ) - { - dtype = DT::LONG; - a = Attribute(*reinterpret_cast< long* >(data)); - } - else if( sizeof(long long) == 8u ) - { - dtype = DT::LONGLONG; - a = Attribute(*reinterpret_cast< long long* >(data)); - } - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_long found."); - break; - case adios_unsigned_byte: - dtype = DT::UCHAR; - a = Attribute(*reinterpret_cast< unsigned char* >(data)); - break; - case adios_unsigned_short: - if( sizeof(unsigned short) == 2u ) - { - dtype = DT::USHORT; - a = Attribute(*reinterpret_cast< unsigned short* >(data)); - } - else if( sizeof(unsigned int) == 2u ) - { - dtype = DT::UINT; - a = Attribute(*reinterpret_cast< unsigned int* >(data)); - } - else if( sizeof(unsigned long) == 2u ) - { - dtype = DT::ULONG; - a = Attribute(*reinterpret_cast< unsigned long* >(data)); - } - else if( sizeof(unsigned long long) == 2u ) - { - dtype = DT::ULONGLONG; - a = Attribute(*reinterpret_cast< unsigned long long* >(data)); - } - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_short found."); - break; - case adios_unsigned_integer: - if( sizeof(unsigned short) == 4u ) - { - dtype = DT::USHORT; - a = Attribute(*reinterpret_cast< unsigned short* >(data)); - } - else if( sizeof(unsigned int) == 4u ) - { - dtype = DT::UINT; - a = Attribute(*reinterpret_cast< unsigned int* >(data)); - } - else if( sizeof(unsigned long) == 4u ) - { - dtype = DT::ULONG; - a = Attribute(*reinterpret_cast< unsigned long* >(data)); - } - else if( sizeof(unsigned long long) == 4u ) - { - dtype = DT::ULONGLONG; - a = Attribute(*reinterpret_cast< unsigned long long* >(data)); - } - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_integer found."); - break; - case adios_unsigned_long: - if( sizeof(unsigned 
short) == 8u ) - { - dtype = DT::USHORT; - a = Attribute(*reinterpret_cast< unsigned short* >(data)); - } - else if( sizeof(unsigned int) == 8u ) - { - dtype = DT::UINT; - a = Attribute(*reinterpret_cast< unsigned int* >(data)); - } - else if( sizeof(unsigned long) == 8u ) - { - dtype = DT::ULONG; - a = Attribute(*reinterpret_cast< unsigned long* >(data)); - } - else if( sizeof(unsigned long long) == 8u ) - { - dtype = DT::ULONGLONG; - a = Attribute(*reinterpret_cast< unsigned long long* >(data)); - } - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_long found."); - break; - case adios_real: - dtype = DT::FLOAT; - a = Attribute(*reinterpret_cast< float* >(data)); - break; - case adios_double: - dtype = DT::DOUBLE; - a = Attribute(*reinterpret_cast< double* >(data)); - break; - case adios_long_double: - dtype = DT::LONG_DOUBLE; - a = Attribute(*reinterpret_cast< long double* >(data)); - break; - case adios_complex: - dtype = DT::CFLOAT; - a = Attribute(*reinterpret_cast< std::complex* >(data)); - break; - case adios_double_complex: - dtype = DT::CDOUBLE; - a = Attribute(*reinterpret_cast< std::complex* >(data)); - break; - case adios_string: - { - dtype = DT::STRING; - auto c = reinterpret_cast< char* >(data); - a = Attribute(auxiliary::strip(std::string(c, std::strlen(c)), {'\0'})); - break; - } - case adios_string_array: + if (sizeof(unsigned short) == 2u) { - dtype = DT::VEC_STRING; - auto c = reinterpret_cast< char** >(data); - std::vector< std::string > vs; - vs.resize(size); - for( int i = 0; i < size; ++i ) - { - vs[i] = auxiliary::strip(std::string(c[i], std::strlen(c[i])), {'\0'}); - /** @todo pointer should be freed, but this causes memory corruption */ - //free(c[i]); - } - a = Attribute(vs); - break; + dtype = DT::USHORT; + a = Attribute(*reinterpret_cast(data)); } - default: - throw unsupported_data_error( - "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype '" + - std::to_string(datatype) + "' in scalar branch"); - } - } - else - { - switch( datatype ) - { - using DT = Datatype; - case adios_byte: + else if (sizeof(unsigned int) == 2u) { - dtype = DT::VEC_CHAR; - auto c = reinterpret_cast< char* >(data); - std::vector< char > vc; - vc.resize(size); - for( int i = 0; i < size; ++i ) - vc[i] = c[i]; - a = Attribute(vc); - break; + dtype = DT::UINT; + a = Attribute(*reinterpret_cast(data)); } - case adios_short: + else if (sizeof(unsigned long) == 2u) { - if( sizeof(short) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< short >(data, size), DT::VEC_SHORT); - else if( sizeof(int) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< int >(data, size), DT::VEC_INT); - else if( sizeof(long) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< long >(data, size), DT::VEC_LONG); - else if( sizeof(long long) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< long long >(data, size), DT::VEC_LONGLONG); - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_short found."); - break; + dtype = DT::ULONG; + a = Attribute(*reinterpret_cast(data)); } - case adios_integer: + else if (sizeof(unsigned long long) == 2u) { - if( sizeof(short) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< short >(data, size), DT::VEC_SHORT); - else if( sizeof(int) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< int >(data, size), DT::VEC_INT); - else if( sizeof(long) == 
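// Both the scalar cases above and the vector cases below deal with the fact
// that adios_short / adios_integer / adios_long name fixed byte widths
// (2/4/8), not C++ types, so the code probes sizeof(short|int|long|long long)
// to find a native type of matching width before wrapping the value in an
// Attribute. A condensed sketch of that mapping (illustrative only, not a
// helper from this patch; assumes openPMD's Attribute and
// unsupported_data_error):
#include <cstddef>
#include <string>

template <std::size_t Width>
Attribute makeSignedIntAttribute(void *data)
{
    if (sizeof(short) == Width)
        return Attribute(*static_cast<short *>(data));
    if (sizeof(int) == Width)
        return Attribute(*static_cast<int *>(data));
    if (sizeof(long) == Width)
        return Attribute(*static_cast<long *>(data));
    if (sizeof(long long) == Width)
        return Attribute(*static_cast<long long *>(data));
    throw unsupported_data_error(
        "[ADIOS1] No native signed integer of width " + std::to_string(Width) +
        " found.");
}
// e.g. adios_integer (4 bytes): makeSignedIntAttribute<4>(data)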
4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< long >(data, size), DT::VEC_LONG); - else if( sizeof(long long) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< long long >(data, size), DT::VEC_LONGLONG); - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_integer found."); - break; + dtype = DT::ULONGLONG; + a = Attribute(*reinterpret_cast(data)); } - case adios_long: + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_short found."); + break; + case adios_unsigned_integer: + if (sizeof(unsigned short) == 4u) { - if( sizeof(short) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< short >(data, size), DT::VEC_SHORT); - else if( sizeof(int) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< int >(data, size), DT::VEC_INT); - else if( sizeof(long) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< long >(data, size), DT::VEC_LONG); - else if( sizeof(long long) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< long long >(data, size), DT::VEC_LONGLONG); - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_long found."); - break; + dtype = DT::USHORT; + a = Attribute(*reinterpret_cast(data)); } - case adios_unsigned_byte: + else if (sizeof(unsigned int) == 4u) { - dtype = DT::VEC_UCHAR; - auto uc = reinterpret_cast< unsigned char* >(data); - std::vector< unsigned char > vuc; - vuc.resize(size); - for( int i = 0; i < size; ++i ) - vuc[i] = uc[i]; - a = Attribute(vuc); - break; + dtype = DT::UINT; + a = Attribute(*reinterpret_cast(data)); } - case adios_unsigned_short: + else if (sizeof(unsigned long) == 4u) { - if( sizeof(unsigned short) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned short >(data, size), DT::VEC_USHORT); - else if( sizeof(unsigned int) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned int >(data, size), DT::VEC_UINT); - else if( sizeof(unsigned long) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned long >(data, size), DT::VEC_ULONG); - else if( sizeof(unsigned long long) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned long long >(data, size), DT::VEC_ULONGLONG); - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_short found."); - break; + dtype = DT::ULONG; + a = Attribute(*reinterpret_cast(data)); } - case adios_unsigned_integer: + else if (sizeof(unsigned long long) == 4u) { - if( sizeof(unsigned short) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned short >(data, size), DT::VEC_USHORT); - else if( sizeof(unsigned int) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned int >(data, size), DT::VEC_UINT); - else if( sizeof(unsigned long) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned long >(data, size), DT::VEC_ULONG); - else if( sizeof(unsigned long long) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned long long >(data, size), DT::VEC_ULONGLONG); - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_integer found."); - break; + dtype = DT::ULONGLONG; + a = Attribute(*reinterpret_cast(data)); } - case adios_unsigned_long: + else + 
throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_integer found."); + break; + case adios_unsigned_long: + if (sizeof(unsigned short) == 8u) { - if( sizeof(unsigned short) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned short >(data, size), DT::VEC_USHORT); - else if( sizeof(unsigned int) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned int >(data, size), DT::VEC_UINT); - else if( sizeof(unsigned long) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned long >(data, size), DT::VEC_ULONG); - else if( sizeof(unsigned long long) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned long long >(data, size), DT::VEC_ULONGLONG); - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_long found."); - break; + dtype = DT::USHORT; + a = Attribute(*reinterpret_cast(data)); } - case adios_real: + else if (sizeof(unsigned int) == 8u) { - dtype = DT::VEC_FLOAT; - auto f4 = reinterpret_cast< float* >(data); - std::vector< float > vf; - vf.resize(size); - for( int i = 0; i < size; ++i ) - vf[i] = f4[i]; - a = Attribute(vf); - break; + dtype = DT::UINT; + a = Attribute(*reinterpret_cast(data)); } - case adios_double: + else if (sizeof(unsigned long) == 8u) { - dtype = DT::VEC_DOUBLE; - auto d8 = reinterpret_cast< double* >(data); - std::vector< double > vd; - vd.resize(size); - for( int i = 0; i < size; ++i ) - vd[i] = d8[i]; - a = Attribute(vd); - break; + dtype = DT::ULONG; + a = Attribute(*reinterpret_cast(data)); } - case adios_long_double: + else if (sizeof(unsigned long long) == 8u) { - dtype = DT::VEC_LONG_DOUBLE; - auto ld = reinterpret_cast< long double* >(data); - std::vector< long double > vld; - vld.resize(size); - for( int i = 0; i < size; ++i ) - vld[i] = ld[i]; - a = Attribute(vld); - break; + dtype = DT::ULONGLONG; + a = Attribute(*reinterpret_cast(data)); } - /* not supported by ADIOS 1.13.1: VEC_CFLOAT, VEC_CDOUBLE, VEC_CLONG_DOUBLE - * https://github.com/ornladios/ADIOS/issues/212 - */ - case adios_string: + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_long found."); + break; + case adios_real: + dtype = DT::FLOAT; + a = Attribute(*reinterpret_cast(data)); + break; + case adios_double: + dtype = DT::DOUBLE; + a = Attribute(*reinterpret_cast(data)); + break; + case adios_long_double: + dtype = DT::LONG_DOUBLE; + a = Attribute(*reinterpret_cast(data)); + break; + case adios_complex: + dtype = DT::CFLOAT; + a = Attribute(*reinterpret_cast *>(data)); + break; + case adios_double_complex: + dtype = DT::CDOUBLE; + a = Attribute(*reinterpret_cast *>(data)); + break; + case adios_string: { + dtype = DT::STRING; + auto c = reinterpret_cast(data); + a = Attribute( + auxiliary::strip(std::string(c, std::strlen(c)), {'\0'})); + break; + } + case adios_string_array: { + dtype = DT::VEC_STRING; + auto c = reinterpret_cast(data); + std::vector vs; + vs.resize(size); + for (int i = 0; i < size; ++i) { - dtype = DT::STRING; - a = Attribute(auxiliary::strip(std::string(reinterpret_cast< char* >(data), size), {'\0'})); - break; + vs[i] = auxiliary::strip( + std::string(c[i], std::strlen(c[i])), {'\0'}); + /** @todo pointer should be freed, but this causes memory + * corruption */ + // free(c[i]); } - case adios_string_array: + a = Attribute(vs); + break; + } + default: + throw unsupported_data_error( + "[ADIOS1] readAttribute: 
Unsupported ADIOS1 attribute datatype " + "'" + + std::to_string(datatype) + "' in scalar branch"); + } + } + else + { + switch (datatype) + { + using DT = Datatype; + case adios_byte: { + dtype = DT::VEC_CHAR; + auto c = reinterpret_cast(data); + std::vector vc; + vc.resize(size); + for (int i = 0; i < size; ++i) + vc[i] = c[i]; + a = Attribute(vc); + break; + } + case adios_short: { + if (sizeof(short) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_SHORT); + else if (sizeof(int) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), DT::VEC_INT); + else if (sizeof(long) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_LONG); + else if (sizeof(long long) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_LONGLONG); + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_short " + "found."); + break; + } + case adios_integer: { + if (sizeof(short) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_SHORT); + else if (sizeof(int) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), DT::VEC_INT); + else if (sizeof(long) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_LONG); + else if (sizeof(long long) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_LONGLONG); + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_integer " + "found."); + break; + } + case adios_long: { + if (sizeof(short) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_SHORT); + else if (sizeof(int) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), DT::VEC_INT); + else if (sizeof(long) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_LONG); + else if (sizeof(long long) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_LONGLONG); + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_long " + "found."); + break; + } + case adios_unsigned_byte: { + dtype = DT::VEC_UCHAR; + auto uc = reinterpret_cast(data); + std::vector vuc; + vuc.resize(size); + for (int i = 0; i < size; ++i) + vuc[i] = uc[i]; + a = Attribute(vuc); + break; + } + case adios_unsigned_short: { + if (sizeof(unsigned short) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_USHORT); + else if (sizeof(unsigned int) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_UINT); + else if (sizeof(unsigned long) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_ULONG); + else if (sizeof(unsigned long long) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_ULONGLONG); + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_short found."); + break; + } + case adios_unsigned_integer: { + if (sizeof(unsigned short) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_USHORT); + else if (sizeof(unsigned int) == 4u) + 
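// The vector cases above and below delegate to
// readVectorAttributeInternal<T>(data, size); its definition is not part of
// this hunk, but from the call sites it plausibly copies `size` elements of T
// out of the raw attribute buffer into a std::vector<T> wrapped in an
// Attribute. An assumed sketch (the real helper may differ in detail):
#include <vector>

template <typename T>
Attribute readVectorAttributeSketch(void *data, int size)
{
    auto raw = static_cast<T const *>(data);
    std::vector<T> v(raw, raw + size); // copy out of ADIOS-owned memory
    return Attribute(std::move(v));
}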
std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_UINT); + else if (sizeof(unsigned long) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_ULONG); + else if (sizeof(unsigned long long) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_ULONGLONG); + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_integer found."); + break; + } + case adios_unsigned_long: { + if (sizeof(unsigned short) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_USHORT); + else if (sizeof(unsigned int) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_UINT); + else if (sizeof(unsigned long) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_ULONG); + else if (sizeof(unsigned long long) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_ULONGLONG); + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_long found."); + break; + } + case adios_real: { + dtype = DT::VEC_FLOAT; + auto f4 = reinterpret_cast(data); + std::vector vf; + vf.resize(size); + for (int i = 0; i < size; ++i) + vf[i] = f4[i]; + a = Attribute(vf); + break; + } + case adios_double: { + dtype = DT::VEC_DOUBLE; + auto d8 = reinterpret_cast(data); + std::vector vd; + vd.resize(size); + for (int i = 0; i < size; ++i) + vd[i] = d8[i]; + a = Attribute(vd); + break; + } + case adios_long_double: { + dtype = DT::VEC_LONG_DOUBLE; + auto ld = reinterpret_cast(data); + std::vector vld; + vld.resize(size); + for (int i = 0; i < size; ++i) + vld[i] = ld[i]; + a = Attribute(vld); + break; + } + /* not supported by ADIOS 1.13.1: VEC_CFLOAT, VEC_CDOUBLE, + * VEC_CLONG_DOUBLE https://github.com/ornladios/ADIOS/issues/212 + */ + case adios_string: { + dtype = DT::STRING; + a = Attribute(auxiliary::strip( + std::string(reinterpret_cast(data), size), {'\0'})); + break; + } + case adios_string_array: { + dtype = DT::VEC_STRING; + auto c = reinterpret_cast(data); + std::vector vs; + vs.resize(size); + for (int i = 0; i < size; ++i) { - dtype = DT::VEC_STRING; - auto c = reinterpret_cast< char** >(data); - std::vector< std::string > vs; - vs.resize(size); - for( int i = 0; i < size; ++i ) - { - vs[i] = auxiliary::strip(std::string(c[i], std::strlen(c[i])), {'\0'}); - /** @todo pointer should be freed, but this causes memory corruption */ - //free(c[i]); - } - a = Attribute(vs); - break; + vs[i] = auxiliary::strip( + std::string(c[i], std::strlen(c[i])), {'\0'}); + /** @todo pointer should be freed, but this causes memory + * corruption */ + // free(c[i]); } + a = Attribute(vs); + break; + } - default: - throw unsupported_data_error( - "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype '" + - std::to_string(datatype) + "' in vector branch"); + default: + throw unsupported_data_error( + "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype " + "'" + + std::to_string(datatype) + "' in vector branch"); } } @@ -1511,30 +1623,32 @@ CommonADIOS1IOHandlerImpl::readAttribute(Writable* writable, *parameters.resource = a.getResource(); } -void -CommonADIOS1IOHandlerImpl::listPaths(Writable* writable, - Parameter< Operation::LIST_PATHS >& parameters) +void CommonADIOS1IOHandlerImpl::listPaths( + Writable *writable, Parameter 
¶meters) { - if( !writable->written ) - throw std::runtime_error("[ADIOS1] Internal error: Writable not marked written during path listing"); + if (!writable->written) + throw std::runtime_error( + "[ADIOS1] Internal error: Writable not marked written during path " + "listing"); - ADIOS_FILE* f; + ADIOS_FILE *f; f = m_openReadFileHandles.at(m_filePaths.at(writable)); std::string name = concrete_bp1_file_position(writable); - std::unordered_set< std::string > paths; - std::unordered_set< std::string > variables; - for( int i = 0; i < f->nvars; ++i ) + std::unordered_set paths; + std::unordered_set variables; + for (int i = 0; i < f->nvars; ++i) { - char* str = f->var_namelist[i]; + char *str = f->var_namelist[i]; std::string s(str, std::strlen(str)); - if( auxiliary::starts_with(s, name) ) + if (auxiliary::starts_with(s, name)) { /* remove the writable's path from the name */ s = auxiliary::replace_first(s, name, ""); variables.emplace(s); - if( std::any_of(s.begin(), s.end(), [](char c) { return c == '/'; }) ) + if (std::any_of( + s.begin(), s.end(), [](char c) { return c == '/'; })) { /* there are more path levels after the current writable */ s = s.substr(0, s.find_first_of('/')); @@ -1542,21 +1656,25 @@ CommonADIOS1IOHandlerImpl::listPaths(Writable* writable, } } } - for( int i = 0; i < f->nattrs; ++i ) + for (int i = 0; i < f->nattrs; ++i) { - char* str = f->attr_namelist[i]; + char *str = f->attr_namelist[i]; std::string s(str, std::strlen(str)); - if( auxiliary::starts_with(s, name) ) + if (auxiliary::starts_with(s, name)) { /* remove the writable's path from the name */ s = auxiliary::replace_first(s, name, ""); - if( std::any_of(s.begin(), s.end(), [](char c) { return c == '/'; }) ) + if (std::any_of( + s.begin(), s.end(), [](char c) { return c == '/'; })) { /* remove the attribute name */ s = s.substr(0, s.find_last_of('/')); - if( !std::any_of(variables.begin(), - variables.end(), - [&s](std::string const& var){ return auxiliary::starts_with(var, s); })) + if (!std::any_of( + variables.begin(), + variables.end(), + [&s](std::string const &var) { + return auxiliary::starts_with(var, s); + })) { /* this is either a group or a constant scalar */ s = s.substr(0, s.find_first_of('/')); @@ -1566,31 +1684,33 @@ CommonADIOS1IOHandlerImpl::listPaths(Writable* writable, } } - *parameters.paths = std::vector< std::string >(paths.begin(), paths.end()); + *parameters.paths = std::vector(paths.begin(), paths.end()); } -void -CommonADIOS1IOHandlerImpl::listDatasets(Writable* writable, - Parameter< Operation::LIST_DATASETS >& parameters) +void CommonADIOS1IOHandlerImpl::listDatasets( + Writable *writable, Parameter ¶meters) { - if( !writable->written ) - throw std::runtime_error("[ADIOS1] Internal error: Writable not marked written during dataset listing"); + if (!writable->written) + throw std::runtime_error( + "[ADIOS1] Internal error: Writable not marked written during " + "dataset listing"); - ADIOS_FILE* f; + ADIOS_FILE *f; f = m_openReadFileHandles.at(m_filePaths.at(writable)); std::string name = concrete_bp1_file_position(writable); - std::unordered_set< std::string > paths; - for( int i = 0; i < f->nvars; ++i ) + std::unordered_set paths; + for (int i = 0; i < f->nvars; ++i) { - char* str = f->var_namelist[i]; + char *str = f->var_namelist[i]; std::string s(str, std::strlen(str)); - if( auxiliary::starts_with(s, name) ) + if (auxiliary::starts_with(s, name)) { /* remove the writable's path from the name */ s = auxiliary::replace_first(s, name, ""); - if( std::none_of(s.begin(), s.end(), 
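// listPaths() above and listDatasets() below share one idiom: walk ADIOS1's
// flat name lists (f->var_namelist / f->attr_namelist), keep only entries
// starting with the writable's position, strip that prefix, and then either
// take the first remaining path segment (a sub-group) or require that no '/'
// remains at all (a dataset or attribute directly below the writable). A
// small sketch of the segment extraction (illustrative helper, not the
// patch's code):
#include <string>

std::string
firstSegmentBelow(std::string const &flatName, std::string const &prefix)
{
    if (flatName.compare(0, prefix.size(), prefix) != 0)
        return {}; // not below this writable
    std::string rest = flatName.substr(prefix.size());
    auto const slash = rest.find('/');
    return slash == std::string::npos ? rest : rest.substr(0, slash);
}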
[](char c) { return c == '/'; }) ) + if (std::none_of( + s.begin(), s.end(), [](char c) { return c == '/'; })) { /* this is a dataset of the writable */ paths.emplace(s); @@ -1598,58 +1718,68 @@ CommonADIOS1IOHandlerImpl::listDatasets(Writable* writable, } } - *parameters.datasets = std::vector< std::string >(paths.begin(), paths.end()); + *parameters.datasets = std::vector(paths.begin(), paths.end()); } -void -CommonADIOS1IOHandlerImpl::listAttributes(Writable* writable, - Parameter< Operation::LIST_ATTS >& parameters) +void CommonADIOS1IOHandlerImpl::listAttributes( + Writable *writable, Parameter ¶meters) { - if( !writable->written ) - throw std::runtime_error("[ADIOS1] Internal error: Writable not marked written during attribute listing"); + if (!writable->written) + throw std::runtime_error( + "[ADIOS1] Internal error: Writable not marked written during " + "attribute listing"); - ADIOS_FILE* f; + ADIOS_FILE *f; f = m_openReadFileHandles.at(m_filePaths.at(writable)); std::string name = concrete_bp1_file_position(writable); - if( !auxiliary::ends_with(name, '/') ) + if (!auxiliary::ends_with(name, '/')) { /* writable is a dataset and corresponds to an ADIOS variable */ - ADIOS_VARINFO* info; - info = adios_inq_var(f, - name.c_str()); - VERIFY(adios_errno == err_no_error, "[ADIOS1] Internal error: Failed to inquire ADIOS variable during attribute listing"); - VERIFY(info != nullptr, "[ADIOS1] Internal error: Failed to inquire ADIOS variable during attribute listing"); + ADIOS_VARINFO *info; + info = adios_inq_var(f, name.c_str()); + VERIFY( + adios_errno == err_no_error, + "[ADIOS1] Internal error: Failed to inquire ADIOS variable during " + "attribute listing"); + VERIFY( + info != nullptr, + "[ADIOS1] Internal error: Failed to inquire ADIOS variable during " + "attribute listing"); name += '/'; parameters.attributes->reserve(info->nattrs); - for( int i = 0; i < info->nattrs; ++i ) + for (int i = 0; i < info->nattrs; ++i) { - char* c = f->attr_namelist[info->attr_ids[i]]; - parameters.attributes->push_back(auxiliary::replace_first(std::string(c, std::strlen(c)), name, "")); + char *c = f->attr_namelist[info->attr_ids[i]]; + parameters.attributes->push_back(auxiliary::replace_first( + std::string(c, std::strlen(c)), name, "")); } adios_free_varinfo(info); - } else + } + else { /* there is no ADIOS variable associated with the writable */ - std::unordered_set< std::string > attributes; - for( int i = 0; i < f->nattrs; ++i ) + std::unordered_set attributes; + for (int i = 0; i < f->nattrs; ++i) { - char* str = f->attr_namelist[i]; + char *str = f->attr_namelist[i]; std::string s(str, std::strlen(str)); - if( auxiliary::starts_with(s, name) ) + if (auxiliary::starts_with(s, name)) { /* remove the writable's path from the name */ s = auxiliary::replace_first(s, name, ""); - if( std::none_of(s.begin(), s.end(), [](char c) { return c == '/'; }) ) + if (std::none_of( + s.begin(), s.end(), [](char c) { return c == '/'; })) { /* this is an attribute of the writable */ attributes.insert(s); } } } - *parameters.attributes = std::vector< std::string >(attributes.begin(), attributes.end()); + *parameters.attributes = + std::vector(attributes.begin(), attributes.end()); } } diff --git a/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp b/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp index 362784b1fd..75033ea520 100644 --- a/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp @@ -22,49 +22,58 @@ #include "openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp" #if openPMD_HAVE_MPI && 
openPMD_HAVE_ADIOS1 -# include "openPMD/auxiliary/Filesystem.hpp" -# include "openPMD/auxiliary/DerefDynamicCast.hpp" -# include "openPMD/auxiliary/Memory.hpp" -# include "openPMD/auxiliary/StringManip.hpp" -# include "openPMD/IO/AbstractIOHandlerImpl.hpp" -# include "openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp" -# include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" -# include "openPMD/IO/IOTask.hpp" -# include -# include -# include -# include +#include "openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp" +#include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" +#include "openPMD/IO/AbstractIOHandlerImpl.hpp" +#include "openPMD/IO/IOTask.hpp" +#include "openPMD/auxiliary/DerefDynamicCast.hpp" +#include "openPMD/auxiliary/Filesystem.hpp" +#include "openPMD/auxiliary/Memory.hpp" +#include "openPMD/auxiliary/StringManip.hpp" +#include +#include +#include +#include #endif namespace openPMD { #if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI -# if openPMD_USE_VERIFY -# define VERIFY(CONDITION, TEXT) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } -# else -# define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 ) -# endif - -ParallelADIOS1IOHandlerImpl::ParallelADIOS1IOHandlerImpl(AbstractIOHandler* handler, - MPI_Comm comm) - : AbstractIOHandlerImpl{handler}, - m_mpiInfo{MPI_INFO_NULL} +#if openPMD_USE_VERIFY +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } +#else +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0) +#endif + +ParallelADIOS1IOHandlerImpl::ParallelADIOS1IOHandlerImpl( + AbstractIOHandler *handler, MPI_Comm comm) + : AbstractIOHandlerImpl{handler}, m_mpiInfo{MPI_INFO_NULL} { int status = MPI_SUCCESS; status = MPI_Comm_dup(comm, &m_mpiComm); - VERIFY(status == MPI_SUCCESS, "[ADIOS1] Internal error: Failed to duplicate MPI communicator"); + VERIFY( + status == MPI_SUCCESS, + "[ADIOS1] Internal error: Failed to duplicate MPI communicator"); } ParallelADIOS1IOHandlerImpl::~ParallelADIOS1IOHandlerImpl() { - for( auto& f : m_openReadFileHandles ) + for (auto &f : m_openReadFileHandles) close(f.second); m_openReadFileHandles.clear(); - if( this->m_handler->m_backendAccess != Access::READ_ONLY ) + if (this->m_handler->m_backendAccess != Access::READ_ONLY) { - for( auto& group : m_attributeWrites ) - for( auto& att : group.second ) + for (auto &group : m_attributeWrites) + for (auto &att : group.second) flush_attribute(group.first, att.first, att.second); // unordered map caused the value of the same container @@ -72,11 +81,11 @@ ParallelADIOS1IOHandlerImpl::~ParallelADIOS1IOHandlerImpl() // which caused trouble with close(), which is collective // so I just sort by file name to force all processors close // all the fids in the same order - std::map< std::string, int64_t > allFiles; - for( auto& f : m_openWriteFileHandles ) + std::map allFiles; + for (auto &f : m_openWriteFileHandles) allFiles[*(f.first)] = f.second; - for( auto const& p : allFiles ) + for (auto const &p : allFiles) { auto const fid = p.second; close(fid); @@ -88,210 +97,298 @@ ParallelADIOS1IOHandlerImpl::~ParallelADIOS1IOHandlerImpl() int status; MPI_Barrier(m_mpiComm); status = adios_read_finalize_method(m_readMethod); - if( status != err_no_error ) - std::cerr << "Internal error: Failed to finalize ADIOS reading method (parallel)\n"; + if (status != err_no_error) + std::cerr << "Internal error: Failed to finalize ADIOS reading method " + "(parallel)\n"; MPI_Barrier(m_mpiComm); int rank = 0; MPI_Comm_rank(m_mpiComm, &rank); status = 
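// Note on the reformatted VERIFY macro above: with openPMD_USE_VERIFY the
// condition is evaluated and a std::runtime_error is thrown on failure;
// without it, `(void)sizeof(CONDITION)` never evaluates the condition but
// still requires it to be a well-formed expression, so variables used only in
// checks do not trigger unused-variable warnings. The do { ... } while (0)
// wrapper keeps the disabled form usable as a single statement, e.g. inside
// an unbraced if/else.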
adios_finalize(rank); - if( status != err_no_error ) + if (status != err_no_error) std::cerr << "Internal error: Failed to finalize ADIOS (parallel)\n"; MPI_Comm_free(&m_mpiComm); } -std::future< void > -ParallelADIOS1IOHandlerImpl::flush() +std::future ParallelADIOS1IOHandlerImpl::flush() { using namespace auxiliary; - auto handler = dynamic_cast< ParallelADIOS1IOHandler* >(m_handler); - while( !handler->m_setup.empty() ) + auto handler = dynamic_cast(m_handler); + while (!handler->m_setup.empty()) { - IOTask& i = handler->m_setup.front(); + IOTask &i = handler->m_setup.front(); try { - switch( i.operation ) + switch (i.operation) { using O = Operation; - case O::CREATE_FILE: - createFile(i.writable, deref_dynamic_cast< Parameter< Operation::CREATE_FILE > >(i.parameter.get())); - break; - case O::CREATE_PATH: - createPath(i.writable, deref_dynamic_cast< Parameter< O::CREATE_PATH > >(i.parameter.get())); - break; - case O::OPEN_PATH: - openPath(i.writable, deref_dynamic_cast< Parameter< O::OPEN_PATH > >(i.parameter.get())); - break; - case O::CREATE_DATASET: - createDataset(i.writable, deref_dynamic_cast< Parameter< O::CREATE_DATASET > >(i.parameter.get())); - break; - case O::WRITE_ATT: - writeAttribute(i.writable, deref_dynamic_cast< Parameter< O::WRITE_ATT > >(i.parameter.get())); - break; - case O::OPEN_FILE: - openFile(i.writable, deref_dynamic_cast< Parameter< O::OPEN_FILE > >(i.parameter.get())); - break; - default: - VERIFY(false, "[ADIOS1] Internal error: Wrong operation in ADIOS setup queue"); + case O::CREATE_FILE: + createFile( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::CREATE_PATH: + createPath( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::OPEN_PATH: + openPath( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::CREATE_DATASET: + createDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::WRITE_ATT: + writeAttribute( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::OPEN_FILE: + openFile( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + default: + VERIFY( + false, + "[ADIOS1] Internal error: Wrong operation in ADIOS setup " + "queue"); } - } catch (unsupported_data_error& e) + } + catch (...) { + std::cerr << "[AbstractIOHandlerImpl] IO Task " + << internal::operationAsString(i.operation) + << " failed with exception. Removing task" + << " from IO queue and passing on the exception." 
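// Behavioural note on the flush() loops here and below: the catch clause is
// widened from unsupported_data_error to catch (...), so any failing IOTask
// is now logged via internal::operationAsString(i.operation), popped from its
// queue (a broken task is not retried on the next flush), and the exception
// is rethrown to the caller.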
+ << std::endl; handler->m_setup.pop(); throw; } handler->m_setup.pop(); } - - while( !handler->m_work.empty() ) + while (!handler->m_work.empty()) { - IOTask& i = handler->m_work.front(); + IOTask &i = handler->m_work.front(); try { - switch( i.operation ) + switch (i.operation) { using O = Operation; - case O::EXTEND_DATASET: - extendDataset(i.writable, deref_dynamic_cast< Parameter< O::EXTEND_DATASET > >(i.parameter.get())); - break; - case O::CLOSE_PATH: - closePath(i.writable, deref_dynamic_cast< Parameter< O::CLOSE_PATH > >(i.parameter.get())); - break; - case O::OPEN_DATASET: - openDataset(i.writable, deref_dynamic_cast< Parameter< O::OPEN_DATASET > >(i.parameter.get())); - break; - case O::CLOSE_FILE: - closeFile(i.writable, *dynamic_cast< Parameter< O::CLOSE_FILE >* >(i.parameter.get())); - break; - case O::DELETE_FILE: - deleteFile(i.writable, deref_dynamic_cast< Parameter< O::DELETE_FILE > >(i.parameter.get())); - break; - case O::DELETE_PATH: - deletePath(i.writable, deref_dynamic_cast< Parameter< O::DELETE_PATH > >(i.parameter.get())); - break; - case O::DELETE_DATASET: - deleteDataset(i.writable, deref_dynamic_cast< Parameter< O::DELETE_DATASET > >(i.parameter.get())); - break; - case O::DELETE_ATT: - deleteAttribute(i.writable, deref_dynamic_cast< Parameter< O::DELETE_ATT > >(i.parameter.get())); - break; - case O::WRITE_DATASET: - writeDataset(i.writable, deref_dynamic_cast< Parameter< O::WRITE_DATASET > >(i.parameter.get())); - break; - case O::READ_DATASET: - readDataset(i.writable, deref_dynamic_cast< Parameter< O::READ_DATASET > >(i.parameter.get())); - break; - case O::GET_BUFFER_VIEW: - getBufferView(i.writable, deref_dynamic_cast< Parameter< O::GET_BUFFER_VIEW > >(i.parameter.get())); - break; - case O::READ_ATT: - readAttribute(i.writable, deref_dynamic_cast< Parameter< O::READ_ATT > >(i.parameter.get())); - break; - case O::LIST_PATHS: - listPaths(i.writable, deref_dynamic_cast< Parameter< O::LIST_PATHS > >(i.parameter.get())); - break; - case O::LIST_DATASETS: - listDatasets(i.writable, deref_dynamic_cast< Parameter< O::LIST_DATASETS > >(i.parameter.get())); - break; - case O::LIST_ATTS: - listAttributes(i.writable, deref_dynamic_cast< Parameter< O::LIST_ATTS > >(i.parameter.get())); - break; - case O::ADVANCE: - advance(i.writable, deref_dynamic_cast< Parameter< O::ADVANCE > >(i.parameter.get())); - break; - case O::AVAILABLE_CHUNKS: - availableChunks(i.writable, deref_dynamic_cast< Parameter< O::AVAILABLE_CHUNKS > >(i.parameter.get())); - break; - default: - VERIFY(false, "[ADIOS1] Internal error: Wrong operation in ADIOS work queue"); + case O::EXTEND_DATASET: + extendDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::CLOSE_PATH: + closePath( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::OPEN_DATASET: + openDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::CLOSE_FILE: + closeFile( + i.writable, + *dynamic_cast *>( + i.parameter.get())); + break; + case O::DELETE_FILE: + deleteFile( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::DELETE_PATH: + deletePath( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::DELETE_DATASET: + deleteDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::DELETE_ATT: + deleteAttribute( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::WRITE_DATASET: + writeDataset( + i.writable, + deref_dynamic_cast >( 
+ i.parameter.get())); + break; + case O::READ_DATASET: + readDataset( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::GET_BUFFER_VIEW: + getBufferView( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::READ_ATT: + readAttribute( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::LIST_PATHS: + listPaths( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::LIST_DATASETS: + listDatasets( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::LIST_ATTS: + listAttributes( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::ADVANCE: + advance( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + case O::AVAILABLE_CHUNKS: + availableChunks( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; + default: + VERIFY( + false, + "[ADIOS1] Internal error: Wrong operation in ADIOS work " + "queue"); } - } catch (unsupported_data_error& e) + } + catch (...) { - handler->m_work.pop(); + std::cerr << "[AbstractIOHandlerImpl] IO Task " + << internal::operationAsString(i.operation) + << " failed with exception. Removing task" + << " from IO queue and passing on the exception." + << std::endl; + m_handler->m_work.pop(); throw; } handler->m_work.pop(); } int status; - for( auto& file : m_scheduledReads ) + for (auto &file : m_scheduledReads) { - status = adios_perform_reads(file.first, - 1); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to perform ADIOS reads during dataset reading"); - - for( auto& sel : file.second ) - adios_selection_delete(sel); + status = adios_perform_reads(file.first, 1); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to perform ADIOS reads during " + "dataset reading"); + + for (auto &sel : file.second) + adios_selection_delete(sel.selection); } m_scheduledReads.clear(); - return std::future< void >(); + return std::future(); } -void -ParallelADIOS1IOHandlerImpl::init() +void ParallelADIOS1IOHandlerImpl::init() { int status; status = adios_init_noxml(m_mpiComm); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to initialize ADIOS"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to initialize ADIOS"); /** @todo ADIOS_READ_METHOD_BP_AGGREGATE */ m_readMethod = ADIOS_READ_METHOD_BP; status = adios_read_init_method(m_readMethod, m_mpiComm, ""); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to initialize ADIOS reading method"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to initialize ADIOS reading method"); } -ParallelADIOS1IOHandler::ParallelADIOS1IOHandler(std::string path, - Access at, - MPI_Comm comm) - : AbstractIOHandler(std::move(path), at, comm), - m_impl{new ParallelADIOS1IOHandlerImpl(this, comm)} +ParallelADIOS1IOHandler::ParallelADIOS1IOHandler( + std::string path, Access at, MPI_Comm comm) + : AbstractIOHandler(std::move(path), at, comm) + , m_impl{new ParallelADIOS1IOHandlerImpl(this, comm)} { m_impl->init(); } ParallelADIOS1IOHandler::~ParallelADIOS1IOHandler() = default; -std::future< void > -ParallelADIOS1IOHandler::flush() +std::future ParallelADIOS1IOHandler::flush(internal::FlushParams const &) { return m_impl->flush(); } -void -ParallelADIOS1IOHandler::enqueue(IOTask const& i) +void ParallelADIOS1IOHandler::enqueue(IOTask const &i) { - switch( i.operation ) + switch (i.operation) { - case Operation::CREATE_FILE: - case 
Operation::CREATE_PATH: - case Operation::OPEN_PATH: - case Operation::CREATE_DATASET: - case Operation::OPEN_FILE: - case Operation::WRITE_ATT: - m_setup.push(i); - return; - default: - m_work.push(i); - return; + case Operation::CREATE_FILE: + case Operation::CREATE_PATH: + case Operation::OPEN_PATH: + case Operation::CREATE_DATASET: + case Operation::OPEN_FILE: + case Operation::WRITE_ATT: + m_setup.push(i); + return; + default: + m_work.push(i); + return; } } -int64_t -ParallelADIOS1IOHandlerImpl::open_write(Writable* writable) +int64_t ParallelADIOS1IOHandlerImpl::open_write(Writable *writable) { auto res = m_filePaths.find(writable); - if( res == m_filePaths.end() ) + if (res == m_filePaths.end()) res = m_filePaths.find(writable->parent); std::string mode; - if( m_existsOnDisk[res->second] ) + if (m_existsOnDisk[res->second]) { mode = "u"; /* close the handle that corresponds to the file we want to append to */ - if( m_openReadFileHandles.find(res->second) != m_openReadFileHandles.end() ) + if (m_openReadFileHandles.find(res->second) != + m_openReadFileHandles.end()) { close(m_openReadFileHandles[res->second]); m_openReadFileHandles.erase(res->second); @@ -305,36 +402,41 @@ ParallelADIOS1IOHandlerImpl::open_write(Writable* writable) int64_t fd; int status; - status = adios_open(&fd, - res->second->c_str(), - res->second->c_str(), - mode.c_str(), - m_mpiComm); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to open_write ADIOS file"); + status = adios_open( + &fd, + res->second->c_str(), + res->second->c_str(), + mode.c_str(), + m_mpiComm); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to open_write ADIOS file"); return fd; } -ADIOS_FILE* -ParallelADIOS1IOHandlerImpl::open_read(std::string const & name) +ADIOS_FILE *ParallelADIOS1IOHandlerImpl::open_read(std::string const &name) { ADIOS_FILE *f; - f = adios_read_open_file(name.c_str(), - m_readMethod, - m_mpiComm); - VERIFY(adios_errno != err_file_not_found, "[ADIOS1] Internal error: ADIOS file not found"); - VERIFY(f != nullptr, "[ADIOS1] Internal error: Failed to open_read ADIOS file"); + f = adios_read_open_file(name.c_str(), m_readMethod, m_mpiComm); + VERIFY( + adios_errno != err_file_not_found, + "[ADIOS1] Internal error: ADIOS file not found"); + VERIFY( + f != nullptr, + "[ADIOS1] Internal error: Failed to open_read ADIOS file"); return f; } -int64_t -ParallelADIOS1IOHandlerImpl::initialize_group(std::string const &name) +int64_t ParallelADIOS1IOHandlerImpl::initialize_group(std::string const &name) { std::stringstream params; - params << "num_aggregators=" << getEnvNum("OPENPMD_ADIOS_NUM_AGGREGATORS", "1") + params << "num_aggregators=" + << getEnvNum("OPENPMD_ADIOS_NUM_AGGREGATORS", "1") << ";num_ost=" << getEnvNum("OPENPMD_ADIOS_NUM_OST", "0") - << ";have_metadata_file=" << getEnvNum("OPENPMD_ADIOS_HAVE_METADATA_FILE", "1") + << ";have_metadata_file=" + << getEnvNum("OPENPMD_ADIOS_HAVE_METADATA_FILE", "1") << ";verbose=2"; std::string params_str = params.str(); // important: copy out of temporary! 
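// initialize_group() above assembles the MPI_AGGREGATE transport parameters
// from environment variables, and the explicit params_str copy matters:
// adios_select_method() takes a char const *, and calling .c_str() on the
// temporary returned by params.str() would leave a dangling pointer. A
// plausible shape of the getEnvNum() helper used there (assumption: the real
// one comes from the auxiliary headers and may also validate that the value
// is numeric):
#include <cstdlib>
#include <string>

std::string getEnvNumSketch(char const *key, std::string defaultValue)
{
    char const *value = std::getenv(key);
    return (value != nullptr && *value != '\0') ? std::string(value)
                                                : std::move(defaultValue);
}
// e.g. "num_aggregators=" + getEnvNumSketch("OPENPMD_ADIOS_NUM_AGGREGATORS", "1")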
@@ -342,9 +444,14 @@ ParallelADIOS1IOHandlerImpl::initialize_group(std::string const &name) int64_t group; ADIOS_STATISTICS_FLAG noStatistics = adios_stat_no; status = adios_declare_group(&group, name.c_str(), "", noStatistics); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to declare ADIOS group"); - status = adios_select_method(group, "MPI_AGGREGATE", params_str.c_str(), ""); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to select ADIOS method"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to declare ADIOS group"); + status = + adios_select_method(group, "MPI_AGGREGATE", params_str.c_str(), ""); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to select ADIOS method"); return group; } @@ -355,34 +462,30 @@ ParallelADIOS1IOHandlerImpl::initialize_group(std::string const &name) #endif #else -# if openPMD_HAVE_MPI -ParallelADIOS1IOHandler::ParallelADIOS1IOHandler(std::string path, - Access at, - MPI_Comm comm) - : AbstractIOHandler(std::move(path), at, comm) +#if openPMD_HAVE_MPI +ParallelADIOS1IOHandler::ParallelADIOS1IOHandler( + std::string path, Access at, MPI_Comm comm) + : AbstractIOHandler(std::move(path), at, comm) { throw std::runtime_error("openPMD-api built without ADIOS1 support"); } -# else -ParallelADIOS1IOHandler::ParallelADIOS1IOHandler(std::string path, - Access at) - : AbstractIOHandler(std::move(path), at) +#else +ParallelADIOS1IOHandler::ParallelADIOS1IOHandler(std::string path, Access at) + : AbstractIOHandler(std::move(path), at) { - throw std::runtime_error("openPMD-api built without parallel ADIOS1 support"); + throw std::runtime_error( + "openPMD-api built without parallel ADIOS1 support"); } -# endif +#endif ParallelADIOS1IOHandler::~ParallelADIOS1IOHandler() = default; -std::future< void > -ParallelADIOS1IOHandler::flush() +std::future ParallelADIOS1IOHandler::flush(internal::FlushParams const &) { - return std::future< void >(); + return std::future(); } -void -ParallelADIOS1IOHandler::enqueue(IOTask const&) -{ -} +void ParallelADIOS1IOHandler::enqueue(IOTask const &) +{} #endif -} // openPMD +} // namespace openPMD diff --git a/src/IO/AbstractIOHandlerHelper.cpp b/src/IO/AbstractIOHandlerHelper.cpp index 25e2bfcd01..e053372e40 100644 --- a/src/IO/AbstractIOHandlerHelper.cpp +++ b/src/IO/AbstractIOHandlerHelper.cpp @@ -29,89 +29,112 @@ #include "openPMD/IO/JSON/JSONIOHandler.hpp" #include "openPMD/auxiliary/JSON.hpp" +#include +#include + namespace openPMD { -#if openPMD_HAVE_MPI - template<> - std::shared_ptr< AbstractIOHandler > - createIOHandler< nlohmann::json >( - std::string path, - Access access, - Format format, - MPI_Comm comm, - nlohmann::json options ) + +namespace +{ + template + std::shared_ptr + constructIOHandler(std::string const &backendName, Args &&...args) { - (void) options; - switch( format ) + if /* constexpr */ (enabled) + { + return std::make_shared(std::forward(args)...); + } + else { - case Format::HDF5: - return std::make_shared< ParallelHDF5IOHandler >( - path, access, comm, std::move( options ) ); - case Format::ADIOS1: -# if openPMD_HAVE_ADIOS1 - return std::make_shared< ParallelADIOS1IOHandler >( path, access, comm ); -# else - throw std::runtime_error("openPMD-api built without ADIOS1 support"); -# endif - case Format::ADIOS2: - return std::make_shared< ADIOS2IOHandler >( - path, access, comm, std::move( options ), "bp4" ); - case Format::ADIOS2_SST: - return std::make_shared< ADIOS2IOHandler >( - path, access, comm, std::move( options ), "sst" 
); - case Format::ADIOS2_SSC: - return std::make_shared< ADIOS2IOHandler >( - path, access, comm, std::move( options ), "ssc" ); - default: - throw std::runtime_error( - "Unknown file format! Did you specify a file ending?" ); + throw std::runtime_error( + "openPMD-api built without support for " + "backend '" + + backendName + "'."); } + throw "Unreachable"; } -#endif +} // namespace - template<> - std::shared_ptr< AbstractIOHandler > - createIOHandler< nlohmann::json >( - std::string path, - Access access, - Format format, - nlohmann::json options ) +#if openPMD_HAVE_MPI +template <> +std::shared_ptr createIOHandler( + std::string path, + Access access, + Format format, + MPI_Comm comm, + nlohmann::json options) +{ + (void)options; + switch (format) { - (void) options; - switch( format ) - { - case Format::HDF5: - return std::make_shared< HDF5IOHandler >( - path, access, std::move( options ) ); - case Format::ADIOS1: + case Format::HDF5: + return constructIOHandler( + "HDF5", path, access, comm, std::move(options)); + case Format::ADIOS1: #if openPMD_HAVE_ADIOS1 - return std::make_shared< ADIOS1IOHandler >( path, access ); + return constructIOHandler( + "ADIOS1", path, access, comm); #else - throw std::runtime_error("openPMD-api built without ADIOS1 support"); + throw std::runtime_error("openPMD-api built without ADIOS1 support"); #endif -#if openPMD_HAVE_ADIOS2 - case Format::ADIOS2: - return std::make_shared< ADIOS2IOHandler >( - path, access, std::move( options ), "bp4" ); - case Format::ADIOS2_SST: - return std::make_shared< ADIOS2IOHandler >( - path, access, std::move( options ), "sst" ); - case Format::ADIOS2_SSC: - return std::make_shared< ADIOS2IOHandler >( - path, access, std::move( options ), "ssc" ); -#endif // openPMD_HAVE_ADIOS2 - case Format::JSON: - return std::make_shared< JSONIOHandler >( path, access ); - default: - throw std::runtime_error( - "Unknown file format! Did you specify a file ending?" ); - } + case Format::ADIOS2: + return constructIOHandler( + "ADIOS2", path, access, comm, std::move(options), "bp4"); + case Format::ADIOS2_SST: + return constructIOHandler( + "ADIOS2", path, access, comm, std::move(options), "sst"); + case Format::ADIOS2_SSC: + return constructIOHandler( + "ADIOS2", path, access, comm, std::move(options), "ssc"); + default: + throw std::runtime_error( + "Unknown file format! Did you specify a file ending?"); } +} +#endif - std::shared_ptr< AbstractIOHandler > - createIOHandler( std::string path, Access access, Format format ) +template <> +std::shared_ptr createIOHandler( + std::string path, Access access, Format format, nlohmann::json options) +{ + (void)options; + switch (format) { - return createIOHandler( - std::move( path ), access, format, nlohmann::json::object() ); + case Format::HDF5: + return constructIOHandler( + "HDF5", path, access, std::move(options)); + case Format::ADIOS1: +#if openPMD_HAVE_ADIOS1 + return constructIOHandler( + "ADIOS1", path, access); +#else + throw std::runtime_error("openPMD-api built without ADIOS1 support"); +#endif +#if openPMD_HAVE_ADIOS2 + case Format::ADIOS2: + return constructIOHandler( + "ADIOS2", path, access, std::move(options), "bp4"); + case Format::ADIOS2_SST: + return constructIOHandler( + "ADIOS2", path, access, std::move(options), "sst"); + case Format::ADIOS2_SSC: + return constructIOHandler( + "ADIOS2", path, access, std::move(options), "ssc"); +#endif + case Format::JSON: + return constructIOHandler( + "JSON", path, access); + default: + throw std::runtime_error( + "Unknown file format! 
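// The createIOHandler() overloads above now route every backend through the
// constructIOHandler<Handler, enabled> helper, which either forwards its
// arguments to std::make_shared<Handler> or throws one uniform
// "built without support for backend ..." error. The /* constexpr */ comment
// in the helper hints at the C++17 form, sketched here (assumption: the patch
// keeps a plain runtime `if` for pre-C++17 compatibility):
#include <memory>
#include <stdexcept>
#include <string>
#include <utility>

template <typename IOHandler, bool enabled, typename... Args>
std::shared_ptr<AbstractIOHandler>
constructIOHandlerSketch(std::string const &backendName, Args &&...args)
{
    if constexpr (enabled)
        return std::make_shared<IOHandler>(std::forward<Args>(args)...);
    else
        throw std::runtime_error(
            "openPMD-api built without support for backend '" + backendName +
            "'.");
}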
Did you specify a file ending?"); } +} + +std::shared_ptr +createIOHandler(std::string path, Access access, Format format) +{ + return createIOHandler( + std::move(path), access, format, nlohmann::json::object()); +} } // namespace openPMD diff --git a/src/IO/DummyIOHandler.cpp b/src/IO/DummyIOHandler.cpp index 3a7fd9559f..308f584ce4 100644 --- a/src/IO/DummyIOHandler.cpp +++ b/src/IO/DummyIOHandler.cpp @@ -23,19 +23,17 @@ #include #include - namespace openPMD { - DummyIOHandler::DummyIOHandler(std::string path, Access at) - : AbstractIOHandler(std::move(path), at) - { } +DummyIOHandler::DummyIOHandler(std::string path, Access at) + : AbstractIOHandler(std::move(path), at) +{} - void DummyIOHandler::enqueue(IOTask const&) - { } +void DummyIOHandler::enqueue(IOTask const &) +{} - std::future< void > - DummyIOHandler::flush() - { - return std::future< void >(); - } -} // openPMD +std::future DummyIOHandler::flush(internal::FlushParams const &) +{ + return std::future(); +} +} // namespace openPMD diff --git a/src/IO/HDF5/HDF5Auxiliary.cpp b/src/IO/HDF5/HDF5Auxiliary.cpp index 41d86394f4..81396e12a9 100644 --- a/src/IO/HDF5/HDF5Auxiliary.cpp +++ b/src/IO/HDF5/HDF5Auxiliary.cpp @@ -20,305 +20,300 @@ */ #include "openPMD/config.hpp" #if openPMD_HAVE_HDF5 -# include "openPMD/IO/HDF5/HDF5Auxiliary.hpp" -# include "openPMD/auxiliary/StringManip.hpp" -# include "openPMD/backend/Attribute.hpp" -# include "openPMD/backend/Writable.hpp" -# include "openPMD/IO/HDF5/HDF5FilePosition.hpp" +#include "openPMD/IO/HDF5/HDF5Auxiliary.hpp" +#include "openPMD/IO/HDF5/HDF5FilePosition.hpp" +#include "openPMD/auxiliary/StringManip.hpp" +#include "openPMD/backend/Attribute.hpp" +#include "openPMD/backend/Writable.hpp" -# include +#include -# include -# include -# include -# include -# include -# include -# include -# include - -# if openPMD_USE_VERIFY -# define VERIFY(CONDITION, TEXT) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } -# else -# define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 ) -# endif +#include +#include +#include +#include +#include +#include +#include +#include +#if openPMD_USE_VERIFY +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } +#else +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0) +#endif -hid_t -openPMD::GetH5DataType::operator()(Attribute const &att) +hid_t openPMD::GetH5DataType::operator()(Attribute const &att) { using DT = Datatype; - switch (att.dtype) { - case DT::CHAR: - case DT::VEC_CHAR: - return H5Tcopy(H5T_NATIVE_CHAR); - case DT::UCHAR: - case DT::VEC_UCHAR: - return H5Tcopy(H5T_NATIVE_UCHAR); - case DT::SHORT: - case DT::VEC_SHORT: - return H5Tcopy(H5T_NATIVE_SHORT); - case DT::INT: - case DT::VEC_INT: - return H5Tcopy(H5T_NATIVE_INT); - case DT::LONG: - case DT::VEC_LONG: - return H5Tcopy(H5T_NATIVE_LONG); - case DT::LONGLONG: - case DT::VEC_LONGLONG: - return H5Tcopy(H5T_NATIVE_LLONG); - case DT::USHORT: - case DT::VEC_USHORT: - return H5Tcopy(H5T_NATIVE_USHORT); - case DT::UINT: - case DT::VEC_UINT: - return H5Tcopy(H5T_NATIVE_UINT); - case DT::ULONG: - case DT::VEC_ULONG: - return H5Tcopy(H5T_NATIVE_ULONG); - case DT::ULONGLONG: - case DT::VEC_ULONGLONG: - return H5Tcopy(H5T_NATIVE_ULLONG); - case DT::FLOAT: - case DT::VEC_FLOAT: - return H5Tcopy(H5T_NATIVE_FLOAT); - case DT::DOUBLE: - case DT::ARR_DBL_7: - case DT::VEC_DOUBLE: - return H5Tcopy(H5T_NATIVE_DOUBLE); - case DT::LONG_DOUBLE: - case DT::VEC_LONG_DOUBLE: - return 
H5Tcopy(H5T_NATIVE_LDOUBLE); - case DT::CFLOAT: - case DT::VEC_CFLOAT: - return H5Tcopy( m_userTypes.at( typeid(std::complex< float >).name() ) ); - case DT::CDOUBLE: - case DT::VEC_CDOUBLE: - return H5Tcopy( m_userTypes.at( typeid(std::complex< double >).name() ) ); - case DT::CLONG_DOUBLE: - case DT::VEC_CLONG_DOUBLE: - return H5Tcopy( m_userTypes.at( typeid(std::complex< long double >).name() ) ); - case DT::STRING: { - hid_t string_t_id = H5Tcopy(H5T_C_S1); - size_t const max_len = att.get().size(); - VERIFY(max_len > 0, "[HDF5] max_len must be >0 for STRING"); - herr_t status = H5Tset_size(string_t_id, max_len); - VERIFY(status >= 0, "[HDF5] Internal error: Failed in H5Tset_size for STRING"); - return string_t_id; - } - case DT::VEC_STRING: { - hid_t string_t_id = H5Tcopy(H5T_C_S1); - size_t max_len = 0; - for (std::string const &s : att.get >()) - max_len = std::max(max_len, s.size()); - VERIFY(max_len > 0, "[HDF5] max_len must be >0 for VEC_STRING"); - herr_t status = H5Tset_size(string_t_id, max_len); - VERIFY(status >= 0, "[HDF5] Internal error: Failed in H5Tset_size for VEC_STRING"); - return string_t_id; - } - case DT::BOOL: - return H5Tcopy( m_userTypes.at( typeid(bool).name() ) ); - case DT::DATATYPE: - throw std::runtime_error("[HDF5] Meta-Datatype leaked into IO"); - case DT::UNDEFINED: - throw std::runtime_error("[HDF5] Unknown Attribute datatype (HDF5 datatype)"); - default: - throw std::runtime_error("[HDF5] Datatype not implemented"); + switch (att.dtype) + { + case DT::CHAR: + case DT::VEC_CHAR: + return H5Tcopy(H5T_NATIVE_CHAR); + case DT::UCHAR: + case DT::VEC_UCHAR: + return H5Tcopy(H5T_NATIVE_UCHAR); + case DT::SHORT: + case DT::VEC_SHORT: + return H5Tcopy(H5T_NATIVE_SHORT); + case DT::INT: + case DT::VEC_INT: + return H5Tcopy(H5T_NATIVE_INT); + case DT::LONG: + case DT::VEC_LONG: + return H5Tcopy(H5T_NATIVE_LONG); + case DT::LONGLONG: + case DT::VEC_LONGLONG: + return H5Tcopy(H5T_NATIVE_LLONG); + case DT::USHORT: + case DT::VEC_USHORT: + return H5Tcopy(H5T_NATIVE_USHORT); + case DT::UINT: + case DT::VEC_UINT: + return H5Tcopy(H5T_NATIVE_UINT); + case DT::ULONG: + case DT::VEC_ULONG: + return H5Tcopy(H5T_NATIVE_ULONG); + case DT::ULONGLONG: + case DT::VEC_ULONGLONG: + return H5Tcopy(H5T_NATIVE_ULLONG); + case DT::FLOAT: + case DT::VEC_FLOAT: + return H5Tcopy(H5T_NATIVE_FLOAT); + case DT::DOUBLE: + case DT::ARR_DBL_7: + case DT::VEC_DOUBLE: + return H5Tcopy(H5T_NATIVE_DOUBLE); + case DT::LONG_DOUBLE: + case DT::VEC_LONG_DOUBLE: + return H5Tcopy(H5T_NATIVE_LDOUBLE); + case DT::CFLOAT: + case DT::VEC_CFLOAT: + return H5Tcopy(m_userTypes.at(typeid(std::complex).name())); + case DT::CDOUBLE: + case DT::VEC_CDOUBLE: + return H5Tcopy(m_userTypes.at(typeid(std::complex).name())); + case DT::CLONG_DOUBLE: + case DT::VEC_CLONG_DOUBLE: + return H5Tcopy( + m_userTypes.at(typeid(std::complex).name())); + case DT::STRING: { + hid_t string_t_id = H5Tcopy(H5T_C_S1); + size_t const max_len = att.get().size(); + VERIFY(max_len > 0, "[HDF5] max_len must be >0 for STRING"); + herr_t status = H5Tset_size(string_t_id, max_len); + VERIFY( + status >= 0, + "[HDF5] Internal error: Failed in H5Tset_size for STRING"); + return string_t_id; + } + case DT::VEC_STRING: { + hid_t string_t_id = H5Tcopy(H5T_C_S1); + size_t max_len = 0; + for (std::string const &s : att.get >()) + max_len = std::max(max_len, s.size()); + VERIFY(max_len > 0, "[HDF5] max_len must be >0 for VEC_STRING"); + herr_t status = H5Tset_size(string_t_id, max_len); + VERIFY( + status >= 0, + "[HDF5] Internal error: Failed in 
H5Tset_size for VEC_STRING"); + return string_t_id; + } + case DT::BOOL: + return H5Tcopy(m_userTypes.at(typeid(bool).name())); + case DT::DATATYPE: + throw std::runtime_error("[HDF5] Meta-Datatype leaked into IO"); + case DT::UNDEFINED: + throw std::runtime_error( + "[HDF5] Unknown Attribute datatype (HDF5 datatype)"); + default: + throw std::runtime_error("[HDF5] Datatype not implemented"); } } - -hid_t -openPMD::getH5DataSpace(Attribute const& att) +hid_t openPMD::getH5DataSpace(Attribute const &att) { using DT = Datatype; - switch( att.dtype ) + switch (att.dtype) { - case DT::CHAR: - case DT::UCHAR: - case DT::SHORT: - case DT::INT: - case DT::LONG: - case DT::LONGLONG: - case DT::USHORT: - case DT::UINT: - case DT::ULONG: - case DT::ULONGLONG: - case DT::FLOAT: - case DT::DOUBLE: - case DT::LONG_DOUBLE: - case DT::CFLOAT: - case DT::CDOUBLE: - case DT::CLONG_DOUBLE: - case DT::STRING: - case DT::BOOL: - return H5Screate(H5S_SCALAR); - case DT::VEC_CHAR: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< char > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_SHORT: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< short > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_INT: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< int > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_LONG: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< long > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_LONGLONG: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< long long > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_UCHAR: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< unsigned char > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_USHORT: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< unsigned short > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_UINT: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< unsigned int > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_ULONG: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< unsigned long > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_ULONGLONG: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< unsigned long long > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_FLOAT: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< float > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_DOUBLE: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< double > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_LONG_DOUBLE: - { - hid_t vec_t_id = 
H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< long double > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_CFLOAT: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< std::complex< float > > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_CDOUBLE: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< std::complex< double > > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_CLONG_DOUBLE: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< std::complex< long double > > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_STRING: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< std::string > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::ARR_DBL_7: - { - hid_t array_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {7}; - H5Sset_extent_simple(array_t_id, 1, dims, nullptr); - return array_t_id; - } - case DT::UNDEFINED: - throw std::runtime_error("Unknown Attribute datatype (HDF5 dataspace)"); - default: - throw std::runtime_error("Datatype not implemented in HDF5 IO"); + case DT::CHAR: + case DT::UCHAR: + case DT::SHORT: + case DT::INT: + case DT::LONG: + case DT::LONGLONG: + case DT::USHORT: + case DT::UINT: + case DT::ULONG: + case DT::ULONGLONG: + case DT::FLOAT: + case DT::DOUBLE: + case DT::LONG_DOUBLE: + case DT::CFLOAT: + case DT::CDOUBLE: + case DT::CLONG_DOUBLE: + case DT::STRING: + case DT::BOOL: + return H5Screate(H5S_SCALAR); + case DT::VEC_CHAR: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_SHORT: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_INT: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_LONG: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_LONGLONG: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_UCHAR: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_USHORT: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_UINT: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_ULONG: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_ULONGLONG: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + 
H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_FLOAT: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_DOUBLE: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_LONG_DOUBLE: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_CFLOAT: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = { + att.get > >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_CDOUBLE: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = { + att.get > >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_CLONG_DOUBLE: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = { + att.get > >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_STRING: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get >().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::ARR_DBL_7: { + hid_t array_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {7}; + H5Sset_extent_simple(array_t_id, 1, dims, nullptr); + return array_t_id; + } + case DT::UNDEFINED: + throw std::runtime_error("Unknown Attribute datatype (HDF5 dataspace)"); + default: + throw std::runtime_error("Datatype not implemented in HDF5 IO"); } } -std::string -openPMD::concrete_h5_file_position(Writable* w) +std::string openPMD::concrete_h5_file_position(Writable *w) { - std::stack< Writable* > hierarchy; - if( !w->abstractFilePosition ) + std::stack hierarchy; + if (!w->abstractFilePosition) w = w->parent; - while( w ) + while (w) { hierarchy.push(w); w = w->parent; } std::string pos; - while( !hierarchy.empty() ) + while (!hierarchy.empty()) { - pos += std::dynamic_pointer_cast< HDF5FilePosition >(hierarchy.top()->abstractFilePosition)->location; + pos += std::dynamic_pointer_cast( + hierarchy.top()->abstractFilePosition) + ->location; hierarchy.pop(); } return auxiliary::replace_all(pos, "//", "/"); } - -std::vector< hsize_t > -openPMD::getOptimalChunkDims( std::vector< hsize_t > const dims, - size_t const typeSize ) +std::vector openPMD::getOptimalChunkDims( + std::vector const dims, size_t const typeSize) { auto const ndims = dims.size(); - std::vector< hsize_t > chunk_dims( dims.size() ); + std::vector chunk_dims(dims.size()); // chunk sizes in KiByte - constexpr std::array< size_t, 7u > CHUNK_SIZES_KiB - {{4096u, 2048u, 1024u, 512u, 256u, 128u, 64u}}; + constexpr std::array CHUNK_SIZES_KiB{ + {4096u, 2048u, 1024u, 512u, 256u, 128u, 64u}}; size_t total_data_size = typeSize; size_t max_chunk_size = typeSize; @@ -344,7 +339,7 @@ openPMD::getOptimalChunkDims( std::vector< hsize_t > const dims, } // compute the target chunk size - for( auto const & chunk_size : CHUNK_SIZES_KiB ) + for (auto const &chunk_size : CHUNK_SIZES_KiB) { target_chunk_size = chunk_size * 1024; if (target_chunk_size <= max_chunk_size) @@ -354,7 +349,7 @@ openPMD::getOptimalChunkDims( std::vector< hsize_t > const dims, size_t current_chunk_size = typeSize; size_t last_chunk_diff = target_chunk_size; std::multimap::const_iterator current_index = - 
dims_order.begin(); + dims_order.begin(); while (current_chunk_size < target_chunk_size) { diff --git a/src/IO/HDF5/HDF5IOHandler.cpp b/src/IO/HDF5/HDF5IOHandler.cpp index 56fad7c737..10b7651f8f 100644 --- a/src/IO/HDF5/HDF5IOHandler.cpp +++ b/src/IO/HDF5/HDF5IOHandler.cpp @@ -23,13 +23,15 @@ #include "openPMD/auxiliary/Environment.hpp" #if openPMD_HAVE_HDF5 -# include "openPMD/Datatype.hpp" -# include "openPMD/auxiliary/Filesystem.hpp" -# include "openPMD/auxiliary/StringManip.hpp" -# include "openPMD/backend/Attribute.hpp" -# include "openPMD/IO/IOTask.hpp" -# include "openPMD/IO/HDF5/HDF5Auxiliary.hpp" -# include "openPMD/IO/HDF5/HDF5FilePosition.hpp" +#include "openPMD/Datatype.hpp" +#include "openPMD/IO/HDF5/HDF5Auxiliary.hpp" +#include "openPMD/IO/HDF5/HDF5FilePosition.hpp" +#include "openPMD/IO/IOTask.hpp" +#include "openPMD/auxiliary/Filesystem.hpp" +#include "openPMD/auxiliary/StringManip.hpp" +#include "openPMD/backend/Attribute.hpp" + +#include #endif #include @@ -46,38 +48,56 @@ namespace openPMD { #if openPMD_HAVE_HDF5 -# if openPMD_USE_VERIFY -# define VERIFY(CONDITION, TEXT) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } -# else -# define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 ) -# endif +#if openPMD_USE_VERIFY +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } +#else +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0) +#endif HDF5IOHandlerImpl::HDF5IOHandlerImpl( - AbstractIOHandler* handler, nlohmann::json config) - : AbstractIOHandlerImpl(handler), - m_datasetTransferProperty{H5P_DEFAULT}, - m_fileAccessProperty{H5P_DEFAULT}, - m_H5T_BOOL_ENUM{H5Tenum_create(H5T_NATIVE_INT8)}, - m_H5T_CFLOAT{H5Tcreate(H5T_COMPOUND, sizeof(float) * 2)}, - m_H5T_CDOUBLE{H5Tcreate(H5T_COMPOUND, sizeof(double) * 2)}, - m_H5T_CLONG_DOUBLE{H5Tcreate(H5T_COMPOUND, sizeof(long double) * 2)} + AbstractIOHandler *handler, nlohmann::json config) + : AbstractIOHandlerImpl(handler) + , m_datasetTransferProperty{H5P_DEFAULT} + , m_fileAccessProperty{H5P_DEFAULT} + , m_H5T_BOOL_ENUM{H5Tenum_create(H5T_NATIVE_INT8)} + , m_H5T_CFLOAT{H5Tcreate(H5T_COMPOUND, sizeof(float) * 2)} + , m_H5T_CDOUBLE{H5Tcreate(H5T_COMPOUND, sizeof(double) * 2)} + , m_H5T_CLONG_DOUBLE{H5Tcreate(H5T_COMPOUND, sizeof(long double) * 2)} { // create a h5py compatible bool type - VERIFY(m_H5T_BOOL_ENUM >= 0, "[HDF5] Internal error: Failed to create bool enum"); + VERIFY( + m_H5T_BOOL_ENUM >= 0, + "[HDF5] Internal error: Failed to create bool enum"); std::string t{"TRUE"}; std::string f{"FALSE"}; int64_t tVal = 1; int64_t fVal = 0; herr_t status; status = H5Tenum_insert(m_H5T_BOOL_ENUM, t.c_str(), &tVal); - VERIFY(status == 0, "[HDF5] Internal error: Failed to insert into HDF5 enum"); + VERIFY( + status == 0, "[HDF5] Internal error: Failed to insert into HDF5 enum"); status = H5Tenum_insert(m_H5T_BOOL_ENUM, f.c_str(), &fVal); - VERIFY(status == 0, "[HDF5] Internal error: Failed to insert into HDF5 enum"); + VERIFY( + status == 0, "[HDF5] Internal error: Failed to insert into HDF5 enum"); // create h5py compatible complex types - VERIFY(m_H5T_CFLOAT >= 0, "[HDF5] Internal error: Failed to create complex float"); - VERIFY(m_H5T_CDOUBLE >= 0, "[HDF5] Internal error: Failed to create complex double"); - VERIFY(m_H5T_CLONG_DOUBLE >= 0, "[HDF5] Internal error: Failed to create complex long double"); + VERIFY( + m_H5T_CFLOAT >= 0, + "[HDF5] Internal error: Failed to create complex float"); + VERIFY( + m_H5T_CDOUBLE >= 
0, + "[HDF5] Internal error: Failed to create complex double"); + VERIFY( + m_H5T_CLONG_DOUBLE >= 0, + "[HDF5] Internal error: Failed to create complex long double"); H5Tinsert(m_H5T_CFLOAT, "r", 0, H5T_NATIVE_FLOAT); H5Tinsert(m_H5T_CFLOAT, "i", sizeof(float), H5T_NATIVE_FLOAT); H5Tinsert(m_H5T_CDOUBLE, "r", 0, H5T_NATIVE_DOUBLE); @@ -85,32 +105,32 @@ HDF5IOHandlerImpl::HDF5IOHandlerImpl( H5Tinsert(m_H5T_CLONG_DOUBLE, "r", 0, H5T_NATIVE_LDOUBLE); H5Tinsert(m_H5T_CLONG_DOUBLE, "i", sizeof(long double), H5T_NATIVE_LDOUBLE); - m_chunks = auxiliary::getEnvString( "OPENPMD_HDF5_CHUNKS", "auto" ); + m_chunks = auxiliary::getEnvString("OPENPMD_HDF5_CHUNKS", "auto"); // JSON option can overwrite env option: - if( config.contains( "hdf5" ) ) + if (config.contains("hdf5")) { - m_config = std::move( config[ "hdf5" ] ); + m_config = std::move(config["hdf5"]); // check for global dataset configs - if( m_config.json().contains( "dataset" ) ) + if (m_config.json().contains("dataset")) { - auto datasetConfig = m_config[ "dataset" ]; - if( datasetConfig.json().contains( "chunks" ) ) + auto datasetConfig = m_config["dataset"]; + if (datasetConfig.json().contains("chunks")) { - m_chunks = datasetConfig[ "chunks" ].json().get< std::string >(); + m_chunks = datasetConfig["chunks"].json().get(); } } - if( m_chunks != "auto" && m_chunks != "none" ) + if (m_chunks != "auto" && m_chunks != "none") { std::cerr << "Warning: HDF5 chunking option set to an invalid " - "value '" << m_chunks << "'. Reset to 'auto'." - << std::endl; + "value '" + << m_chunks << "'. Reset to 'auto'." << std::endl; m_chunks = "auto"; } // unused params auto shadow = m_config.invertShadow(); - if( shadow.size() > 0 ) + if (shadow.size() > 0) { std::cerr << "Warning: parts of the JSON configuration for " "HDF5 remain unused:\n" @@ -118,9 +138,10 @@ HDF5IOHandlerImpl::HDF5IOHandlerImpl( } } -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - auto const hdf5_collective_metadata = auxiliary::getEnvString( "OPENPMD_HDF5_COLLECTIVE_METADATA", "ON" ); - if( hdf5_collective_metadata == "ON" ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + auto const hdf5_collective_metadata = + auxiliary::getEnvString("OPENPMD_HDF5_COLLECTIVE_METADATA", "ON"); + if (hdf5_collective_metadata == "ON") m_hdf5_collective_metadata = 1; else m_hdf5_collective_metadata = 0; @@ -131,88 +152,97 @@ HDF5IOHandlerImpl::~HDF5IOHandlerImpl() { herr_t status; status = H5Tclose(m_H5T_BOOL_ENUM); - if( status < 0 ) + if (status < 0) std::cerr << "[HDF5] Internal error: Failed to close bool enum\n"; status = H5Tclose(m_H5T_CFLOAT); - if( status < 0 ) - std::cerr << "[HDF5] Internal error: Failed to close complex float type\n"; + if (status < 0) + std::cerr + << "[HDF5] Internal error: Failed to close complex float type\n"; status = H5Tclose(m_H5T_CDOUBLE); - if( status < 0 ) - std::cerr << "[HDF5] Internal error: Failed to close complex double type\n"; + if (status < 0) + std::cerr + << "[HDF5] Internal error: Failed to close complex double type\n"; status = H5Tclose(m_H5T_CLONG_DOUBLE); - if( status < 0 ) - std::cerr << "[HDF5] Internal error: Failed to close complex long double type\n"; + if (status < 0) + std::cerr << "[HDF5] Internal error: Failed to close complex long " + "double type\n"; - while( !m_openFileIDs.empty() ) + while (!m_openFileIDs.empty()) { auto file = m_openFileIDs.begin(); status = H5Fclose(*file); - if( status < 0 ) - std::cerr << "[HDF5] Internal error: Failed to close HDF5 file (serial)\n"; + if (status < 0) + std::cerr << "[HDF5] Internal error: Failed 
to close HDF5 file " + "(serial)\n"; m_openFileIDs.erase(file); } - if( m_datasetTransferProperty != H5P_DEFAULT ) + if (m_datasetTransferProperty != H5P_DEFAULT) { status = H5Pclose(m_datasetTransferProperty); - if( status < 0 ) - std::cerr << "[HDF5] Internal error: Failed to close HDF5 dataset transfer property\n"; + if (status < 0) + std::cerr << "[HDF5] Internal error: Failed to close HDF5 dataset " + "transfer property\n"; } - if( m_fileAccessProperty != H5P_DEFAULT ) + if (m_fileAccessProperty != H5P_DEFAULT) { status = H5Pclose(m_fileAccessProperty); - if( status < 0 ) - std::cerr << "[HDF5] Internal error: Failed to close HDF5 file access property\n"; + if (status < 0) + std::cerr << "[HDF5] Internal error: Failed to close HDF5 file " + "access property\n"; } } -void -HDF5IOHandlerImpl::createFile(Writable* writable, - Parameter< Operation::CREATE_FILE > const& parameters) +void HDF5IOHandlerImpl::createFile( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Creating a file in read-only mode is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Creating a file in read-only mode is not possible."); - if( !writable->written ) + if (!writable->written) { - if( !auxiliary::directory_exists(m_handler->directory) ) + if (!auxiliary::directory_exists(m_handler->directory)) { bool success = auxiliary::create_directories(m_handler->directory); - VERIFY(success, "[HDF5] Internal error: Failed to create directories during HDF5 file creation"); + VERIFY( + success, + "[HDF5] Internal error: Failed to create directories during " + "HDF5 file creation"); } std::string name = m_handler->directory + parameters.name; - if( !auxiliary::ends_with(name, ".h5") ) + if (!auxiliary::ends_with(name, ".h5")) name += ".h5"; unsigned flags; - if( m_handler->m_backendAccess == Access::CREATE ) + if (m_handler->m_backendAccess == Access::CREATE) flags = H5F_ACC_TRUNC; else flags = H5F_ACC_EXCL; - hid_t id = H5Fcreate(name.c_str(), - flags, - H5P_DEFAULT, - m_fileAccessProperty); + hid_t id = + H5Fcreate(name.c_str(), flags, H5P_DEFAULT, m_fileAccessProperty); VERIFY(id >= 0, "[HDF5] Internal error: Failed to create HDF5 file"); writable->written = true; - writable->abstractFilePosition = std::make_shared< HDF5FilePosition >("/"); + writable->abstractFilePosition = + std::make_shared("/"); m_fileNames[writable] = name; - m_fileNamesWithID[std::move(name)]=id; + m_fileNamesWithID[std::move(name)] = id; m_openFileIDs.insert(id); } } -void -HDF5IOHandlerImpl::createPath(Writable* writable, - Parameter< Operation::CREATE_PATH > const& parameters) +void HDF5IOHandlerImpl::createPath( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Creating a path in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Creating a path in a file opened as read only is not " + "possible."); hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(gapl, true); } @@ -220,101 +250,115 @@ HDF5IOHandlerImpl::createPath(Writable* writable, herr_t status; - if( !writable->written ) + if (!writable->written) { /* Sanitize path */ std::string 
path = parameters.path; - if( auxiliary::starts_with(path, '/') ) + if (auxiliary::starts_with(path, '/')) path = auxiliary::replace_first(path, "/", ""); - if( !auxiliary::ends_with(path, '/') ) + if (!auxiliary::ends_with(path, '/')) path += '/'; /* Open H5Object to write into */ - Writable* position; - if( writable->parent ) + Writable *position; + if (writable->parent) position = writable->parent; else - position = writable; /* root does not have a parent but might still have to be written */ - File file = getFile( position ).get(); - hid_t node_id = H5Gopen(file.id, - concrete_h5_file_position(position).c_str(), - gapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during path creation"); + position = writable; /* root does not have a parent but might still + have to be written */ + File file = getFile(position).get(); + hid_t node_id = + H5Gopen(file.id, concrete_h5_file_position(position).c_str(), gapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during path " + "creation"); /* Create the path in the file */ - std::stack< hid_t > groups; + std::stack groups; groups.push(node_id); - for( std::string const& folder : auxiliary::split(path, "/", false) ) + for (std::string const &folder : auxiliary::split(path, "/", false)) { // avoid creation of paths that already exist - htri_t const found = H5Lexists(groups.top(), folder.c_str(), H5P_DEFAULT); + htri_t const found = + H5Lexists(groups.top(), folder.c_str(), H5P_DEFAULT); if (found > 0) - continue; - - hid_t group_id = H5Gcreate(groups.top(), - folder.c_str(), - H5P_DEFAULT, - H5P_DEFAULT, - H5P_DEFAULT); - VERIFY(group_id >= 0, "[HDF5] Internal error: Failed to create HDF5 group during path creation"); + continue; + + hid_t group_id = H5Gcreate( + groups.top(), + folder.c_str(), + H5P_DEFAULT, + H5P_DEFAULT, + H5P_DEFAULT); + VERIFY( + group_id >= 0, + "[HDF5] Internal error: Failed to create HDF5 group during " + "path creation"); groups.push(group_id); } /* Close the groups */ - while( !groups.empty() ) + while (!groups.empty()) { status = H5Gclose(groups.top()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during path creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during path " + "creation"); groups.pop(); } writable->written = true; - writable->abstractFilePosition = std::make_shared< HDF5FilePosition >(path); + writable->abstractFilePosition = + std::make_shared(path); m_fileNames[writable] = file.name; } status = H5Pclose(gapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during path creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during path " + "creation"); } -void -HDF5IOHandlerImpl::createDataset(Writable* writable, - Parameter< Operation::CREATE_DATASET > const& parameters) +void HDF5IOHandlerImpl::createDataset( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Creating a dataset in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Creating a dataset in a file opened as read only is not " + "possible."); - if( !writable->written ) + if (!writable->written) { /* Sanitize name */ std::string name = parameters.name; - if( auxiliary::starts_with(name, '/') ) + if (auxiliary::starts_with(name, '/')) name = auxiliary::replace_first(name, "/", 
""); - if( auxiliary::ends_with(name, '/') ) + if (auxiliary::ends_with(name, '/')) name = auxiliary::replace_last(name, "/", ""); - auto config = nlohmann::json::parse( parameters.options ); + auto config = nlohmann::json::parse(parameters.options); // general bool is_resizable_dataset = false; - if( config.contains( "resizable" ) ) + if (config.contains("resizable")) { - is_resizable_dataset = config.at( "resizable" ).get< bool >(); + is_resizable_dataset = config.at("resizable").get(); } // HDF5 specific - if( config.contains( "hdf5" ) && - config[ "hdf5" ].contains( "dataset" ) ) + if (config.contains("hdf5") && config["hdf5"].contains("dataset")) { - auxiliary::TracingJSON datasetConfig{ - config[ "hdf5" ][ "dataset" ] }; + auxiliary::TracingJSON datasetConfig{config["hdf5"]["dataset"]}; /* * @todo Read more options from config here. */ auto shadow = datasetConfig.invertShadow(); - if( shadow.size() > 0 ) + if (shadow.size() > 0) { std::cerr << "Warning: parts of the JSON configuration for " "HDF5 dataset '" @@ -324,26 +368,30 @@ HDF5IOHandlerImpl::createDataset(Writable* writable, } hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(gapl, true); } #endif /* Open H5Object to write into */ - auto res = getFile( writable ); - File file = res ? res.get() : getFile( writable->parent ).get(); - hid_t node_id = H5Gopen(file.id, - concrete_h5_file_position(writable).c_str(), - gapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during dataset creation"); + auto res = getFile(writable); + File file = res ? res.get() : getFile(writable->parent).get(); + hid_t node_id = + H5Gopen(file.id, concrete_h5_file_position(writable).c_str(), gapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during dataset " + "creation"); Datatype d = parameters.dtype; - if( d == Datatype::UNDEFINED ) + if (d == Datatype::UNDEFINED) { // TODO handle unknown dtype - std::cerr << "[HDF5] Datatype::UNDEFINED caught during dataset creation (serial HDF5)" << std::endl; + std::cerr << "[HDF5] Datatype::UNDEFINED caught during dataset " + "creation (serial HDF5)" + << std::endl; d = Datatype::BOOL; } @@ -353,54 +401,69 @@ HDF5IOHandlerImpl::createDataset(Writable* writable, // Attribute a(0); Attribute a(static_cast(0)); a.dtype = d; - std::vector< hsize_t > dims; + std::vector dims; std::uint64_t num_elements = 1u; - for( auto const& val : parameters.extent ) { - dims.push_back(static_cast< hsize_t >(val)); + for (auto const &val : parameters.extent) + { + dims.push_back(static_cast(val)); num_elements *= val; } - std::vector< hsize_t > max_dims( dims.begin(), dims.end() ); - if( is_resizable_dataset ) - max_dims.assign( dims.size(), H5F_UNLIMITED ); + std::vector max_dims(dims.begin(), dims.end()); + if (is_resizable_dataset) + max_dims.assign(dims.size(), H5F_UNLIMITED); - hid_t space = H5Screate_simple(static_cast< int >(dims.size()), dims.data(), max_dims.data()); - VERIFY(space >= 0, "[HDF5] Internal error: Failed to create dataspace during dataset creation"); + hid_t space = H5Screate_simple( + static_cast(dims.size()), dims.data(), max_dims.data()); + VERIFY( + space >= 0, + "[HDF5] Internal error: Failed to create dataspace during dataset " + "creation"); /* enable chunking on the created dataspace */ hid_t datasetCreationProperty = H5Pcreate(H5P_DATASET_CREATE); - if( 
num_elements != 0u && m_chunks != "none" ) + H5Pset_fill_time(datasetCreationProperty, H5D_FILL_TIME_NEVER); + + if (num_elements != 0u && m_chunks != "none") { //! @todo add per dataset chunk control from JSON config // get chunking dimensions - std::vector< hsize_t > chunk_dims = getOptimalChunkDims(dims, toBytes(d)); + std::vector chunk_dims = + getOptimalChunkDims(dims, toBytes(d)); //! @todo allow overwrite with user-provided chunk size - //for( auto const& val : parameters.chunkSize ) - // chunk_dims.push_back(static_cast< hsize_t >(val)); - - herr_t status = H5Pset_chunk(datasetCreationProperty, chunk_dims.size(), chunk_dims.data()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to set chunk size during dataset creation"); + // for( auto const& val : parameters.chunkSize ) + // chunk_dims.push_back(static_cast< hsize_t >(val)); + + herr_t status = H5Pset_chunk( + datasetCreationProperty, chunk_dims.size(), chunk_dims.data()); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to set chunk size during " + "dataset creation"); } - std::string const& compression = parameters.compression; - if( !compression.empty() ) - std::cerr << "[HDF5] Compression not yet implemented in HDF5 backend." - << std::endl; + std::string const &compression = parameters.compression; + if (!compression.empty()) + std::cerr + << "[HDF5] Compression not yet implemented in HDF5 backend." + << std::endl; /* { - std::vector< std::string > args = auxiliary::split(compression, ":"); - std::string const& format = args[0]; - if( (format == "zlib" || format == "gzip" || format == "deflate") + std::vector< std::string > args = auxiliary::split(compression, + ":"); std::string const& format = args[0]; if( (format == "zlib" || + format == "gzip" || format == "deflate") && args.size() == 2 ) { - status = H5Pset_deflate(datasetCreationProperty, std::stoi(args[1])); - VERIFY(status == 0, "[HDF5] Internal error: Failed to set deflate compression during dataset creation"); - } else if( format == "szip" || format == "nbit" || format == "scaleoffset" ) - std::cerr << "[HDF5] Compression format " << format - << " not yet implemented. Data will not be compressed!" + status = H5Pset_deflate(datasetCreationProperty, + std::stoi(args[1])); VERIFY(status == 0, "[HDF5] Internal error: Failed + to set deflate compression during dataset creation"); } else if( format + == "szip" || format == "nbit" || format == "scaleoffset" ) std::cerr << + "[HDF5] Compression format " << format + << " not yet implemented. Data will not be + compressed!" << std::endl; else std::cerr << "[HDF5] Compression format " << format @@ -409,127 +472,161 @@ HDF5IOHandlerImpl::createDataset(Writable* writable, } */ - std::string const& transform = parameters.transform; - if( !transform.empty() ) - std::cerr << "[HDF5] Custom transform not yet implemented in HDF5 backend." + std::string const &transform = parameters.transform; + if (!transform.empty()) + std::cerr << "[HDF5] Custom transform not yet implemented in HDF5 " + "backend." 
<< std::endl; GetH5DataType getH5DataType({ - { typeid(bool).name(), m_H5T_BOOL_ENUM }, - { typeid(std::complex< float >).name(), m_H5T_CFLOAT }, - { typeid(std::complex< double >).name(), m_H5T_CDOUBLE }, - { typeid(std::complex< long double >).name(), m_H5T_CLONG_DOUBLE }, + {typeid(bool).name(), m_H5T_BOOL_ENUM}, + {typeid(std::complex).name(), m_H5T_CFLOAT}, + {typeid(std::complex).name(), m_H5T_CDOUBLE}, + {typeid(std::complex).name(), m_H5T_CLONG_DOUBLE}, }); hid_t datatype = getH5DataType(a); - VERIFY(datatype >= 0, "[HDF5] Internal error: Failed to get HDF5 datatype during dataset creation"); - hid_t group_id = H5Dcreate(node_id, - name.c_str(), - datatype, - space, - H5P_DEFAULT, - datasetCreationProperty, - H5P_DEFAULT); - VERIFY(group_id >= 0, "[HDF5] Internal error: Failed to create HDF5 group during dataset creation"); + VERIFY( + datatype >= 0, + "[HDF5] Internal error: Failed to get HDF5 datatype during dataset " + "creation"); + hid_t group_id = H5Dcreate( + node_id, + name.c_str(), + datatype, + space, + H5P_DEFAULT, + datasetCreationProperty, + H5P_DEFAULT); + VERIFY( + group_id >= 0, + "[HDF5] Internal error: Failed to create HDF5 group during dataset " + "creation"); herr_t status; status = H5Dclose(group_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset during dataset creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset during " + "dataset creation"); status = H5Tclose(datatype); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 datatype during dataset creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 datatype during " + "dataset creation"); status = H5Pclose(datasetCreationProperty); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset creation property during dataset creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset creation " + "property during dataset creation"); status = H5Sclose(space); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset space during dataset creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset space during " + "dataset creation"); status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during dataset creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during dataset " + "creation"); status = H5Pclose(gapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during dataset creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during " + "dataset creation"); writable->written = true; - writable->abstractFilePosition = std::make_shared< HDF5FilePosition >(name); + writable->abstractFilePosition = + std::make_shared(name); m_fileNames[writable] = file.name; } } -void -HDF5IOHandlerImpl::extendDataset(Writable* writable, - Parameter< Operation::EXTEND_DATASET > const& parameters) +void HDF5IOHandlerImpl::extendDataset( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Extending a dataset in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Extending a dataset in a file opened as read only is not " + "possible."); - if( !writable->written ) - throw std::runtime_error("[HDF5] Extending an 
unwritten Dataset is not possible."); + if (!writable->written) + throw std::runtime_error( + "[HDF5] Extending an unwritten Dataset is not possible."); - auto res = getFile( writable ); - if( !res ) - res = getFile( writable->parent ); - hid_t dataset_id = H5Dopen(res.get().id, - concrete_h5_file_position(writable).c_str(), - H5P_DEFAULT); - VERIFY(dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset extension"); + auto res = getFile(writable); + if (!res) + res = getFile(writable->parent); + hid_t dataset_id = H5Dopen( + res.get().id, concrete_h5_file_position(writable).c_str(), H5P_DEFAULT); + VERIFY( + dataset_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " + "extension"); // Datasets may only be extended if they have chunked layout, so let's see // whether this one does { - hid_t dataset_space = H5Dget_space( dataset_id ); - int ndims = H5Sget_simple_extent_ndims( dataset_space ); + hid_t dataset_space = H5Dget_space(dataset_id); + int ndims = H5Sget_simple_extent_ndims(dataset_space); VERIFY( ndims >= 0, "[HDF5]: Internal error: Failed to retrieve dimensionality of " - "dataset during dataset read." ); - hid_t propertyList = H5Dget_create_plist( dataset_id ); - std::vector< hsize_t > chunkExtent( ndims, 0 ); + "dataset during dataset read."); + hid_t propertyList = H5Dget_create_plist(dataset_id); + std::vector chunkExtent(ndims, 0); int chunkDimensionality = - H5Pget_chunk( propertyList, ndims, chunkExtent.data() ); - if( chunkDimensionality < 0 ) + H5Pget_chunk(propertyList, ndims, chunkExtent.data()); + if (chunkDimensionality < 0) { throw std::runtime_error( "[HDF5] Cannot extend datasets unless written with chunked " - "layout." ); + "layout."); } } - std::vector< hsize_t > size; - for( auto const& val : parameters.extent ) - size.push_back(static_cast< hsize_t >(val)); + std::vector size; + for (auto const &val : parameters.extent) + size.push_back(static_cast(val)); herr_t status; status = H5Dset_extent(dataset_id, size.data()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to extend HDF5 dataset during dataset extension"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to extend HDF5 dataset during dataset " + "extension"); status = H5Dclose(dataset_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset during dataset extension"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset during dataset " + "extension"); } -void -HDF5IOHandlerImpl::availableChunks( - Writable * writable, - Parameter< Operation::AVAILABLE_CHUNKS > & parameters ) +void HDF5IOHandlerImpl::availableChunks( + Writable *writable, Parameter ¶meters) { - auto fname = m_fileNames.find( writable ); - VERIFY( fname != m_fileNames.end(), - "[HDF5] File name not found in writable" ); - auto fid = m_fileNamesWithID.find( fname->second ); - VERIFY( fid != m_fileNamesWithID.end(), - "[HDF5] File ID not found with file name" ); + auto fname = m_fileNames.find(writable); + VERIFY( + fname != m_fileNames.end(), "[HDF5] File name not found in writable"); + auto fid = m_fileNamesWithID.find(fname->second); + VERIFY( + fid != m_fileNamesWithID.end(), + "[HDF5] File ID not found with file name"); hid_t dataset_id = H5Dopen( - fid->second, - concrete_h5_file_position( writable ).c_str(), - H5P_DEFAULT ); + fid->second, concrete_h5_file_position(writable).c_str(), H5P_DEFAULT); VERIFY( dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " - "read" ); - hid_t 
dataset_space = H5Dget_space( dataset_id ); - int ndims = H5Sget_simple_extent_ndims( dataset_space ); + "read"); + hid_t dataset_space = H5Dget_space(dataset_id); + int ndims = H5Sget_simple_extent_ndims(dataset_space); VERIFY( ndims >= 0, "[HDF5]: Internal error: Failed to retrieve dimensionality of " "dataset " - "during dataset read." ); + "during dataset read."); // // now let's figure out whether this one has chunks // hid_t propertyList = H5Dget_create_plist( dataset_id ); @@ -549,168 +646,175 @@ HDF5IOHandlerImpl::availableChunks( // */ // } - std::vector< hsize_t > dims( ndims, 0 ); + std::vector dims(ndims, 0); // return value is equal to ndims - H5Sget_simple_extent_dims( dataset_space, dims.data(), nullptr ); + H5Sget_simple_extent_dims(dataset_space, dims.data(), nullptr); - Offset offset( ndims, 0 ); + Offset offset(ndims, 0); Extent extent; - extent.reserve( ndims ); - for( auto e : dims ) + extent.reserve(ndims); + for (auto e : dims) { - extent.push_back( e ); + extent.push_back(e); } parameters.chunks->push_back( - WrittenChunkInfo( std::move( offset ), std::move( extent ) ) ); + WrittenChunkInfo(std::move(offset), std::move(extent))); } -void -HDF5IOHandlerImpl::openFile( - Writable * writable, - Parameter< Operation::OPEN_FILE > const & parameters ) +void HDF5IOHandlerImpl::openFile( + Writable *writable, Parameter const ¶meters) { - if( !auxiliary::directory_exists(m_handler->directory) ) - throw no_such_file_error("[HDF5] Supplied directory is not valid: " + m_handler->directory); + if (!auxiliary::directory_exists(m_handler->directory)) + throw no_such_file_error( + "[HDF5] Supplied directory is not valid: " + m_handler->directory); std::string name = m_handler->directory + parameters.name; - if( !auxiliary::ends_with(name, ".h5") ) + if (!auxiliary::ends_with(name, ".h5")) name += ".h5"; // this may (intentionally) overwrite - m_fileNames[ writable ] = name; + m_fileNames[writable] = name; // check if file already open auto search = m_fileNamesWithID.find(name); - if (search != m_fileNamesWithID.end()) { - return; + if (search != m_fileNamesWithID.end()) + { + return; } unsigned flags; Access at = m_handler->m_backendAccess; - if( at == Access::READ_ONLY ) + if (at == Access::READ_ONLY) flags = H5F_ACC_RDONLY; - else if( at == Access::READ_WRITE || at == Access::CREATE ) + else if (at == Access::READ_WRITE || at == Access::CREATE) flags = H5F_ACC_RDWR; else throw std::runtime_error("[HDF5] Unknown file Access"); hid_t file_id; - file_id = H5Fopen(name.c_str(), - flags, - m_fileAccessProperty); - if( file_id < 0 ) + file_id = H5Fopen(name.c_str(), flags, m_fileAccessProperty); + if (file_id < 0) throw no_such_file_error("[HDF5] Failed to open HDF5 file " + name); writable->written = true; - writable->abstractFilePosition = std::make_shared< HDF5FilePosition >("/"); + writable->abstractFilePosition = std::make_shared("/"); m_fileNamesWithID.erase(name); m_fileNamesWithID.insert({std::move(name), file_id}); m_openFileIDs.insert(file_id); } -void -HDF5IOHandlerImpl::closeFile( - Writable * writable, - Parameter< Operation::CLOSE_FILE > const & ) +void HDF5IOHandlerImpl::closeFile( + Writable *writable, Parameter const &) { - auto optionalFile = getFile( writable ); - if( ! 
optionalFile ) + auto optionalFile = getFile(writable); + if (!optionalFile) { throw std::runtime_error( "[HDF5] Trying to close a file that is not " - "present in the backend" ); + "present in the backend"); } File file = optionalFile.get(); - H5Fclose( file.id ); - m_openFileIDs.erase( file.id ); - m_fileNames.erase( writable ); + H5Fclose(file.id); + m_openFileIDs.erase(file.id); + m_fileNames.erase(writable); - m_fileNamesWithID.erase( file.name ); + m_fileNamesWithID.erase(file.name); } -void -HDF5IOHandlerImpl::openPath( - Writable * writable, - Parameter< Operation::OPEN_PATH > const & parameters ) +void HDF5IOHandlerImpl::openPath( + Writable *writable, Parameter const ¶meters) { File file = getFile(writable->parent).get(); hid_t node_id, path_id; hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(gapl, true); } #endif - node_id = H5Gopen(file.id, - concrete_h5_file_position(writable->parent).c_str(), - gapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during path opening"); + node_id = H5Gopen( + file.id, concrete_h5_file_position(writable->parent).c_str(), gapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during path opening"); /* Sanitize path */ std::string path = parameters.path; - if( !path.empty() ) + if (!path.empty()) { - if( auxiliary::starts_with(path, '/') ) + if (auxiliary::starts_with(path, '/')) path = auxiliary::replace_first(path, "/", ""); - if( !auxiliary::ends_with(path, '/') ) + if (!auxiliary::ends_with(path, '/')) path += '/'; - path_id = H5Gopen(node_id, - path.c_str(), - gapl); - VERIFY(path_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during path opening"); + path_id = H5Gopen(node_id, path.c_str(), gapl); + VERIFY( + path_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during path " + "opening"); herr_t status; status = H5Gclose(path_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during path opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during path " + "opening"); } herr_t status; status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during path opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during path " + "opening"); status = H5Pclose(gapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during path opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during path " + "opening"); writable->written = true; - writable->abstractFilePosition = std::make_shared< HDF5FilePosition >(path); + writable->abstractFilePosition = std::make_shared(path); m_fileNames.erase(writable); m_fileNames.insert({writable, file.name}); } -void -HDF5IOHandlerImpl::openDataset(Writable* writable, - Parameter< Operation::OPEN_DATASET > & parameters) +void HDF5IOHandlerImpl::openDataset( + Writable *writable, Parameter ¶meters) { - File file = getFile( writable->parent ).get(); + File file = getFile(writable->parent).get(); hid_t node_id, dataset_id; hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { 
H5Pset_all_coll_metadata_ops(gapl, true); } #endif - node_id = H5Gopen(file.id, - concrete_h5_file_position(writable->parent).c_str(), - gapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during dataset opening"); + node_id = H5Gopen( + file.id, concrete_h5_file_position(writable->parent).c_str(), gapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during dataset " + "opening"); /* Sanitize name */ std::string name = parameters.name; - if( auxiliary::starts_with(name, '/') ) + if (auxiliary::starts_with(name, '/')) name = auxiliary::replace_first(name, "/", ""); - if( !auxiliary::ends_with(name, '/') ) + if (!auxiliary::ends_with(name, '/')) name += '/'; - dataset_id = H5Dopen(node_id, - name.c_str(), - H5P_DEFAULT); - VERIFY(dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset opening"); + dataset_id = H5Dopen(node_id, name.c_str(), H5P_DEFAULT); + VERIFY( + dataset_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " + "opening"); hid_t dataset_type, dataset_space; dataset_type = H5Dget_type(dataset_id); @@ -720,99 +824,117 @@ HDF5IOHandlerImpl::openDataset(Writable* writable, using DT = Datatype; Datatype d; - if( dataset_class == H5S_SIMPLE || dataset_class == H5S_SCALAR || dataset_class == H5S_NULL ) + if (dataset_class == H5S_SIMPLE || dataset_class == H5S_SCALAR || + dataset_class == H5S_NULL) { - if( H5Tequal(dataset_type, H5T_NATIVE_CHAR) ) + if (H5Tequal(dataset_type, H5T_NATIVE_CHAR)) d = DT::CHAR; - else if( H5Tequal(dataset_type, H5T_NATIVE_UCHAR) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_UCHAR)) d = DT::UCHAR; - else if( H5Tequal(dataset_type, H5T_NATIVE_SHORT) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_SHORT)) d = DT::SHORT; - else if( H5Tequal(dataset_type, H5T_NATIVE_INT) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_INT)) d = DT::INT; - else if( H5Tequal(dataset_type, H5T_NATIVE_LONG) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_LONG)) d = DT::LONG; - else if( H5Tequal(dataset_type, H5T_NATIVE_LLONG) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_LLONG)) d = DT::LONGLONG; - else if( H5Tequal(dataset_type, H5T_NATIVE_FLOAT) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_FLOAT)) d = DT::FLOAT; - else if( H5Tequal(dataset_type, H5T_NATIVE_DOUBLE) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_DOUBLE)) d = DT::DOUBLE; - else if( H5Tequal(dataset_type, H5T_NATIVE_LDOUBLE) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_LDOUBLE)) d = DT::LONG_DOUBLE; - else if( H5Tequal(dataset_type, m_H5T_CFLOAT) ) + else if (H5Tequal(dataset_type, m_H5T_CFLOAT)) d = DT::CFLOAT; - else if( H5Tequal(dataset_type, m_H5T_CDOUBLE) ) + else if (H5Tequal(dataset_type, m_H5T_CDOUBLE)) d = DT::CDOUBLE; - else if( H5Tequal(dataset_type, m_H5T_CLONG_DOUBLE) ) + else if (H5Tequal(dataset_type, m_H5T_CLONG_DOUBLE)) d = DT::CLONG_DOUBLE; - else if( H5Tequal(dataset_type, H5T_NATIVE_USHORT) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_USHORT)) d = DT::USHORT; - else if( H5Tequal(dataset_type, H5T_NATIVE_UINT) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_UINT)) d = DT::UINT; - else if( H5Tequal(dataset_type, H5T_NATIVE_ULONG) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_ULONG)) d = DT::ULONG; - else if( H5Tequal(dataset_type, H5T_NATIVE_ULLONG) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_ULLONG)) d = DT::ULONGLONG; - else if( H5Tget_class(dataset_type) == H5T_STRING ) + else if (H5Tget_class(dataset_type) == H5T_STRING) d = DT::STRING; else throw 
std::runtime_error("[HDF5] Unknown dataset type"); - } else + } + else throw std::runtime_error("[HDF5] Unsupported dataset class"); auto dtype = parameters.dtype; *dtype = d; int ndims = H5Sget_simple_extent_ndims(dataset_space); - std::vector< hsize_t > dims(ndims, 0); - std::vector< hsize_t > maxdims(ndims, 0); + std::vector dims(ndims, 0); + std::vector maxdims(ndims, 0); - H5Sget_simple_extent_dims(dataset_space, - dims.data(), - maxdims.data()); + H5Sget_simple_extent_dims(dataset_space, dims.data(), maxdims.data()); Extent e; - for( auto const& val : dims ) + for (auto const &val : dims) e.push_back(val); auto extent = parameters.extent; *extent = e; herr_t status; status = H5Sclose(dataset_space); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset space during dataset opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset space during " + "dataset opening"); status = H5Tclose(dataset_type); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset type during dataset opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset type during " + "dataset opening"); status = H5Dclose(dataset_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset during dataset opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset during dataset " + "opening"); status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during dataset opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during dataset " + "opening"); status = H5Pclose(gapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during dataset opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during dataset " + "opening"); writable->written = true; - writable->abstractFilePosition = std::make_shared< HDF5FilePosition >(name); + writable->abstractFilePosition = std::make_shared(name); m_fileNames[writable] = file.name; } -void -HDF5IOHandlerImpl::deleteFile(Writable* writable, - Parameter< Operation::DELETE_FILE > const& parameters) +void HDF5IOHandlerImpl::deleteFile( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Deleting a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Deleting a file opened as read only is not possible."); - if( writable->written ) + if (writable->written) { - hid_t file_id = getFile( writable ).get().id; + hid_t file_id = getFile(writable).get().id; herr_t status = H5Fclose(file_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 file during file deletion"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 file during file " + "deletion"); std::string name = m_handler->directory + parameters.name; - if( !auxiliary::ends_with(name, ".h5") ) + if (!auxiliary::ends_with(name, ".h5")) name += ".h5"; - if( !auxiliary::file_exists(name) ) + if (!auxiliary::file_exists(name)) throw std::runtime_error("[HDF5] File does not exist: " + name); auxiliary::remove_file(name); @@ -826,41 +948,50 @@ HDF5IOHandlerImpl::deleteFile(Writable* writable, } } -void -HDF5IOHandlerImpl::deletePath(Writable* writable, - Parameter< Operation::DELETE_PATH > const& parameters) +void HDF5IOHandlerImpl::deletePath( + 
Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Deleting a path in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Deleting a path in a file opened as read only is not " + "possible."); - if( writable->written ) + if (writable->written) { /* Sanitize path */ std::string path = parameters.path; - if( auxiliary::starts_with(path, '/') ) + if (auxiliary::starts_with(path, '/')) path = auxiliary::replace_first(path, "/", ""); - if( !auxiliary::ends_with(path, '/') ) + if (!auxiliary::ends_with(path, '/')) path += '/'; /* Open H5Object to delete in * Ugly hack: H5Ldelete can't delete "." * Work around this by deleting from the parent */ - auto res = getFile( writable ); - File file = res ? res.get() : getFile( writable->parent ).get(); - hid_t node_id = H5Gopen(file.id, - concrete_h5_file_position(writable->parent).c_str(), - H5P_DEFAULT); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during path deletion"); - - path += static_cast< HDF5FilePosition* >(writable->abstractFilePosition.get())->location; - herr_t status = H5Ldelete(node_id, - path.c_str(), - H5P_DEFAULT); - VERIFY(status == 0, "[HDF5] Internal error: Failed to delete HDF5 group"); + auto res = getFile(writable); + File file = res ? res.get() : getFile(writable->parent).get(); + hid_t node_id = H5Gopen( + file.id, + concrete_h5_file_position(writable->parent).c_str(), + H5P_DEFAULT); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during path " + "deletion"); + + path += static_cast( + writable->abstractFilePosition.get()) + ->location; + herr_t status = H5Ldelete(node_id, path.c_str(), H5P_DEFAULT); + VERIFY( + status == 0, "[HDF5] Internal error: Failed to delete HDF5 group"); status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during path deletion"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during path " + "deletion"); writable->written = false; writable->abstractFilePosition.reset(); @@ -869,41 +1000,50 @@ HDF5IOHandlerImpl::deletePath(Writable* writable, } } -void -HDF5IOHandlerImpl::deleteDataset(Writable* writable, - Parameter< Operation::DELETE_DATASET > const& parameters) +void HDF5IOHandlerImpl::deleteDataset( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Deleting a path in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Deleting a path in a file opened as read only is not " + "possible."); - if( writable->written ) + if (writable->written) { /* Sanitize name */ std::string name = parameters.name; - if( auxiliary::starts_with(name, '/') ) + if (auxiliary::starts_with(name, '/')) name = auxiliary::replace_first(name, "/", ""); - if( !auxiliary::ends_with(name, '/') ) + if (!auxiliary::ends_with(name, '/')) name += '/'; /* Open H5Object to delete in * Ugly hack: H5Ldelete can't delete "." * Work around this by deleting from the parent */ - auto res = getFile( writable ); - File file = res ? 
res.get() : getFile( writable->parent ).get(); - hid_t node_id = H5Gopen(file.id, - concrete_h5_file_position(writable->parent).c_str(), - H5P_DEFAULT); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during dataset deletion"); - - name += static_cast< HDF5FilePosition* >(writable->abstractFilePosition.get())->location; - herr_t status = H5Ldelete(node_id, - name.c_str(), - H5P_DEFAULT); - VERIFY(status == 0, "[HDF5] Internal error: Failed to delete HDF5 group"); + auto res = getFile(writable); + File file = res ? res.get() : getFile(writable->parent).get(); + hid_t node_id = H5Gopen( + file.id, + concrete_h5_file_position(writable->parent).c_str(), + H5P_DEFAULT); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during dataset " + "deletion"); + + name += static_cast( + writable->abstractFilePosition.get()) + ->location; + herr_t status = H5Ldelete(node_id, name.c_str(), H5P_DEFAULT); + VERIFY( + status == 0, "[HDF5] Internal error: Failed to delete HDF5 group"); status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during dataset deletion"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during dataset " + "deletion"); writable->written = false; writable->abstractFilePosition.reset(); @@ -912,704 +1052,770 @@ HDF5IOHandlerImpl::deleteDataset(Writable* writable, } } -void -HDF5IOHandlerImpl::deleteAttribute(Writable* writable, - Parameter< Operation::DELETE_ATT > const& parameters) +void HDF5IOHandlerImpl::deleteAttribute( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Deleting an attribute in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Deleting an attribute in a file opened as read only is not " + "possible."); - if( writable->written ) + if (writable->written) { std::string name = parameters.name; /* Open H5Object to delete in */ - auto res = getFile( writable ); - File file = res ? res.get() : getFile( writable->parent ).get(); - hid_t node_id = H5Oopen(file.id, - concrete_h5_file_position(writable).c_str(), - H5P_DEFAULT); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during attribute deletion"); + auto res = getFile(writable); + File file = res ? 
res.get() : getFile(writable->parent).get(); + hid_t node_id = H5Oopen( + file.id, concrete_h5_file_position(writable).c_str(), H5P_DEFAULT); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during attribute " + "deletion"); - herr_t status = H5Adelete(node_id, - name.c_str()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to delete HDF5 attribute"); + herr_t status = H5Adelete(node_id, name.c_str()); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to delete HDF5 attribute"); status = H5Oclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during attribute deletion"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during " + "attribute deletion"); } } -void -HDF5IOHandlerImpl::writeDataset(Writable* writable, - Parameter< Operation::WRITE_DATASET > const& parameters) +void HDF5IOHandlerImpl::writeDataset( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Writing into a dataset in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Writing into a dataset in a file opened as read only is " + "not possible."); - auto res = getFile( writable ); - File file = res ? res.get() : getFile( writable->parent ).get(); + auto res = getFile(writable); + File file = res ? res.get() : getFile(writable->parent).get(); hid_t dataset_id, filespace, memspace; herr_t status; - dataset_id = H5Dopen(file.id, - concrete_h5_file_position(writable).c_str(), - H5P_DEFAULT); - VERIFY(dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset write"); - - std::vector< hsize_t > start; - for( auto const& val : parameters.offset ) - start.push_back(static_cast< hsize_t >(val)); - std::vector< hsize_t > stride(start.size(), 1); /* contiguous region */ - std::vector< hsize_t > count(start.size(), 1); /* single region */ - std::vector< hsize_t > block; - for( auto const& val : parameters.extent ) - block.push_back(static_cast< hsize_t >(val)); - memspace = H5Screate_simple(static_cast< int >(block.size()), block.data(), nullptr); + dataset_id = H5Dopen( + file.id, concrete_h5_file_position(writable).c_str(), H5P_DEFAULT); + VERIFY( + dataset_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " + "write"); + + std::vector start; + for (auto const &val : parameters.offset) + start.push_back(static_cast(val)); + std::vector stride(start.size(), 1); /* contiguous region */ + std::vector count(start.size(), 1); /* single region */ + std::vector block; + for (auto const &val : parameters.extent) + block.push_back(static_cast(val)); + memspace = + H5Screate_simple(static_cast(block.size()), block.data(), nullptr); filespace = H5Dget_space(dataset_id); - status = H5Sselect_hyperslab(filespace, - H5S_SELECT_SET, - start.data(), - stride.data(), - count.data(), - block.data()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to select hyperslab during dataset write"); + status = H5Sselect_hyperslab( + filespace, + H5S_SELECT_SET, + start.data(), + stride.data(), + count.data(), + block.data()); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to select hyperslab during dataset " + "write"); - std::shared_ptr< void const > data = parameters.data; + std::shared_ptr data = parameters.data; GetH5DataType getH5DataType({ - { typeid(bool).name(), m_H5T_BOOL_ENUM }, - { 
typeid(std::complex< float >).name(), m_H5T_CFLOAT }, - { typeid(std::complex< double >).name(), m_H5T_CDOUBLE }, - { typeid(std::complex< long double >).name(), m_H5T_CLONG_DOUBLE }, + {typeid(bool).name(), m_H5T_BOOL_ENUM}, + {typeid(std::complex).name(), m_H5T_CFLOAT}, + {typeid(std::complex).name(), m_H5T_CDOUBLE}, + {typeid(std::complex).name(), m_H5T_CLONG_DOUBLE}, }); - //TODO Check if parameter dtype and dataset dtype match + // TODO Check if parameter dtype and dataset dtype match Attribute a(0); a.dtype = parameters.dtype; hid_t dataType = getH5DataType(a); - VERIFY(dataType >= 0, "[HDF5] Internal error: Failed to get HDF5 datatype during dataset write"); - switch( a.dtype ) + VERIFY( + dataType >= 0, + "[HDF5] Internal error: Failed to get HDF5 datatype during dataset " + "write"); + switch (a.dtype) { using DT = Datatype; - case DT::LONG_DOUBLE: - case DT::DOUBLE: - case DT::FLOAT: - case DT::CLONG_DOUBLE: - case DT::CDOUBLE: - case DT::CFLOAT: - case DT::SHORT: - case DT::INT: - case DT::LONG: - case DT::LONGLONG: - case DT::USHORT: - case DT::UINT: - case DT::ULONG: - case DT::ULONGLONG: - case DT::CHAR: - case DT::UCHAR: - case DT::BOOL: - status = H5Dwrite(dataset_id, - dataType, - memspace, - filespace, - m_datasetTransferProperty, - data.get()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to write dataset " + concrete_h5_file_position(writable)); - break; - case DT::UNDEFINED: - throw std::runtime_error("[HDF5] Undefined Attribute datatype"); - case DT::DATATYPE: - throw std::runtime_error("[HDF5] Meta-Datatype leaked into IO"); - default: - throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); + case DT::LONG_DOUBLE: + case DT::DOUBLE: + case DT::FLOAT: + case DT::CLONG_DOUBLE: + case DT::CDOUBLE: + case DT::CFLOAT: + case DT::SHORT: + case DT::INT: + case DT::LONG: + case DT::LONGLONG: + case DT::USHORT: + case DT::UINT: + case DT::ULONG: + case DT::ULONGLONG: + case DT::CHAR: + case DT::UCHAR: + case DT::BOOL: + status = H5Dwrite( + dataset_id, + dataType, + memspace, + filespace, + m_datasetTransferProperty, + data.get()); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to write dataset " + + concrete_h5_file_position(writable)); + break; + case DT::UNDEFINED: + throw std::runtime_error("[HDF5] Undefined Attribute datatype"); + case DT::DATATYPE: + throw std::runtime_error("[HDF5] Meta-Datatype leaked into IO"); + default: + throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); } status = H5Tclose(dataType); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset datatype during dataset write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset datatype during " + "dataset write"); status = H5Sclose(filespace); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset file space during dataset write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset file space during " + "dataset write"); status = H5Sclose(memspace); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset memory space during dataset write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset memory space during " + "dataset write"); status = H5Dclose(dataset_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset " + concrete_h5_file_position(writable) + " during dataset write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset " + + concrete_h5_file_position(writable) + " during dataset 
write"); m_fileNames[writable] = file.name; } -void -HDF5IOHandlerImpl::writeAttribute(Writable* writable, - Parameter< Operation::WRITE_ATT > const& parameters) +void HDF5IOHandlerImpl::writeAttribute( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Writing an attribute in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Writing an attribute in a file opened as read only is not " + "possible."); - auto res = getFile( writable ); - File file = res ? res.get() : getFile( writable->parent ).get(); + auto res = getFile(writable); + File file = res ? res.get() : getFile(writable->parent).get(); hid_t node_id, attribute_id; hid_t fapl = H5Pcreate(H5P_LINK_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(fapl, true); } #endif - node_id = H5Oopen(file.id, - concrete_h5_file_position(writable).c_str(), - fapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 object during attribute write"); + node_id = + H5Oopen(file.id, concrete_h5_file_position(writable).c_str(), fapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 object during attribute " + "write"); Attribute const att(parameters.resource); Datatype dtype = parameters.dtype; herr_t status; GetH5DataType getH5DataType({ - { typeid(bool).name(), m_H5T_BOOL_ENUM }, - { typeid(std::complex< float >).name(), m_H5T_CFLOAT }, - { typeid(std::complex< double >).name(), m_H5T_CDOUBLE }, - { typeid(std::complex< long double >).name(), m_H5T_CLONG_DOUBLE }, + {typeid(bool).name(), m_H5T_BOOL_ENUM}, + {typeid(std::complex).name(), m_H5T_CFLOAT}, + {typeid(std::complex).name(), m_H5T_CDOUBLE}, + {typeid(std::complex).name(), m_H5T_CLONG_DOUBLE}, }); hid_t dataType = getH5DataType(att); - VERIFY(dataType >= 0, "[HDF5] Internal error: Failed to get HDF5 datatype during attribute write"); + VERIFY( + dataType >= 0, + "[HDF5] Internal error: Failed to get HDF5 datatype during attribute " + "write"); std::string name = parameters.name; - if( H5Aexists(node_id, name.c_str()) == 0 ) + if (H5Aexists(node_id, name.c_str()) == 0) { hid_t dataspace = getH5DataSpace(att); - VERIFY(dataspace >= 0, "[HDF5] Internal error: Failed to get HDF5 dataspace during attribute write"); - attribute_id = H5Acreate(node_id, - name.c_str(), - dataType, - dataspace, - H5P_DEFAULT, - H5P_DEFAULT); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to create HDF5 attribute during attribute write"); + VERIFY( + dataspace >= 0, + "[HDF5] Internal error: Failed to get HDF5 dataspace during " + "attribute write"); + attribute_id = H5Acreate( + node_id, + name.c_str(), + dataType, + dataspace, + H5P_DEFAULT, + H5P_DEFAULT); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to create HDF5 attribute during " + "attribute write"); status = H5Sclose(dataspace); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataspace during attribute write"); - } else + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataspace during " + "attribute write"); + } + else { - attribute_id = H5Aopen(node_id, - name.c_str(), - H5P_DEFAULT); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 attribute during attribute write"); + attribute_id = H5Aopen(node_id, 
name.c_str(), H5P_DEFAULT); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 attribute during " + "attribute write"); } using DT = Datatype; - switch( dtype ) + switch (dtype) { - case DT::CHAR: - { - char c = att.get< char >(); - status = H5Awrite(attribute_id, dataType, &c); - break; - } - case DT::UCHAR: - { - auto u = att.get< unsigned char >(); - status = H5Awrite(attribute_id, dataType, &u); - break; - } - case DT::SHORT: - { - auto i = att.get< short >(); - status = H5Awrite(attribute_id, dataType, &i); - break; - } - case DT::INT: - { - int i = att.get< int >(); - status = H5Awrite(attribute_id, dataType, &i); - break; - } - case DT::LONG: - { - long i = att.get< long >(); - status = H5Awrite(attribute_id, dataType, &i); - break; - } - case DT::LONGLONG: - { - auto i = att.get< long long >(); - status = H5Awrite(attribute_id, dataType, &i); - break; - } - case DT::USHORT: - { - auto u = att.get< unsigned short >(); - status = H5Awrite(attribute_id, dataType, &u); - break; - } - case DT::UINT: - { - auto u = att.get< unsigned int >(); - status = H5Awrite(attribute_id, dataType, &u); - break; - } - case DT::ULONG: - { - auto u = att.get< unsigned long >(); - status = H5Awrite(attribute_id, dataType, &u); - break; - } - case DT::ULONGLONG: - { - auto u = att.get< unsigned long long >(); - status = H5Awrite(attribute_id, dataType, &u); - break; - } - case DT::FLOAT: - { - auto f = att.get< float >(); - status = H5Awrite(attribute_id, dataType, &f); - break; - } - case DT::DOUBLE: - { - auto d = att.get< double >(); - status = H5Awrite(attribute_id, dataType, &d); - break; - } - case DT::LONG_DOUBLE: - { - auto d = att.get< long double >(); - status = H5Awrite(attribute_id, dataType, &d); - break; - } - case DT::CFLOAT: - { - std::complex< float > f = att.get< std::complex< float > >(); - status = H5Awrite(attribute_id, dataType, &f); - break; - } - case DT::CDOUBLE: - { - std::complex< double > d = att.get< std::complex< double > >(); - status = H5Awrite(attribute_id, dataType, &d); - break; - } - case DT::CLONG_DOUBLE: - { - std::complex< long double > d = att.get< std::complex< long double > >(); - status = H5Awrite(attribute_id, dataType, &d); - break; - } - case DT::STRING: - status = H5Awrite(attribute_id, - dataType, - att.get< std::string >().c_str()); - break; - case DT::VEC_CHAR: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< char > >().data()); - break; - case DT::VEC_SHORT: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< short > >().data()); - break; - case DT::VEC_INT: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< int > >().data()); - break; - case DT::VEC_LONG: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< long > >().data()); - break; - case DT::VEC_LONGLONG: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< long long > >().data()); - break; - case DT::VEC_UCHAR: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< unsigned char > >().data()); - break; - case DT::VEC_USHORT: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< unsigned short > >().data()); - break; - case DT::VEC_UINT: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< unsigned int > >().data()); - break; - case DT::VEC_ULONG: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< unsigned long > >().data()); - break; - case DT::VEC_ULONGLONG: - status = H5Awrite(attribute_id, - dataType, - att.get< 
std::vector< unsigned long long > >().data()); - break; - case DT::VEC_FLOAT: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< float > >().data()); - break; - case DT::VEC_DOUBLE: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< double > >().data()); - break; - case DT::VEC_LONG_DOUBLE: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< long double > >().data()); - break; - case DT::VEC_CFLOAT: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< std::complex< float > > >().data()); - break; - case DT::VEC_CDOUBLE: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< std::complex< double > > >().data()); - break; - case DT::VEC_CLONG_DOUBLE: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< std::complex< long double > > >().data()); - break; - case DT::VEC_STRING: - { - auto vs = att.get< std::vector< std::string > >(); - size_t max_len = 0; - for( std::string const& s : vs ) - max_len = std::max(max_len, s.size()); - std::unique_ptr< char[] > c_str(new char[max_len * vs.size()]); - for( size_t i = 0; i < vs.size(); ++i ) - strncpy(c_str.get() + i*max_len, vs[i].c_str(), max_len); - status = H5Awrite(attribute_id, dataType, c_str.get()); - break; - } - case DT::ARR_DBL_7: - status = H5Awrite(attribute_id, - dataType, - att.get< std::array< double, 7 > >().data()); - break; - case DT::BOOL: - { - bool b = att.get< bool >(); - status = H5Awrite(attribute_id, dataType, &b); - break; - } - case DT::UNDEFINED: - case DT::DATATYPE: - throw std::runtime_error("[HDF5] Unknown Attribute datatype (HDF5 Attribute write)"); - default: - throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); + case DT::CHAR: { + char c = att.get(); + status = H5Awrite(attribute_id, dataType, &c); + break; + } + case DT::UCHAR: { + auto u = att.get(); + status = H5Awrite(attribute_id, dataType, &u); + break; + } + case DT::SHORT: { + auto i = att.get(); + status = H5Awrite(attribute_id, dataType, &i); + break; + } + case DT::INT: { + int i = att.get(); + status = H5Awrite(attribute_id, dataType, &i); + break; + } + case DT::LONG: { + long i = att.get(); + status = H5Awrite(attribute_id, dataType, &i); + break; + } + case DT::LONGLONG: { + auto i = att.get(); + status = H5Awrite(attribute_id, dataType, &i); + break; + } + case DT::USHORT: { + auto u = att.get(); + status = H5Awrite(attribute_id, dataType, &u); + break; + } + case DT::UINT: { + auto u = att.get(); + status = H5Awrite(attribute_id, dataType, &u); + break; + } + case DT::ULONG: { + auto u = att.get(); + status = H5Awrite(attribute_id, dataType, &u); + break; + } + case DT::ULONGLONG: { + auto u = att.get(); + status = H5Awrite(attribute_id, dataType, &u); + break; + } + case DT::FLOAT: { + auto f = att.get(); + status = H5Awrite(attribute_id, dataType, &f); + break; + } + case DT::DOUBLE: { + auto d = att.get(); + status = H5Awrite(attribute_id, dataType, &d); + break; + } + case DT::LONG_DOUBLE: { + auto d = att.get(); + status = H5Awrite(attribute_id, dataType, &d); + break; + } + case DT::CFLOAT: { + std::complex f = att.get >(); + status = H5Awrite(attribute_id, dataType, &f); + break; + } + case DT::CDOUBLE: { + std::complex d = att.get >(); + status = H5Awrite(attribute_id, dataType, &d); + break; + } + case DT::CLONG_DOUBLE: { + std::complex d = att.get >(); + status = H5Awrite(attribute_id, dataType, &d); + break; + } + case DT::STRING: + status = + H5Awrite(attribute_id, dataType, att.get().c_str()); + break; + case 
DT::VEC_CHAR: + status = H5Awrite( + attribute_id, dataType, att.get >().data()); + break; + case DT::VEC_SHORT: + status = H5Awrite( + attribute_id, dataType, att.get >().data()); + break; + case DT::VEC_INT: + status = H5Awrite( + attribute_id, dataType, att.get >().data()); + break; + case DT::VEC_LONG: + status = H5Awrite( + attribute_id, dataType, att.get >().data()); + break; + case DT::VEC_LONGLONG: + status = H5Awrite( + attribute_id, dataType, att.get >().data()); + break; + case DT::VEC_UCHAR: + status = H5Awrite( + attribute_id, + dataType, + att.get >().data()); + break; + case DT::VEC_USHORT: + status = H5Awrite( + attribute_id, + dataType, + att.get >().data()); + break; + case DT::VEC_UINT: + status = H5Awrite( + attribute_id, + dataType, + att.get >().data()); + break; + case DT::VEC_ULONG: + status = H5Awrite( + attribute_id, + dataType, + att.get >().data()); + break; + case DT::VEC_ULONGLONG: + status = H5Awrite( + attribute_id, + dataType, + att.get >().data()); + break; + case DT::VEC_FLOAT: + status = H5Awrite( + attribute_id, dataType, att.get >().data()); + break; + case DT::VEC_DOUBLE: + status = H5Awrite( + attribute_id, dataType, att.get >().data()); + break; + case DT::VEC_LONG_DOUBLE: + status = H5Awrite( + attribute_id, + dataType, + att.get >().data()); + break; + case DT::VEC_CFLOAT: + status = H5Awrite( + attribute_id, + dataType, + att.get > >().data()); + break; + case DT::VEC_CDOUBLE: + status = H5Awrite( + attribute_id, + dataType, + att.get > >().data()); + break; + case DT::VEC_CLONG_DOUBLE: + status = H5Awrite( + attribute_id, + dataType, + att.get > >().data()); + break; + case DT::VEC_STRING: { + auto vs = att.get >(); + size_t max_len = 0; + for (std::string const &s : vs) + max_len = std::max(max_len, s.size()); + std::unique_ptr c_str(new char[max_len * vs.size()]); + for (size_t i = 0; i < vs.size(); ++i) + strncpy(c_str.get() + i * max_len, vs[i].c_str(), max_len); + status = H5Awrite(attribute_id, dataType, c_str.get()); + break; + } + case DT::ARR_DBL_7: + status = H5Awrite( + attribute_id, dataType, att.get >().data()); + break; + case DT::BOOL: { + bool b = att.get(); + status = H5Awrite(attribute_id, dataType, &b); + break; + } + case DT::UNDEFINED: + case DT::DATATYPE: + throw std::runtime_error( + "[HDF5] Unknown Attribute datatype (HDF5 Attribute write)"); + default: + throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); } - VERIFY(status == 0, "[HDF5] Internal error: Failed to write attribute " + name + " at " + concrete_h5_file_position(writable)); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to write attribute " + name + " at " + + concrete_h5_file_position(writable)); status = H5Tclose(dataType); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 datatype during Attribute write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 datatype during Attribute " + "write"); status = H5Aclose(attribute_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close attribute " + name + " at " + concrete_h5_file_position(writable) + " during attribute write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close attribute " + name + " at " + + concrete_h5_file_position(writable) + " during attribute write"); status = H5Oclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close " + concrete_h5_file_position(writable) + " during attribute write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close " + + 
concrete_h5_file_position(writable) + " during attribute write"); status = H5Pclose(fapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during attribute write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during attribute " + "write"); m_fileNames[writable] = file.name; } -void -HDF5IOHandlerImpl::readDataset(Writable* writable, - Parameter< Operation::READ_DATASET > & parameters) +void HDF5IOHandlerImpl::readDataset( + Writable *writable, Parameter ¶meters) { - auto res = getFile( writable ); - File file = res ? res.get() : getFile( writable->parent ).get(); + auto res = getFile(writable); + File file = res ? res.get() : getFile(writable->parent).get(); hid_t dataset_id, memspace, filespace; herr_t status; - dataset_id = H5Dopen(file.id, - concrete_h5_file_position(writable).c_str(), - H5P_DEFAULT); - VERIFY(dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset read"); + dataset_id = H5Dopen( + file.id, concrete_h5_file_position(writable).c_str(), H5P_DEFAULT); + VERIFY( + dataset_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " + "read"); - std::vector< hsize_t > start; - for( auto const& val : parameters.offset ) + std::vector start; + for (auto const &val : parameters.offset) start.push_back(static_cast(val)); - std::vector< hsize_t > stride(start.size(), 1); /* contiguous region */ - std::vector< hsize_t > count(start.size(), 1); /* single region */ - std::vector< hsize_t > block; - for( auto const& val : parameters.extent ) - block.push_back(static_cast< hsize_t >(val)); - memspace = H5Screate_simple(static_cast< int >(block.size()), block.data(), nullptr); + std::vector stride(start.size(), 1); /* contiguous region */ + std::vector count(start.size(), 1); /* single region */ + std::vector block; + for (auto const &val : parameters.extent) + block.push_back(static_cast(val)); + memspace = + H5Screate_simple(static_cast(block.size()), block.data(), nullptr); filespace = H5Dget_space(dataset_id); - status = H5Sselect_hyperslab(filespace, - H5S_SELECT_SET, - start.data(), - stride.data(), - count.data(), - block.data()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to select hyperslab during dataset read"); + status = H5Sselect_hyperslab( + filespace, + H5S_SELECT_SET, + start.data(), + stride.data(), + count.data(), + block.data()); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to select hyperslab during dataset " + "read"); - void* data = parameters.data.get(); + void *data = parameters.data.get(); Attribute a(0); a.dtype = parameters.dtype; - switch( a.dtype ) + switch (a.dtype) { using DT = Datatype; - case DT::LONG_DOUBLE: - case DT::DOUBLE: - case DT::FLOAT: - case DT::CLONG_DOUBLE: - case DT::CDOUBLE: - case DT::CFLOAT: - case DT::SHORT: - case DT::INT: - case DT::LONG: - case DT::LONGLONG: - case DT::USHORT: - case DT::UINT: - case DT::ULONG: - case DT::ULONGLONG: - case DT::CHAR: - case DT::UCHAR: - case DT::BOOL: - break; - case DT::UNDEFINED: - throw std::runtime_error("[HDF5] Unknown Attribute datatype (HDF5 Dataset read)"); - case DT::DATATYPE: - throw std::runtime_error("[HDF5] Meta-Datatype leaked into IO"); - default: - throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); + case DT::LONG_DOUBLE: + case DT::DOUBLE: + case DT::FLOAT: + case DT::CLONG_DOUBLE: + case DT::CDOUBLE: + case DT::CFLOAT: + case DT::SHORT: + case DT::INT: + case DT::LONG: + case DT::LONGLONG: + case DT::USHORT: + case DT::UINT: + 
case DT::ULONG: + case DT::ULONGLONG: + case DT::CHAR: + case DT::UCHAR: + case DT::BOOL: + break; + case DT::UNDEFINED: + throw std::runtime_error( + "[HDF5] Unknown Attribute datatype (HDF5 Dataset read)"); + case DT::DATATYPE: + throw std::runtime_error("[HDF5] Meta-Datatype leaked into IO"); + default: + throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); } GetH5DataType getH5DataType({ - { typeid(bool).name(), m_H5T_BOOL_ENUM }, - { typeid(std::complex< float >).name(), m_H5T_CFLOAT }, - { typeid(std::complex< double >).name(), m_H5T_CDOUBLE }, - { typeid(std::complex< long double >).name(), m_H5T_CLONG_DOUBLE }, + {typeid(bool).name(), m_H5T_BOOL_ENUM}, + {typeid(std::complex).name(), m_H5T_CFLOAT}, + {typeid(std::complex).name(), m_H5T_CDOUBLE}, + {typeid(std::complex).name(), m_H5T_CLONG_DOUBLE}, }); hid_t dataType = getH5DataType(a); - VERIFY(dataType >= 0, "[HDF5] Internal error: Failed to get HDF5 datatype during dataset read"); - status = H5Dread(dataset_id, - dataType, - memspace, - filespace, - m_datasetTransferProperty, - data); + VERIFY( + dataType >= 0, + "[HDF5] Internal error: Failed to get HDF5 datatype during dataset " + "read"); + status = H5Dread( + dataset_id, + dataType, + memspace, + filespace, + m_datasetTransferProperty, + data); VERIFY(status == 0, "[HDF5] Internal error: Failed to read dataset"); status = H5Tclose(dataType); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset datatype during dataset read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset datatype during " + "dataset read"); status = H5Sclose(filespace); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset file space during dataset read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset file space during " + "dataset read"); status = H5Sclose(memspace); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset memory space during dataset read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset memory space during " + "dataset read"); status = H5Dclose(dataset_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset during dataset read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset during dataset read"); } -void -HDF5IOHandlerImpl::readAttribute(Writable* writable, - Parameter< Operation::READ_ATT >& parameters) +void HDF5IOHandlerImpl::readAttribute( + Writable *writable, Parameter ¶meters) { - if( !writable->written ) - throw std::runtime_error("[HDF5] Internal error: Writable not marked written during attribute read"); + if (!writable->written) + throw std::runtime_error( + "[HDF5] Internal error: Writable not marked written during " + "attribute read"); - auto res = getFile( writable ); - File file = res ? res.get() : getFile( writable->parent ).get(); + auto res = getFile(writable); + File file = res ? 
res.get() : getFile(writable->parent).get(); hid_t obj_id, attr_id; herr_t status; hid_t fapl = H5Pcreate(H5P_LINK_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(fapl, true); } #endif - obj_id = H5Oopen(file.id, - concrete_h5_file_position(writable).c_str(), - fapl); - VERIFY(obj_id >= 0, std::string("[HDF5] Internal error: Failed to open HDF5 object '") + - concrete_h5_file_position(writable).c_str() + "' during attribute read"); - std::string const & attr_name = parameters.name; - attr_id = H5Aopen(obj_id, - attr_name.c_str(), - H5P_DEFAULT); - VERIFY(attr_id >= 0, + obj_id = + H5Oopen(file.id, concrete_h5_file_position(writable).c_str(), fapl); + VERIFY( + obj_id >= 0, + std::string("[HDF5] Internal error: Failed to open HDF5 object '") + + concrete_h5_file_position(writable).c_str() + + "' during attribute read"); + std::string const &attr_name = parameters.name; + attr_id = H5Aopen(obj_id, attr_name.c_str(), H5P_DEFAULT); + VERIFY( + attr_id >= 0, std::string("[HDF5] Internal error: Failed to open HDF5 attribute '") + - attr_name + "' (" + - concrete_h5_file_position(writable).c_str() + ") during attribute read"); + attr_name + "' (" + concrete_h5_file_position(writable).c_str() + + ") during attribute read"); hid_t attr_type, attr_space; attr_type = H5Aget_type(attr_id); attr_space = H5Aget_space(attr_id); int ndims = H5Sget_simple_extent_ndims(attr_space); - std::vector< hsize_t > dims(ndims, 0); - std::vector< hsize_t > maxdims(ndims, 0); + std::vector dims(ndims, 0); + std::vector maxdims(ndims, 0); - status = H5Sget_simple_extent_dims(attr_space, - dims.data(), - maxdims.data()); - VERIFY(status == ndims, "[HDF5] Internal error: Failed to get dimensions during attribute read"); + status = H5Sget_simple_extent_dims(attr_space, dims.data(), maxdims.data()); + VERIFY( + status == ndims, + "[HDF5] Internal error: Failed to get dimensions during attribute " + "read"); H5S_class_t attr_class = H5Sget_simple_extent_type(attr_space); Attribute a(0); - if( attr_class == H5S_SCALAR || (attr_class == H5S_SIMPLE && ndims == 1 && dims[0] == 1) ) + if (attr_class == H5S_SCALAR || + (attr_class == H5S_SIMPLE && ndims == 1 && dims[0] == 1)) { - if( H5Tequal(attr_type, H5T_NATIVE_CHAR) ) + if (H5Tequal(attr_type, H5T_NATIVE_CHAR)) { char c; - status = H5Aread(attr_id, - attr_type, - &c); + status = H5Aread(attr_id, attr_type, &c); a = Attribute(c); - } else if( H5Tequal(attr_type, H5T_NATIVE_UCHAR) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_UCHAR)) { unsigned char u; - status = H5Aread(attr_id, - attr_type, - &u); + status = H5Aread(attr_id, attr_type, &u); a = Attribute(u); - } else if( H5Tequal(attr_type, H5T_NATIVE_SHORT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_SHORT)) { short i; - status = H5Aread(attr_id, - attr_type, - &i); + status = H5Aread(attr_id, attr_type, &i); a = Attribute(i); - } else if( H5Tequal(attr_type, H5T_NATIVE_INT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_INT)) { int i; - status = H5Aread(attr_id, - attr_type, - &i); + status = H5Aread(attr_id, attr_type, &i); a = Attribute(i); - } else if( H5Tequal(attr_type, H5T_NATIVE_LONG) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_LONG)) { long i; - status = H5Aread(attr_id, - attr_type, - &i); + status = H5Aread(attr_id, attr_type, &i); a = Attribute(i); - } else if( H5Tequal(attr_type, H5T_NATIVE_LLONG) ) + } + else if (H5Tequal(attr_type, 
H5T_NATIVE_LLONG)) { long long i; - status = H5Aread(attr_id, - attr_type, - &i); + status = H5Aread(attr_id, attr_type, &i); a = Attribute(i); - } else if( H5Tequal(attr_type, H5T_NATIVE_USHORT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_USHORT)) { unsigned short u; - status = H5Aread(attr_id, - attr_type, - &u); + status = H5Aread(attr_id, attr_type, &u); a = Attribute(u); - } else if( H5Tequal(attr_type, H5T_NATIVE_UINT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_UINT)) { unsigned int u; - status = H5Aread(attr_id, - attr_type, - &u); + status = H5Aread(attr_id, attr_type, &u); a = Attribute(u); - } else if( H5Tequal(attr_type, H5T_NATIVE_ULONG) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_ULONG)) { unsigned long u; - status = H5Aread(attr_id, - attr_type, - &u); + status = H5Aread(attr_id, attr_type, &u); a = Attribute(u); - } else if( H5Tequal(attr_type, H5T_NATIVE_ULLONG) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_ULLONG)) { unsigned long long u; - status = H5Aread(attr_id, - attr_type, - &u); + status = H5Aread(attr_id, attr_type, &u); a = Attribute(u); - } else if( H5Tequal(attr_type, H5T_NATIVE_FLOAT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_FLOAT)) { float f; - status = H5Aread(attr_id, - attr_type, - &f); + status = H5Aread(attr_id, attr_type, &f); a = Attribute(f); - } else if( H5Tequal(attr_type, H5T_NATIVE_DOUBLE) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_DOUBLE)) { double d; - status = H5Aread(attr_id, - attr_type, - &d); + status = H5Aread(attr_id, attr_type, &d); a = Attribute(d); - } else if( H5Tequal(attr_type, H5T_NATIVE_LDOUBLE) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_LDOUBLE)) { long double l; - status = H5Aread(attr_id, - attr_type, - &l); + status = H5Aread(attr_id, attr_type, &l); a = Attribute(l); - } else if( H5Tget_class(attr_type) == H5T_STRING ) + } + else if (H5Tget_class(attr_type) == H5T_STRING) { - if( H5Tis_variable_str(attr_type) ) + if (H5Tis_variable_str(attr_type)) { // refs.: // https://github.com/HDFGroup/hdf5/blob/hdf5-1_12_0/tools/src/h5dump/h5dump_xml.c - hsize_t size = H5Tget_size(attr_type); // not yet the actual string length - std::vector< char > vc(size); // byte buffer to vlen strings - status = H5Aread(attr_id, - attr_type, - vc.data()); - auto c_str = *((char**)vc.data()); // get actual string out + hsize_t size = + H5Tget_size(attr_type); // not yet the actual string length + std::vector vc(size); // byte buffer to vlen strings + status = H5Aread(attr_id, attr_type, vc.data()); + auto c_str = *((char **)vc.data()); // get actual string out a = Attribute(std::string(c_str)); // free dynamically allocated vlen memory from H5Aread H5Dvlen_reclaim(attr_type, attr_space, H5P_DEFAULT, vc.data()); // 1.12+: - //H5Treclaim(attr_type, attr_space, H5P_DEFAULT, vc.data()); - } else + // H5Treclaim(attr_type, attr_space, H5P_DEFAULT, vc.data()); + } + else { hsize_t size = H5Tget_size(attr_type); - std::vector< char > vc(size); - status = H5Aread(attr_id, - attr_type, - vc.data()); - a = Attribute(auxiliary::strip(std::string(vc.data(), size), {'\0'})); + std::vector vc(size); + status = H5Aread(attr_id, attr_type, vc.data()); + a = Attribute( + auxiliary::strip(std::string(vc.data(), size), {'\0'})); } - } else if( H5Tget_class(attr_type) == H5T_ENUM ) + } + else if (H5Tget_class(attr_type) == H5T_ENUM) { bool attrIsBool = false; - if( H5Tget_nmembers(attr_type) == 2 ) + if (H5Tget_nmembers(attr_type) == 2) { - char* m0 = H5Tget_member_name(attr_type, 0); - char* m1 = H5Tget_member_name(attr_type, 1); - if( m0 
!= nullptr && m1 != nullptr ) - if( (strncmp("TRUE" , m0, 4) == 0) && (strncmp("FALSE", m1, 5) == 0) ) + char *m0 = H5Tget_member_name(attr_type, 0); + char *m1 = H5Tget_member_name(attr_type, 1); + if (m0 != nullptr && m1 != nullptr) + if ((strncmp("TRUE", m0, 4) == 0) && + (strncmp("FALSE", m1, 5) == 0)) attrIsBool = true; H5free_memory(m1); H5free_memory(m0); } - if( attrIsBool ) + if (attrIsBool) { int8_t enumVal; - status = H5Aread(attr_id, - attr_type, - &enumVal); - a = Attribute(static_cast< bool >(enumVal)); - } else - throw unsupported_data_error("[HDF5] Unsupported attribute enumeration"); - } else if( H5Tget_class(attr_type) == H5T_COMPOUND ) + status = H5Aread(attr_id, attr_type, &enumVal); + a = Attribute(static_cast(enumVal)); + } + else + throw unsupported_data_error( + "[HDF5] Unsupported attribute enumeration"); + } + else if (H5Tget_class(attr_type) == H5T_COMPOUND) { bool isComplexType = false; - if( H5Tget_nmembers(attr_type) == 2 ) + if (H5Tget_nmembers(attr_type) == 2) { - char* m0 = H5Tget_member_name(attr_type, 0); - char* m1 = H5Tget_member_name(attr_type, 1); - if( m0 != nullptr && m1 != nullptr ) - if( (strncmp("r" , m0, 1) == 0) && (strncmp("i", m1, 1) == 0) ) + char *m0 = H5Tget_member_name(attr_type, 0); + char *m1 = H5Tget_member_name(attr_type, 1); + if (m0 != nullptr && m1 != nullptr) + if ((strncmp("r", m0, 1) == 0) && + (strncmp("i", m1, 1) == 0)) isComplexType = true; H5free_memory(m1); H5free_memory(m0); @@ -1617,225 +1823,225 @@ HDF5IOHandlerImpl::readAttribute(Writable* writable, // re-implement legacy libSplash attributes for ColDim // see: include/splash/basetypes/ColTypeDim.hpp - bool isLegacyLibSplashAttr = ( - H5Tget_nmembers(attr_type) == 3 && - H5Tget_size(attr_type) == sizeof(hsize_t) * 3 - ); - if( isLegacyLibSplashAttr ) + bool isLegacyLibSplashAttr = + (H5Tget_nmembers(attr_type) == 3 && + H5Tget_size(attr_type) == sizeof(hsize_t) * 3); + if (isLegacyLibSplashAttr) { - char* m0 = H5Tget_member_name(attr_type, 0); - char* m1 = H5Tget_member_name(attr_type, 1); - char* m2 = H5Tget_member_name(attr_type, 2); - if( m0 == nullptr || m1 == nullptr || m2 == nullptr ) + char *m0 = H5Tget_member_name(attr_type, 0); + char *m1 = H5Tget_member_name(attr_type, 1); + char *m2 = H5Tget_member_name(attr_type, 2); + if (m0 == nullptr || m1 == nullptr || m2 == nullptr) + // clang-format off isLegacyLibSplashAttr = false; // NOLINT(bugprone-branch-clone) - else if(strcmp("x", m0) != 0 || strcmp("y", m1) != 0 || strcmp("z", m2) != 0) + // clang-format on + else if ( + strcmp("x", m0) != 0 || strcmp("y", m1) != 0 || + strcmp("z", m2) != 0) isLegacyLibSplashAttr = false; H5free_memory(m2); H5free_memory(m1); H5free_memory(m0); } - if( isLegacyLibSplashAttr ) + if (isLegacyLibSplashAttr) { - std::vector< hsize_t > vc(3, 0); - status = H5Aread(attr_id, - attr_type, - vc.data()); + std::vector vc(3, 0); + status = H5Aread(attr_id, attr_type, vc.data()); a = Attribute(vc); - } else if( isComplexType ) + } + else if (isComplexType) { size_t complexSize = H5Tget_member_offset(attr_type, 1); - if( complexSize == sizeof(float) ) + if (complexSize == sizeof(float)) { - std::complex< float > cf; + std::complex cf; status = H5Aread(attr_id, attr_type, &cf); a = Attribute(cf); } - else if( complexSize == sizeof(double) ) + else if (complexSize == sizeof(double)) { - std::complex< double > cd; + std::complex cd; status = H5Aread(attr_id, attr_type, &cd); a = Attribute(cd); } - else if( complexSize == sizeof(long double) ) + else if (complexSize == sizeof(long double)) { - 
std::complex< long double > cld; + std::complex cld; status = H5Aread(attr_id, attr_type, &cld); a = Attribute(cld); } else - throw unsupported_data_error("[HDF5] Unknown complex type representation"); + throw unsupported_data_error( + "[HDF5] Unknown complex type representation"); } else - throw unsupported_data_error("[HDF5] Compound attribute type not supported"); + throw unsupported_data_error( + "[HDF5] Compound attribute type not supported"); } else - throw std::runtime_error("[HDF5] Unsupported scalar attribute type"); - } else if( attr_class == H5S_SIMPLE ) + throw std::runtime_error( + "[HDF5] Unsupported scalar attribute type"); + } + else if (attr_class == H5S_SIMPLE) { - if( ndims != 1 ) - throw std::runtime_error("[HDF5] Unsupported attribute (array with ndims != 1)"); + if (ndims != 1) + throw std::runtime_error( + "[HDF5] Unsupported attribute (array with ndims != 1)"); - if( H5Tequal(attr_type, H5T_NATIVE_CHAR) ) + if (H5Tequal(attr_type, H5T_NATIVE_CHAR)) { - std::vector< char > vc(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vc.data()); + std::vector vc(dims[0], 0); + status = H5Aread(attr_id, attr_type, vc.data()); a = Attribute(vc); - } else if( H5Tequal(attr_type, H5T_NATIVE_UCHAR) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_UCHAR)) { - std::vector< unsigned char > vu(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vu.data()); + std::vector vu(dims[0], 0); + status = H5Aread(attr_id, attr_type, vu.data()); a = Attribute(vu); - } else if( H5Tequal(attr_type, H5T_NATIVE_SHORT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_SHORT)) { - std::vector< short > vint16(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vint16.data()); + std::vector vint16(dims[0], 0); + status = H5Aread(attr_id, attr_type, vint16.data()); a = Attribute(vint16); - } else if( H5Tequal(attr_type, H5T_NATIVE_INT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_INT)) { - std::vector< int > vint32(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vint32.data()); + std::vector vint32(dims[0], 0); + status = H5Aread(attr_id, attr_type, vint32.data()); a = Attribute(vint32); - } else if( H5Tequal(attr_type, H5T_NATIVE_LONG) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_LONG)) { - std::vector< long > vint64(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vint64.data()); + std::vector vint64(dims[0], 0); + status = H5Aread(attr_id, attr_type, vint64.data()); a = Attribute(vint64); - } else if( H5Tequal(attr_type, H5T_NATIVE_LLONG) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_LLONG)) { - std::vector< long long > vint64(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vint64.data()); + std::vector vint64(dims[0], 0); + status = H5Aread(attr_id, attr_type, vint64.data()); a = Attribute(vint64); - } else if( H5Tequal(attr_type, H5T_NATIVE_USHORT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_USHORT)) { - std::vector< unsigned short > vuint16(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vuint16.data()); + std::vector vuint16(dims[0], 0); + status = H5Aread(attr_id, attr_type, vuint16.data()); a = Attribute(vuint16); - } else if( H5Tequal(attr_type, H5T_NATIVE_UINT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_UINT)) { - std::vector< unsigned int > vuint32(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vuint32.data()); + std::vector vuint32(dims[0], 0); + status = H5Aread(attr_id, attr_type, vuint32.data()); a = Attribute(vuint32); - } else if( H5Tequal(attr_type, H5T_NATIVE_ULONG) ) + } + else if (H5Tequal(attr_type, 
H5T_NATIVE_ULONG)) { - std::vector< unsigned long > vuint64(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vuint64.data()); + std::vector vuint64(dims[0], 0); + status = H5Aread(attr_id, attr_type, vuint64.data()); a = Attribute(vuint64); - } else if( H5Tequal(attr_type, H5T_NATIVE_ULLONG) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_ULLONG)) { - std::vector< unsigned long long > vuint64(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vuint64.data()); + std::vector vuint64(dims[0], 0); + status = H5Aread(attr_id, attr_type, vuint64.data()); a = Attribute(vuint64); - } else if( H5Tequal(attr_type, H5T_NATIVE_FLOAT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_FLOAT)) { - std::vector< float > vf(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vf.data()); + std::vector vf(dims[0], 0); + status = H5Aread(attr_id, attr_type, vf.data()); a = Attribute(vf); - } else if( H5Tequal(attr_type, H5T_NATIVE_DOUBLE) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_DOUBLE)) { - if( dims[0] == 7 && attr_name == "unitDimension" ) + if (dims[0] == 7 && attr_name == "unitDimension") { - std::array< double, 7 > ad; - status = H5Aread(attr_id, - attr_type, - &ad); + std::array ad; + status = H5Aread(attr_id, attr_type, &ad); a = Attribute(ad); - } else + } + else { - std::vector< double > vd(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vd.data()); + std::vector vd(dims[0], 0); + status = H5Aread(attr_id, attr_type, vd.data()); a = Attribute(vd); } - } else if( H5Tequal(attr_type, H5T_NATIVE_LDOUBLE) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_LDOUBLE)) { - std::vector< long double > vld(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vld.data()); + std::vector vld(dims[0], 0); + status = H5Aread(attr_id, attr_type, vld.data()); a = Attribute(vld); - } else if( H5Tequal(attr_type, m_H5T_CFLOAT) ) + } + else if (H5Tequal(attr_type, m_H5T_CFLOAT)) { - std::vector< std::complex< float > > vcf(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vcf.data()); + std::vector > vcf(dims[0], 0); + status = H5Aread(attr_id, attr_type, vcf.data()); a = Attribute(vcf); - } else if( H5Tequal(attr_type, m_H5T_CDOUBLE) ) + } + else if (H5Tequal(attr_type, m_H5T_CDOUBLE)) { - std::vector< std::complex< double > > vcd(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vcd.data()); + std::vector > vcd(dims[0], 0); + status = H5Aread(attr_id, attr_type, vcd.data()); a = Attribute(vcd); - } else if( H5Tequal(attr_type, m_H5T_CLONG_DOUBLE) ) + } + else if (H5Tequal(attr_type, m_H5T_CLONG_DOUBLE)) { - std::vector< std::complex< long double > > vcld(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vcld.data()); + std::vector > vcld(dims[0], 0); + status = H5Aread(attr_id, attr_type, vcld.data()); a = Attribute(vcld); - } else if( H5Tget_class(attr_type) == H5T_STRING ) + } + else if (H5Tget_class(attr_type) == H5T_STRING) { - std::vector< std::string > vs; - if( H5Tis_variable_str(attr_type) ) + std::vector vs; + if (H5Tis_variable_str(attr_type)) { - std::vector< char * > vc(dims[0]); - status = H5Aread(attr_id, - attr_type, - vc.data()); - VERIFY(status == 0, - "[HDF5] Internal error: Failed to read attribute " + attr_name + - " at " + concrete_h5_file_position(writable)); - for( auto const& val : vc ) + std::vector vc(dims[0]); + status = H5Aread(attr_id, attr_type, vc.data()); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to read attribute " + + attr_name + " at " + + concrete_h5_file_position(writable)); + for (auto const &val : vc) 
vs.push_back(auxiliary::strip(std::string(val), {'\0'})); - status = H5Dvlen_reclaim(attr_type, - attr_space, - H5P_DEFAULT, - vc.data()); - } else + status = H5Dvlen_reclaim( + attr_type, attr_space, H5P_DEFAULT, vc.data()); + } + else { size_t length = H5Tget_size(attr_type); - std::vector< char > c(dims[0] * length); - status = H5Aread(attr_id, - attr_type, - c.data()); - for( hsize_t i = 0; i < dims[0]; ++i ) - vs.push_back(auxiliary::strip(std::string(c.data() + i*length, length), {'\0'})); + std::vector c(dims[0] * length); + status = H5Aread(attr_id, attr_type, c.data()); + for (hsize_t i = 0; i < dims[0]; ++i) + vs.push_back(auxiliary::strip( + std::string(c.data() + i * length, length), {'\0'})); } a = Attribute(vs); - } else - throw std::runtime_error("[HDF5] Unsupported simple attribute type"); - } else + } + else + throw std::runtime_error( + "[HDF5] Unsupported simple attribute type"); + } + else throw std::runtime_error("[HDF5] Unsupported attribute class"); - VERIFY(status == 0, "[HDF5] Internal error: Failed to read attribute " + attr_name + " at " + concrete_h5_file_position(writable)); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to read attribute " + attr_name + + " at " + concrete_h5_file_position(writable)); status = H5Tclose(attr_type); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close attribute datatype during attribute read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close attribute datatype during " + "attribute read"); status = H5Sclose(attr_space); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close attribute file space during attribute read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close attribute file space during " + "attribute read"); auto dtype = parameters.dtype; *dtype = a.dtype; @@ -1843,212 +2049,259 @@ HDF5IOHandlerImpl::readAttribute(Writable* writable, *resource = a.getResource(); status = H5Aclose(attr_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close attribute " + attr_name + " at " + concrete_h5_file_position(writable) + " during attribute read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close attribute " + attr_name + + " at " + concrete_h5_file_position(writable) + + " during attribute read"); status = H5Oclose(obj_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close " + concrete_h5_file_position(writable) + " during attribute read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close " + + concrete_h5_file_position(writable) + " during attribute read"); status = H5Pclose(fapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 attribute during attribute read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 attribute during " + "attribute read"); } -void -HDF5IOHandlerImpl::listPaths(Writable* writable, - Parameter< Operation::LIST_PATHS > & parameters) +void HDF5IOHandlerImpl::listPaths( + Writable *writable, Parameter ¶meters) { - if( !writable->written ) - throw std::runtime_error("[HDF5] Internal error: Writable not marked written during path listing"); + if (!writable->written) + throw std::runtime_error( + "[HDF5] Internal error: Writable not marked written during path " + "listing"); - auto res = getFile( writable ); - File file = res ? res.get() : getFile( writable->parent ).get(); + auto res = getFile(writable); + File file = res ? 
res.get() : getFile(writable->parent).get(); hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(gapl, true); } #endif - hid_t node_id = H5Gopen(file.id, - concrete_h5_file_position(writable).c_str(), - gapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during path listing"); + hid_t node_id = + H5Gopen(file.id, concrete_h5_file_position(writable).c_str(), gapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during path listing"); H5G_info_t group_info; herr_t status = H5Gget_info(node_id, &group_info); - VERIFY(status == 0, "[HDF5] Internal error: Failed to get HDF5 group info for " + concrete_h5_file_position(writable) + " during path listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to get HDF5 group info for " + + concrete_h5_file_position(writable) + " during path listing"); auto paths = parameters.paths; - for( hsize_t i = 0; i < group_info.nlinks; ++i ) + for (hsize_t i = 0; i < group_info.nlinks; ++i) { - if( H5G_GROUP == H5Gget_objtype_by_idx(node_id, i) ) + if (H5G_GROUP == H5Gget_objtype_by_idx(node_id, i)) { ssize_t name_length = H5Gget_objname_by_idx(node_id, i, nullptr, 0); - std::vector< char > name(name_length+1); - H5Gget_objname_by_idx(node_id, i, name.data(), name_length+1); + std::vector name(name_length + 1); + H5Gget_objname_by_idx(node_id, i, name.data(), name_length + 1); paths->push_back(std::string(name.data(), name_length)); } } status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group " + concrete_h5_file_position(writable) + " during path listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group " + + concrete_h5_file_position(writable) + " during path listing"); status = H5Pclose(gapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during path listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during path " + "listing"); } -void -HDF5IOHandlerImpl::listDatasets(Writable* writable, - Parameter< Operation::LIST_DATASETS >& parameters) +void HDF5IOHandlerImpl::listDatasets( + Writable *writable, Parameter ¶meters) { - if( !writable->written ) - throw std::runtime_error("[HDF5] Internal error: Writable not marked written during dataset listing"); + if (!writable->written) + throw std::runtime_error( + "[HDF5] Internal error: Writable not marked written during dataset " + "listing"); - auto res = getFile( writable ); - File file = res ? res.get() : getFile( writable->parent ).get(); + auto res = getFile(writable); + File file = res ? 
res.get() : getFile(writable->parent).get(); hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(gapl, true); } #endif - hid_t node_id = H5Gopen(file.id, - concrete_h5_file_position(writable).c_str(), - gapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during dataset listing"); + hid_t node_id = + H5Gopen(file.id, concrete_h5_file_position(writable).c_str(), gapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during dataset " + "listing"); H5G_info_t group_info; herr_t status = H5Gget_info(node_id, &group_info); - VERIFY(status == 0, "[HDF5] Internal error: Failed to get HDF5 group info for " + concrete_h5_file_position(writable) + " during dataset listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to get HDF5 group info for " + + concrete_h5_file_position(writable) + " during dataset listing"); auto datasets = parameters.datasets; - for( hsize_t i = 0; i < group_info.nlinks; ++i ) + for (hsize_t i = 0; i < group_info.nlinks; ++i) { - if( H5G_DATASET == H5Gget_objtype_by_idx(node_id, i) ) + if (H5G_DATASET == H5Gget_objtype_by_idx(node_id, i)) { ssize_t name_length = H5Gget_objname_by_idx(node_id, i, nullptr, 0); - std::vector< char > name(name_length+1); - H5Gget_objname_by_idx(node_id, i, name.data(), name_length+1); + std::vector name(name_length + 1); + H5Gget_objname_by_idx(node_id, i, name.data(), name_length + 1); datasets->push_back(std::string(name.data(), name_length)); } } status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group " + concrete_h5_file_position(writable) + " during dataset listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group " + + concrete_h5_file_position(writable) + " during dataset listing"); status = H5Pclose(gapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during dataset listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during dataset " + "listing"); } -void HDF5IOHandlerImpl::listAttributes(Writable* writable, - Parameter< Operation::LIST_ATTS >& parameters) +void HDF5IOHandlerImpl::listAttributes( + Writable *writable, Parameter ¶meters) { - if( !writable->written ) - throw std::runtime_error("[HDF5] Internal error: Writable not marked written during attribute listing"); + if (!writable->written) + throw std::runtime_error( + "[HDF5] Internal error: Writable not marked written during " + "attribute listing"); - auto res = getFile( writable ); - File file = res ? res.get() : getFile( writable->parent ).get(); + auto res = getFile(writable); + File file = res ? 
res.get() : getFile(writable->parent).get(); hid_t node_id; hid_t fapl = H5Pcreate(H5P_LINK_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(fapl, true); } #endif - node_id = H5Oopen(file.id, - concrete_h5_file_position(writable).c_str(), - fapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during attribute listing"); + node_id = + H5Oopen(file.id, concrete_h5_file_position(writable).c_str(), fapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during attribute " + "listing"); herr_t status; -#if H5_VERSION_GE(1,12,0) +#if H5_VERSION_GE(1, 12, 0) H5O_info2_t object_info; status = H5Oget_info3(node_id, &object_info, H5O_INFO_NUM_ATTRS); #else H5O_info_t object_info; status = H5Oget_info(node_id, &object_info); #endif - VERIFY(status == 0, "[HDF5] Internal error: Failed to get HDF5 object info for " + concrete_h5_file_position(writable) + " during attribute listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to get HDF5 object info for " + + concrete_h5_file_position(writable) + " during attribute listing"); auto attributes = parameters.attributes; - for( hsize_t i = 0; i < object_info.num_attrs; ++i ) + for (hsize_t i = 0; i < object_info.num_attrs; ++i) { - ssize_t name_length = H5Aget_name_by_idx(node_id, - ".", - H5_INDEX_CRT_ORDER, - H5_ITER_INC, - i, - nullptr, - 0, - H5P_DEFAULT); - std::vector< char > name(name_length+1); - H5Aget_name_by_idx(node_id, - ".", - H5_INDEX_CRT_ORDER, - H5_ITER_INC, - i, - name.data(), - name_length+1, - H5P_DEFAULT); + ssize_t name_length = H5Aget_name_by_idx( + node_id, + ".", + H5_INDEX_CRT_ORDER, + H5_ITER_INC, + i, + nullptr, + 0, + H5P_DEFAULT); + std::vector name(name_length + 1); + H5Aget_name_by_idx( + node_id, + ".", + H5_INDEX_CRT_ORDER, + H5_ITER_INC, + i, + name.data(), + name_length + 1, + H5P_DEFAULT); attributes->push_back(std::string(name.data(), name_length)); } status = H5Oclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 object during attribute listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 object during attribute " + "listing"); status = H5Pclose(fapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during dataset listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during dataset " + "listing"); } -auxiliary::Option< HDF5IOHandlerImpl::File > -HDF5IOHandlerImpl::getFile( Writable * writable ) +auxiliary::Option +HDF5IOHandlerImpl::getFile(Writable *writable) { using namespace auxiliary; - auto it = m_fileNames.find( writable ); - if( it == m_fileNames.end() ) + auto it = m_fileNames.find(writable); + if (it == m_fileNames.end()) { - return Option< File >(); + return Option(); } - auto it2 = m_fileNamesWithID.find( it->second ); - if( it2 == m_fileNamesWithID.end() ) + auto it2 = m_fileNamesWithID.find(it->second); + if (it2 == m_fileNamesWithID.end()) { - return Option< File >(); + return Option(); } File res; res.name = it->second; res.id = it2->second; - return makeOption( std::move( res ) ); + return makeOption(std::move(res)); } #endif #if openPMD_HAVE_HDF5 HDF5IOHandler::HDF5IOHandler(std::string path, Access at, nlohmann::json config) - : AbstractIOHandler(std::move(path), at), - m_impl{new HDF5IOHandlerImpl(this, std::move(config))} -{ } + : 
AbstractIOHandler(std::move(path), at) + , m_impl{new HDF5IOHandlerImpl(this, std::move(config))} +{} HDF5IOHandler::~HDF5IOHandler() = default; -std::future< void > -HDF5IOHandler::flush() +std::future HDF5IOHandler::flush(internal::FlushParams const &) { return m_impl->flush(); } #else -HDF5IOHandler::HDF5IOHandler(std::string path, Access at, nlohmann::json /* config */) - : AbstractIOHandler(std::move(path), at) +HDF5IOHandler::HDF5IOHandler( + std::string path, Access at, nlohmann::json /* config */) + : AbstractIOHandler(std::move(path), at) { throw std::runtime_error("openPMD-api built without HDF5 support"); } HDF5IOHandler::~HDF5IOHandler() = default; -std::future< void > -HDF5IOHandler::flush() +std::future HDF5IOHandler::flush(internal::FlushParams const &) { - return std::future< void >(); + return std::future(); } #endif -} // openPMD +} // namespace openPMD diff --git a/src/IO/HDF5/ParallelHDF5IOHandler.cpp b/src/IO/HDF5/ParallelHDF5IOHandler.cpp index 19c9621393..7557054f41 100644 --- a/src/IO/HDF5/ParallelHDF5IOHandler.cpp +++ b/src/IO/HDF5/ParallelHDF5IOHandler.cpp @@ -23,62 +23,75 @@ #include "openPMD/auxiliary/Environment.hpp" #if openPMD_HAVE_MPI -# include +#include #endif #include #include - namespace openPMD { #if openPMD_HAVE_HDF5 && openPMD_HAVE_MPI -# if openPMD_USE_VERIFY -# define VERIFY(CONDITION, TEXT) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } -# else -# define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 ) -# endif +#if openPMD_USE_VERIFY +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } +#else +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0) +#endif ParallelHDF5IOHandler::ParallelHDF5IOHandler( - std::string path, Access at, MPI_Comm comm, nlohmann::json config ) - : AbstractIOHandler(std::move(path), at, comm), - m_impl{new ParallelHDF5IOHandlerImpl(this, comm, std::move(config))} -{ } + std::string path, Access at, MPI_Comm comm, nlohmann::json config) + : AbstractIOHandler(std::move(path), at, comm) + , m_impl{new ParallelHDF5IOHandlerImpl(this, comm, std::move(config))} +{} ParallelHDF5IOHandler::~ParallelHDF5IOHandler() = default; -std::future< void > -ParallelHDF5IOHandler::flush() +std::future ParallelHDF5IOHandler::flush(internal::FlushParams const &) { return m_impl->flush(); } ParallelHDF5IOHandlerImpl::ParallelHDF5IOHandlerImpl( - AbstractIOHandler* handler, MPI_Comm comm, nlohmann::json config ) - : HDF5IOHandlerImpl{handler, std::move(config)}, - m_mpiComm{comm}, - m_mpiInfo{MPI_INFO_NULL} /* MPI 3.0+: MPI_INFO_ENV */ + AbstractIOHandler *handler, MPI_Comm comm, nlohmann::json config) + : HDF5IOHandlerImpl{handler, std::move(config)} + , m_mpiComm{comm} + , m_mpiInfo{MPI_INFO_NULL} /* MPI 3.0+: MPI_INFO_ENV */ { m_datasetTransferProperty = H5Pcreate(H5P_DATASET_XFER); m_fileAccessProperty = H5Pcreate(H5P_FILE_ACCESS); m_fileCreateProperty = H5Pcreate(H5P_FILE_CREATE); -#if H5_VERSION_GE(1,10,1) - auto const hdf5_spaced_allocation = auxiliary::getEnvString( "OPENPMD_HDF5_PAGED_ALLOCATION", "ON" ); - if( hdf5_spaced_allocation == "ON" ) { - auto const strPageSize = auxiliary::getEnvString( "OPENPMD_HDF5_PAGED_ALLOCATION_SIZE", "33554432" ); +#if H5_VERSION_GE(1, 10, 1) + auto const hdf5_spaced_allocation = + auxiliary::getEnvString("OPENPMD_HDF5_PAGED_ALLOCATION", "ON"); + if (hdf5_spaced_allocation == "ON") + { + auto const strPageSize = auxiliary::getEnvString( + "OPENPMD_HDF5_PAGED_ALLOCATION_SIZE", "33554432"); 
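        // For illustration (values taken from the defaults above, not an
        // addition to the patch): the page size arrives as a decimal string
        // from OPENPMD_HDF5_PAGED_ALLOCATION_SIZE and is parsed into an
        // hsize_t below. With the default "33554432" (32 MiB) the code
        // effectively performs
        //   H5Pset_file_space_strategy(
        //       m_fileCreateProperty, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)0);
        //   H5Pset_file_space_page_size(m_fileCreateProperty, 33554432);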
std::stringstream tstream(strPageSize); hsize_t page_size; tstream >> page_size; - H5Pset_file_space_strategy(m_fileCreateProperty, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)0); + H5Pset_file_space_strategy( + m_fileCreateProperty, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)0); H5Pset_file_space_page_size(m_fileCreateProperty, page_size); } #endif - auto const hdf5_defer_metadata = auxiliary::getEnvString( "OPENPMD_HDF5_DEFER_METADATA", "ON" ); - if( hdf5_defer_metadata == "ON" ) { - auto const strMetaSize = auxiliary::getEnvString( "OPENPMD_HDF5_DEFER_METADATA_SIZE", "33554432" ); + auto const hdf5_defer_metadata = + auxiliary::getEnvString("OPENPMD_HDF5_DEFER_METADATA", "ON"); + if (hdf5_defer_metadata == "ON") + { + auto const strMetaSize = auxiliary::getEnvString( + "OPENPMD_HDF5_DEFER_METADATA_SIZE", "33554432"); std::stringstream tstream(strMetaSize); hsize_t meta_size; tstream >> meta_size; @@ -96,81 +109,96 @@ ParallelHDF5IOHandlerImpl::ParallelHDF5IOHandlerImpl( } H5FD_mpio_xfer_t xfer_mode = H5FD_MPIO_COLLECTIVE; - auto const hdf5_collective = auxiliary::getEnvString( "OPENPMD_HDF5_INDEPENDENT", "ON" ); - if( hdf5_collective == "ON" ) + auto const hdf5_collective = + auxiliary::getEnvString("OPENPMD_HDF5_INDEPENDENT", "ON"); + if (hdf5_collective == "ON") xfer_mode = H5FD_MPIO_INDEPENDENT; else { - VERIFY(hdf5_collective == "OFF", "[HDF5] Internal error: OPENPMD_HDF5_INDEPENDENT property must be either ON or OFF"); + VERIFY( + hdf5_collective == "OFF", + "[HDF5] Internal error: OPENPMD_HDF5_INDEPENDENT property must be " + "either ON or OFF"); } herr_t status; status = H5Pset_dxpl_mpio(m_datasetTransferProperty, xfer_mode); -#if H5_VERSION_GE(1,10,0) - status = H5Pset_all_coll_metadata_ops(m_fileAccessProperty, m_hdf5_collective_metadata); - VERIFY(status >= 0, "[HDF5] Internal error: Failed to set metadata read HDF5 file access property"); - - status = H5Pset_coll_metadata_write(m_fileAccessProperty, m_hdf5_collective_metadata); - VERIFY(status >= 0, "[HDF5] Internal error: Failed to set metadata write HDF5 file access property"); +#if H5_VERSION_GE(1, 10, 0) + status = H5Pset_all_coll_metadata_ops( + m_fileAccessProperty, m_hdf5_collective_metadata); + VERIFY( + status >= 0, + "[HDF5] Internal error: Failed to set metadata read HDF5 file access " + "property"); + + status = H5Pset_coll_metadata_write( + m_fileAccessProperty, m_hdf5_collective_metadata); + VERIFY( + status >= 0, + "[HDF5] Internal error: Failed to set metadata write HDF5 file access " + "property"); #endif - auto const strByte = auxiliary::getEnvString( "OPENPMD_HDF5_ALIGNMENT", "1" ); + auto const strByte = auxiliary::getEnvString("OPENPMD_HDF5_ALIGNMENT", "1"); std::stringstream sstream(strByte); hsize_t bytes; sstream >> bytes; - auto const strThreshold = auxiliary::getEnvString( "OPENPMD_HDF5_THRESHOLD", "0" ); + auto const strThreshold = + auxiliary::getEnvString("OPENPMD_HDF5_THRESHOLD", "0"); std::stringstream tstream(strThreshold); hsize_t threshold; tstream >> threshold; - if ( bytes > 1 ) - H5Pset_alignment(m_fileAccessProperty, threshold, bytes); + if (bytes > 1) + H5Pset_alignment(m_fileAccessProperty, threshold, bytes); - VERIFY(status >= 0, "[HDF5] Internal error: Failed to set HDF5 dataset transfer property"); + VERIFY( + status >= 0, + "[HDF5] Internal error: Failed to set HDF5 dataset transfer property"); status = H5Pset_fapl_mpio(m_fileAccessProperty, m_mpiComm, m_mpiInfo); - VERIFY(status >= 0, "[HDF5] Internal error: Failed to set HDF5 file access property"); + VERIFY( + status >= 0, + "[HDF5] 
Internal error: Failed to set HDF5 file access property"); } ParallelHDF5IOHandlerImpl::~ParallelHDF5IOHandlerImpl() { herr_t status; - while( !m_openFileIDs.empty() ) + while (!m_openFileIDs.empty()) { auto file = m_openFileIDs.begin(); status = H5Fclose(*file); - if( status < 0 ) - std::cerr << "Internal error: Failed to close HDF5 file (parallel)\n"; + if (status < 0) + std::cerr + << "Internal error: Failed to close HDF5 file (parallel)\n"; m_openFileIDs.erase(file); } } #else -# if openPMD_HAVE_MPI -ParallelHDF5IOHandler::ParallelHDF5IOHandler(std::string path, - Access at, - MPI_Comm comm, - nlohmann::json /* config */) - : AbstractIOHandler(std::move(path), at, comm) +#if openPMD_HAVE_MPI +ParallelHDF5IOHandler::ParallelHDF5IOHandler( + std::string path, Access at, MPI_Comm comm, nlohmann::json /* config */) + : AbstractIOHandler(std::move(path), at, comm) { throw std::runtime_error("openPMD-api built without HDF5 support"); } -# else -ParallelHDF5IOHandler::ParallelHDF5IOHandler(std::string path, - Access at, - nlohmann::json /* config */) - : AbstractIOHandler(std::move(path), at) +#else +ParallelHDF5IOHandler::ParallelHDF5IOHandler( + std::string path, Access at, nlohmann::json /* config */) + : AbstractIOHandler(std::move(path), at) { - throw std::runtime_error("openPMD-api built without parallel support and without HDF5 support"); + throw std::runtime_error( + "openPMD-api built without parallel support and without HDF5 support"); } -# endif +#endif ParallelHDF5IOHandler::~ParallelHDF5IOHandler() = default; -std::future< void > -ParallelHDF5IOHandler::flush() +std::future ParallelHDF5IOHandler::flush(internal::FlushParams const &) { - return std::future< void >(); + return std::future(); } #endif -} // openPMD +} // namespace openPMD diff --git a/src/IO/IOTask.cpp b/src/IO/IOTask.cpp index 074cb24c25..3db1a04d4b 100644 --- a/src/IO/IOTask.cpp +++ b/src/IO/IOTask.cpp @@ -21,19 +21,18 @@ #include "openPMD/IO/IOTask.hpp" #include "openPMD/backend/Attributable.hpp" - namespace openPMD { -Writable* -getWritable(AttributableInterface* a) -{ return &a->writable(); } +Writable *getWritable(AttributableInterface *a) +{ + return &a->writable(); +} namespace internal { - std::string - operationAsString( Operation op ) + std::string operationAsString(Operation op) { - switch( op ) + switch (op) { case Operation::CREATE_FILE: return "CREATE_FILE"; @@ -109,5 +108,5 @@ namespace internal break; } } -} -} // openPMD +} // namespace internal +} // namespace openPMD diff --git a/src/IO/InvalidatableFile.cpp b/src/IO/InvalidatableFile.cpp index 723c46b3fe..8d4b688673 100644 --- a/src/IO/InvalidatableFile.cpp +++ b/src/IO/InvalidatableFile.cpp @@ -21,70 +21,62 @@ #include "openPMD/IO/InvalidatableFile.hpp" - -openPMD::InvalidatableFile::InvalidatableFile( std::string s ) : - fileState { std::make_shared< FileState >( s ) } +openPMD::InvalidatableFile::InvalidatableFile(std::string s) + : fileState{std::make_shared(s)} {} - -void openPMD::InvalidatableFile::invalidate( ) +void openPMD::InvalidatableFile::invalidate() { fileState->valid = false; } - -bool openPMD::InvalidatableFile::valid( ) const +bool openPMD::InvalidatableFile::valid() const { return fileState->valid; } - -openPMD::InvalidatableFile & -openPMD::InvalidatableFile::operator=( std::string s ) +openPMD::InvalidatableFile &openPMD::InvalidatableFile::operator=(std::string s) { - if( fileState ) + if (fileState) { fileState->name = s; } else { - fileState = std::make_shared< FileState >( s ); + fileState = std::make_shared(s); } return 
*this; } - -bool -openPMD::InvalidatableFile::operator==( const openPMD::InvalidatableFile & f ) const +bool openPMD::InvalidatableFile::operator==( + const openPMD::InvalidatableFile &f) const { return this->fileState == f.fileState; } - -std::string & openPMD::InvalidatableFile::operator*( ) const +std::string &openPMD::InvalidatableFile::operator*() const { return fileState->name; } - -std::string * openPMD::InvalidatableFile::operator->( ) const +std::string *openPMD::InvalidatableFile::operator->() const { return &fileState->name; } - -openPMD::InvalidatableFile::operator bool( ) const +openPMD::InvalidatableFile::operator bool() const { - return fileState.operator bool( ); + return fileState.operator bool(); } - -openPMD::InvalidatableFile::FileState::FileState( std::string s ) : - name { std::move( s ) } +openPMD::InvalidatableFile::FileState::FileState(std::string s) + : name{std::move(s)} {} -std::hash< openPMD::InvalidatableFile >::result_type -std::hash< openPMD::InvalidatableFile >::operator()( const openPMD::InvalidatableFile & s ) const noexcept +std::hash::result_type +std::hash::operator()( + const openPMD::InvalidatableFile &s) const noexcept { - return std::hash< shared_ptr< openPMD::InvalidatableFile::FileState>> {}( s.fileState ); + return std::hash>{}( + s.fileState); } diff --git a/src/IO/JSON/JSONFilePosition.cpp b/src/IO/JSON/JSONFilePosition.cpp index 536000036c..3232d52430 100644 --- a/src/IO/JSON/JSONFilePosition.cpp +++ b/src/IO/JSON/JSONFilePosition.cpp @@ -2,10 +2,8 @@ #include - namespace openPMD { - JSONFilePosition::JSONFilePosition( json::json_pointer ptr): - id( std::move( ptr ) ) - {} -} +JSONFilePosition::JSONFilePosition(json::json_pointer ptr) : id(std::move(ptr)) +{} +} // namespace openPMD diff --git a/src/IO/JSON/JSONIOHandler.cpp b/src/IO/JSON/JSONIOHandler.cpp index ff95774440..15d18194c7 100644 --- a/src/IO/JSON/JSONIOHandler.cpp +++ b/src/IO/JSON/JSONIOHandler.cpp @@ -21,24 +21,16 @@ #include "openPMD/IO/JSON/JSONIOHandler.hpp" - namespace openPMD { - JSONIOHandler::~JSONIOHandler( ) = default; +JSONIOHandler::~JSONIOHandler() = default; - JSONIOHandler::JSONIOHandler( - std::string path, - Access at - ) : - AbstractIOHandler { - path, - at - }, - m_impl { JSONIOHandlerImpl { this } } - {} +JSONIOHandler::JSONIOHandler(std::string path, Access at) + : AbstractIOHandler{path, at}, m_impl{JSONIOHandlerImpl{this}} +{} - std::future< void > JSONIOHandler::flush( ) - { - return m_impl.flush( ); - } -} // openPMD +std::future JSONIOHandler::flush(internal::FlushParams const &) +{ + return m_impl.flush(); +} +} // namespace openPMD diff --git a/src/IO/JSON/JSONIOHandlerImpl.cpp b/src/IO/JSON/JSONIOHandlerImpl.cpp index dc9298ce3d..45396e5d6b 100644 --- a/src/IO/JSON/JSONIOHandlerImpl.cpp +++ b/src/IO/JSON/JSONIOHandlerImpl.cpp @@ -19,1829 +19,1437 @@ * If not, see . 
*/ +#include "openPMD/IO/JSON/JSONIOHandlerImpl.hpp" +#include "openPMD/Datatype.hpp" +#include "openPMD/DatatypeHelpers.hpp" #include "openPMD/auxiliary/Filesystem.hpp" #include "openPMD/auxiliary/Memory.hpp" #include "openPMD/auxiliary/Option.hpp" #include "openPMD/auxiliary/StringManip.hpp" #include "openPMD/backend/Writable.hpp" -#include "openPMD/Datatype.hpp" -#include "openPMD/DatatypeHelpers.hpp" -#include "openPMD/IO/JSON/JSONIOHandlerImpl.hpp" #include #include - namespace openPMD { #if openPMD_USE_VERIFY -# define VERIFY( CONDITION, TEXT ) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } #else -# define VERIFY( CONDITION, TEXT ) do{ (void)sizeof(CONDITION); } while( 0 ); +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0); #endif -#define VERIFY_ALWAYS( CONDITION, TEXT ) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } - +#define VERIFY_ALWAYS(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } - JSONIOHandlerImpl::JSONIOHandlerImpl( AbstractIOHandler * handler ) : - AbstractIOHandlerImpl( handler ) - {} +JSONIOHandlerImpl::JSONIOHandlerImpl(AbstractIOHandler *handler) + : AbstractIOHandlerImpl(handler) +{} +JSONIOHandlerImpl::~JSONIOHandlerImpl() +{ + // we must not throw in a destructor + try + { + flush(); + } + catch (std::exception const &ex) + { + std::cerr << "[~JSONIOHandlerImpl] An error occurred: " << ex.what() + << std::endl; + } + catch (...) + { + std::cerr << "[~JSONIOHandlerImpl] An error occurred." << std::endl; + } +} - JSONIOHandlerImpl::~JSONIOHandlerImpl( ) +std::future JSONIOHandlerImpl::flush() +{ + AbstractIOHandlerImpl::flush(); + for (auto const &file : m_dirty) { - // we must not throw in a destructor - try - { - flush( ); - } - catch( std::exception const & ex ) - { - std::cerr << "[~JSONIOHandlerImpl] An error occurred: " << ex.what() << std::endl; - } - catch( ... ) - { - std::cerr << "[~JSONIOHandlerImpl] An error occurred." << std::endl; - } + putJsonContents(file, false); } + m_dirty.clear(); + return std::future(); +} +void JSONIOHandlerImpl::createFile( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Creating a file in read-only mode is not possible."); - std::future< void > JSONIOHandlerImpl::flush( ) + if (!writable->written) { - AbstractIOHandlerImpl::flush( ); - for( auto const & file: m_dirty ) + std::string name = parameters.name; + if (!auxiliary::ends_with(name, ".json")) { - putJsonContents( - file, - false - ); + name += ".json"; } - m_dirty.clear( ); - return std::future< void >( ); - } + auto res_pair = getPossiblyExisting(name); + File shared_name = File(name); + VERIFY_ALWAYS( + !(m_handler->m_backendAccess == Access::READ_WRITE && + (!std::get<2>(res_pair) || + auxiliary::file_exists(fullPath(std::get<0>(res_pair))))), + "[JSON] Can only overwrite existing file in CREATE mode."); - void JSONIOHandlerImpl::createFile( - Writable * writable, - Parameter< Operation::CREATE_FILE > const & parameters - ) - { - VERIFY_ALWAYS(m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Creating a file in read-only mode is not possible." 
); + if (!std::get<2>(res_pair)) + { + auto file = std::get<0>(res_pair); + m_dirty.erase(file); + m_jsonVals.erase(file); + file.invalidate(); + } - if( !writable->written ) + std::string const dir(m_handler->directory); + if (!auxiliary::directory_exists(dir)) { - std::string name = parameters.name; - if( !auxiliary::ends_with( - name, - ".json" - ) ) - { - name += ".json"; - } + auto success = auxiliary::create_directories(dir); + VERIFY(success, "[JSON] Could not create directory."); + } - auto res_pair = getPossiblyExisting( name ); - File shared_name = File( name ); - VERIFY_ALWAYS( !(m_handler->m_backendAccess == Access::READ_WRITE && - ( !std::get< 2 >( res_pair ) || - auxiliary::file_exists( fullPath( std::get< 0 >( res_pair ) ) ) ) ), - "[JSON] Can only overwrite existing file in CREATE mode." ); + associateWithFile(writable, shared_name); + this->m_dirty.emplace(shared_name); + // make sure to overwrite! + this->m_jsonVals[shared_name] = std::make_shared(); - if( !std::get< 2 >( res_pair ) ) - { - auto file = std::get< 0 >( res_pair ); - m_dirty.erase( file ); - m_jsonVals.erase( file ); - file.invalidate( ); - } + writable->written = true; + writable->abstractFilePosition = std::make_shared(); + } +} - std::string const dir( m_handler->directory ); - if( !auxiliary::directory_exists( dir ) ) - { - auto success = auxiliary::create_directories( dir ); - VERIFY( success, - "[JSON] Could not create directory." ); - } +void JSONIOHandlerImpl::createPath( + Writable *writable, Parameter const ¶meter) +{ + std::string path = parameter.path; + /* Sanitize: + * The JSON API does not like to have slashes in the end. + */ + if (auxiliary::ends_with(path, "/")) + { + path = auxiliary::replace_last(path, "/", ""); + } - associateWithFile( - writable, - shared_name - ); - this->m_dirty - .emplace( shared_name ); - // make sure to overwrite! - this->m_jsonVals[shared_name] = - std::make_shared< nlohmann::json >( ); + auto file = refreshFileFromParent(writable); + auto *jsonVal = &*obtainJsonContents(file); + if (!auxiliary::starts_with(path, "/")) + { // path is relative + auto filepos = setAndGetFilePosition(writable, false); - writable->written = true; - writable->abstractFilePosition = - std::make_shared< JSONFilePosition >( ); - } + jsonVal = &(*jsonVal)[filepos->id]; + ensurePath(jsonVal, path); + path = filepos->id.to_string() + "/" + path; + } + else + { + + ensurePath(jsonVal, path); } + m_dirty.emplace(file); + writable->written = true; + writable->abstractFilePosition = + std::make_shared(nlohmann::json::json_pointer(path)); +} - void JSONIOHandlerImpl::createPath( - Writable * writable, - Parameter< Operation::CREATE_PATH > const & parameter - ) +void JSONIOHandlerImpl::createDataset( + Writable *writable, Parameter const ¶meter) +{ + if (m_handler->m_backendAccess == Access::READ_ONLY) { - std::string path = parameter.path; - /* Sanitize: - * The JSON API does not like to have slashes in the end. 
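     * (For example, a trailing slash as in "meshes/E/" is dropped here, so
     * the path is handled as "meshes/E"; the path name is illustrative only.)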
- */ - if( auxiliary::ends_with( - path, - "/" - ) ) - { - path = auxiliary::replace_last( - path, - "/", - "" - ); - } + throw std::runtime_error( + "[JSON] Creating a dataset in a file opened as read only is not " + "possible."); + } + if (!writable->written) + { + /* Sanitize name */ + std::string name = removeSlashes(parameter.name); - auto file = refreshFileFromParent( writable ); - - auto * jsonVal = &*obtainJsonContents( file ); - if( !auxiliary::starts_with( - path, - "/" - ) ) - { // path is relative - auto filepos = setAndGetFilePosition( - writable, - false - ); - - jsonVal = &( *jsonVal )[filepos->id]; - ensurePath( - jsonVal, - path - ); - path = - filepos->id - .to_string( ) + "/" + path; + auto file = refreshFileFromParent(writable); + setAndGetFilePosition(writable); + auto &jsonVal = obtainJsonContents(writable); + // be sure to have a JSON object, not a list + if (jsonVal.empty()) + { + jsonVal = nlohmann::json::object(); } - else + setAndGetFilePosition(writable, name); + auto &dset = jsonVal[name]; + dset["datatype"] = datatypeToString(parameter.dtype); + switch (parameter.dtype) { - - ensurePath( - jsonVal, - path - ); + case Datatype::CFLOAT: + case Datatype::CDOUBLE: + case Datatype::CLONG_DOUBLE: { + auto complexExtent = parameter.extent; + complexExtent.push_back(2); + dset["data"] = initializeNDArray(complexExtent); + break; + } + default: + dset["data"] = initializeNDArray(parameter.extent); + break; } - - m_dirty.emplace( file ); writable->written = true; - writable->abstractFilePosition = - std::make_shared< JSONFilePosition >( nlohmann::json::json_pointer( path ) ); + m_dirty.emplace(file); } +} - - void JSONIOHandlerImpl::createDataset( - Writable * writable, - Parameter< Operation::CREATE_DATASET > const & parameter - ) +namespace +{ + void mergeInto(nlohmann::json &into, nlohmann::json &from); + void mergeInto(nlohmann::json &into, nlohmann::json &from) { - if(m_handler->m_backendAccess == Access::READ_ONLY ) + if (!from.is_array()) { - throw std::runtime_error( "[JSON] Creating a dataset in a file opened as read only is not possible." 
); + into = from; // copy } - if( !writable->written ) + else { - /* Sanitize name */ - std::string name = removeSlashes( parameter.name ); - - auto file = refreshFileFromParent( writable ); - setAndGetFilePosition( writable ); - auto & jsonVal = obtainJsonContents( writable ); - // be sure to have a JSON object, not a list - if( jsonVal.empty( ) ) - { - jsonVal = nlohmann::json::object( ); - } - setAndGetFilePosition( - writable, - name - ); - auto & dset = jsonVal[name]; - dset["datatype"] = datatypeToString( parameter.dtype ); - switch( parameter.dtype ) + size_t size = from.size(); + for (size_t i = 0; i < size; ++i) { - case Datatype::CFLOAT: - case Datatype::CDOUBLE: - case Datatype::CLONG_DOUBLE: + if (!from[i].is_null()) { - auto complexExtent = parameter.extent; - complexExtent.push_back( 2 ); - dset["data"] = initializeNDArray( complexExtent ); - break; + mergeInto(into[i], from[i]); } - default: - dset["data"] = initializeNDArray( parameter.extent ); - break; } - writable->written = true; - m_dirty.emplace( file ); } } +} // namespace - namespace +void JSONIOHandlerImpl::extendDataset( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Cannot extend a dataset in read-only mode.") + setAndGetFilePosition(writable); + refreshFileFromParent(writable); + auto &j = obtainJsonContents(writable); + + try { - void - mergeInto( nlohmann::json & into, nlohmann::json & from ); - void - mergeInto( nlohmann::json & into, nlohmann::json & from ) + auto datasetExtent = getExtent(j); + VERIFY_ALWAYS( + datasetExtent.size() == parameters.extent.size(), + "[JSON] Cannot change dimensionality of a dataset") + for (size_t currentdim = 0; currentdim < parameters.extent.size(); + currentdim++) { - if( !from.is_array() ) - { - into = from; // copy - } - else - { - size_t size = from.size(); - for( size_t i = 0; i < size; ++i ) - { - if( !from[ i ].is_null() ) - { - mergeInto( into[ i ], from[ i ] ); - } - } - } + VERIFY_ALWAYS( + datasetExtent[currentdim] <= parameters.extent[currentdim], + "[JSON] Cannot shrink the extent of a dataset") } - } // namespace - - void - JSONIOHandlerImpl::extendDataset( - Writable * writable, - Parameter< Operation::EXTEND_DATASET > const & parameters ) + } + catch (json::basic_json::type_error &) { - VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Cannot extend a dataset in read-only mode." 
) - setAndGetFilePosition( writable ); - refreshFileFromParent( writable ); - auto & j = obtainJsonContents( writable ); + throw std::runtime_error( + "[JSON] The specified location contains no valid dataset"); + } + switch (stringToDatatype(j["datatype"].get())) + { + case Datatype::CFLOAT: + case Datatype::CDOUBLE: + case Datatype::CLONG_DOUBLE: { + // @todo test complex resizing + auto complexExtent = parameters.extent; + complexExtent.push_back(2); + nlohmann::json newData = initializeNDArray(complexExtent); + nlohmann::json &oldData = j["data"]; + mergeInto(newData, oldData); + j["data"] = newData; + break; + } + default: + nlohmann::json newData = initializeNDArray(parameters.extent); + nlohmann::json &oldData = j["data"]; + mergeInto(newData, oldData); + j["data"] = newData; + break; + } + writable->written = true; +} - try - { - auto datasetExtent = getExtent( j ); - VERIFY_ALWAYS( datasetExtent.size( ) == - parameters.extent - .size( ), - "[JSON] Cannot change dimensionality of a dataset" ) - for( size_t currentdim = 0; - currentdim < - parameters.extent - .size( ); - currentdim++ ) - { - VERIFY_ALWAYS( datasetExtent[currentdim] <= - parameters.extent[currentdim], - "[JSON] Cannot shrink the extent of a dataset" ) - } - } catch( json::basic_json::type_error & ) +namespace +{ + // pre-declare since this one is recursive + ChunkTable chunksInJSON(nlohmann::json const &); + ChunkTable chunksInJSON(nlohmann::json const &j) + { + /* + * Idea: + * Iterate (n-1)-dimensional hyperslabs line by line and query + * their chunks recursively. + * If two or more successive (n-1)-dimensional slabs return the + * same chunktable, they can be merged as one chunk. + * + * Notice that this approach is simple, relatively easily + * implemented, but not ideal, since chunks that overlap in some + * dimensions may be ripped apart: + * + * 0123 + * 0 ____ + * 1 ____ + * 2 **__ + * 3 **__ + * 4 **__ + * 5 **__ + * 6 **__ + * 7 **_* + * 8 ___* + * 9 ___* + * + * Since both of the drawn chunks overlap on line 7, this approach + * will return 4 chunks: + * offset - extent + * (2,0) - (4,2) + * (7,0) - (1,2) + * (7,3) - (1,1) + * (8,3) - (2,1) + * + * Hence, in a second phase, the mergeChunks function below will + * merge things back up. + */ + if (!j.is_array()) { - throw std::runtime_error( - "[JSON] The specified location contains no valid dataset" ); + return ChunkTable{WrittenChunkInfo(Offset{}, Extent{})}; } - switch( stringToDatatype( j[ "datatype" ].get< std::string >() ) ) + ChunkTable res; + size_t it = 0; + size_t end = j.size(); + while (it < end) { - case Datatype::CFLOAT: - case Datatype::CDOUBLE: - case Datatype::CLONG_DOUBLE: + // skip empty slots + while (it < end && j[it].is_null()) { - // @todo test complex resizing - auto complexExtent = parameters.extent; - complexExtent.push_back( 2 ); - nlohmann::json newData = initializeNDArray( complexExtent ); - nlohmann::json & oldData = j[ "data" ]; - mergeInto( newData, oldData ); - j[ "data" ] = newData; - break; + ++it; } - default: - nlohmann::json newData = initializeNDArray( parameters.extent ); - nlohmann::json & oldData = j[ "data" ]; - mergeInto( newData, oldData ); - j[ "data" ] = newData; - break; - } - writable->written = true; - } - - namespace - { - // pre-declare since this one is recursive - ChunkTable - chunksInJSON( nlohmann::json const & ); - ChunkTable - chunksInJSON( nlohmann::json const & j ) - { - /* - * Idea: - * Iterate (n-1)-dimensional hyperslabs line by line and query - * their chunks recursively. 
- * If two or more successive (n-1)-dimensional slabs return the - * same chunktable, they can be merged as one chunk. - * - * Notice that this approach is simple, relatively easily - * implemented, but not ideal, since chunks that overlap in some - * dimensions may be ripped apart: - * - * 0123 - * 0 ____ - * 1 ____ - * 2 **__ - * 3 **__ - * 4 **__ - * 5 **__ - * 6 **__ - * 7 **_* - * 8 ___* - * 9 ___* - * - * Since both of the drawn chunks overlap on line 7, this approach - * will return 4 chunks: - * offset - extent - * (2,0) - (4,2) - * (7,0) - (1,2) - * (7,3) - (1,1) - * (8,3) - (2,1) - * - * Hence, in a second phase, the mergeChunks function below will - * merge things back up. - */ - if( !j.is_array() ) + if (it == end) { - return ChunkTable{ WrittenChunkInfo( Offset{}, Extent{} ) }; + break; } - ChunkTable res; - size_t it = 0; - size_t end = j.size(); - while( it < end ) + // get chunking at current position + // and additionally, number of successive rows with the same + // recursive results + size_t const offset = it; + ChunkTable referenceTable = chunksInJSON(j[it]); + ++it; + for (; it < end; ++it) { - // skip empty slots - while( it < end && j[ it ].is_null() ) + if (j[it].is_null()) { - ++it; + break; } - if( it == end ) + ChunkTable currentTable = chunksInJSON(j[it]); + if (currentTable != referenceTable) { break; } - // get chunking at current position - // and additionally, number of successive rows with the same - // recursive results - size_t const offset = it; - ChunkTable referenceTable = chunksInJSON( j[ it ] ); - ++it; - for( ; it < end; ++it ) + } + size_t const extent = it - offset; // sic! no -1 + // now we know the number of successive rows with same rec. + // results, let's extend these results to include dimension 0 + for (auto const &chunk : referenceTable) + { + Offset o = {offset}; + Extent e = {extent}; + for (auto entry : chunk.offset) { - if( j[ it ].is_null() ) - { - break; - } - ChunkTable currentTable = chunksInJSON( j[ it ] ); - if( currentTable != referenceTable ) - { - break; - } + o.push_back(entry); } - size_t const extent = it - offset; // sic! no -1 - // now we know the number of successive rows with same rec. - // results, let's extend these results to include dimension 0 - for( auto const & chunk : referenceTable ) + for (auto entry : chunk.extent) { - Offset o = { offset }; - Extent e = { extent }; - for( auto entry : chunk.offset ) - { - o.push_back( entry ); - } - for( auto entry : chunk.extent ) - { - e.push_back( entry ); - } - res.emplace_back( - std::move( o ), std::move( e ), chunk.sourceID ); + e.push_back(entry); } + res.emplace_back(std::move(o), std::move(e), chunk.sourceID); } - return res; } + return res; + } + /* + * Check whether two chunks can be merged to form a large one + * and optionally return that larger chunk + */ + auxiliary::Option + mergeChunks(WrittenChunkInfo const &chunk1, WrittenChunkInfo const &chunk2) + { /* - * Check whether two chunks can be merged to form a large one - * and optionally return that larger chunk + * Idea: + * If two chunks can be merged into one, they agree on offsets and + * extents in all but exactly one dimension dim. + * At dimension dim, the offset of chunk 2 is equal to the offset + * of chunk 1 plus its extent -- or vice versa. 
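         *
         * For example (an illustration of this criterion, not taken from the
         * surrounding code): chunk1 with offset (2,0) and extent (4,2) and
         * chunk2 with offset (6,0) and extent (3,2) agree in dimension 1 and
         * touch in dimension 0, since 2 + 4 == 6; they can therefore be
         * merged into a single chunk with offset (2,0) and extent (7,2).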
*/ - auxiliary::Option< WrittenChunkInfo > - mergeChunks( - WrittenChunkInfo const & chunk1, - WrittenChunkInfo const & chunk2 ) - { - /* - * Idea: - * If two chunks can be merged into one, they agree on offsets and - * extents in all but exactly one dimension dim. - * At dimension dim, the offset of chunk 2 is equal to the offset - * of chunk 1 plus its extent -- or vice versa. - */ - unsigned dimensionality = chunk1.extent.size(); - for( unsigned dim = 0; dim < dimensionality; ++dim ) + unsigned dimensionality = chunk1.extent.size(); + for (unsigned dim = 0; dim < dimensionality; ++dim) + { + WrittenChunkInfo const *c1(&chunk1), *c2(&chunk2); + // check if one chunk is the extension of the other at + // dimension dim + // first, let's put things in order + if (c1->offset[dim] > c2->offset[dim]) { - WrittenChunkInfo const *c1( &chunk1 ), *c2( &chunk2 ); - // check if one chunk is the extension of the other at - // dimension dim - // first, let's put things in order - if( c1->offset[ dim ] > c2->offset[ dim ] ) - { - std::swap( c1, c2 ); - } - // now, c1 begins at the lower of both offsets - // next check, that both chunks border one another exactly - if( c2->offset[ dim ] != c1->offset[ dim ] + c1->extent[ dim ] ) + std::swap(c1, c2); + } + // now, c1 begins at the lower of both offsets + // next check, that both chunks border one another exactly + if (c2->offset[dim] != c1->offset[dim] + c1->extent[dim]) + { + continue; + } + // we've got a candidate + // verify that all other dimensions have equal values + auto equalValues = [dimensionality, dim, c1, c2]() { + for (unsigned j = 0; j < dimensionality; ++j) { - continue; - } - // we've got a candidate - // verify that all other dimensions have equal values - auto equalValues = [ dimensionality, dim, c1, c2 ]() { - for( unsigned j = 0; j < dimensionality; ++j ) + if (j == dim) { - if( j == dim ) - { - continue; - } - if( c1->offset[ j ] != c2->offset[ j ] || - c1->extent[ j ] != c2->extent[ j ] ) - { - return false; - } + continue; + } + if (c1->offset[j] != c2->offset[j] || + c1->extent[j] != c2->extent[j]) + { + return false; } - return true; - }; - if( !equalValues() ) - { - continue; } - // we can merge the chunks - Offset offset( c1->offset ); - Extent extent( c1->extent ); - extent[ dim ] += c2->extent[ dim ]; - return auxiliary::makeOption( - WrittenChunkInfo( offset, extent ) ); + return true; + }; + if (!equalValues()) + { + continue; } - return auxiliary::Option< WrittenChunkInfo >(); + // we can merge the chunks + Offset offset(c1->offset); + Extent extent(c1->extent); + extent[dim] += c2->extent[dim]; + return auxiliary::makeOption(WrittenChunkInfo(offset, extent)); } + return auxiliary::Option(); + } - /* - * Merge chunks in the chunktable until no chunks are left that can be - * merged. - */ - void - mergeChunks( ChunkTable & table ) + /* + * Merge chunks in the chunktable until no chunks are left that can be + * merged. + */ + void mergeChunks(ChunkTable &table) + { + bool stillChanging; + do { - bool stillChanging; - do - { - stillChanging = false; - auto innerLoops = [ &table ]() { - /* - * Iterate over pairs of chunks in the table. - * When a pair that can be merged is found, merge it, - * delete the original two chunks from the table, - * put the new one in and return. - */ - for( auto i = table.begin(); i < table.end(); ++i ) + stillChanging = false; + auto innerLoops = [&table]() { + /* + * Iterate over pairs of chunks in the table. 
+ * When a pair that can be merged is found, merge it, + * delete the original two chunks from the table, + * put the new one in and return. + */ + for (auto i = table.begin(); i < table.end(); ++i) + { + for (auto j = i + 1; j < table.end(); ++j) { - for( auto j = i + 1; j < table.end(); ++j ) + auxiliary::Option merged = + mergeChunks(*i, *j); + if (merged) { - auxiliary::Option< WrittenChunkInfo > merged = - mergeChunks( *i, *j ); - if( merged ) - { - // erase order is important due to iterator - // invalidation - table.erase( j ); - table.erase( i ); - table.emplace_back( - std::move( merged.get() ) ); - return true; - } + // erase order is important due to iterator + // invalidation + table.erase(j); + table.erase(i); + table.emplace_back(std::move(merged.get())); + return true; } } - return false; - }; - stillChanging = innerLoops(); - } while( stillChanging ); - } - } // namespace + } + return false; + }; + stillChanging = innerLoops(); + } while (stillChanging); + } +} // namespace - void - JSONIOHandlerImpl::availableChunks( - Writable * writable, - Parameter< Operation::AVAILABLE_CHUNKS > & parameters ) +void JSONIOHandlerImpl::availableChunks( + Writable *writable, Parameter ¶meters) +{ + refreshFileFromParent(writable); + auto filePosition = setAndGetFilePosition(writable); + auto &j = obtainJsonContents(writable)["data"]; + *parameters.chunks = chunksInJSON(j); + mergeChunks(*parameters.chunks); +} + +void JSONIOHandlerImpl::openFile( + Writable *writable, Parameter const ¶meter) +{ + if (!auxiliary::directory_exists(m_handler->directory)) { - refreshFileFromParent( writable ); - auto filePosition = setAndGetFilePosition( writable ); - auto & j = obtainJsonContents( writable )[ "data" ]; - *parameters.chunks = chunksInJSON( j ); - mergeChunks( *parameters.chunks ); + throw no_such_file_error( + "[JSON] Supplied directory is not valid: " + m_handler->directory); } - void JSONIOHandlerImpl::openFile( - Writable * writable, - Parameter< Operation::OPEN_FILE > const & parameter - ) + std::string name = parameter.name; + if (!auxiliary::ends_with(name, ".json")) { - if( !auxiliary::directory_exists( m_handler->directory ) ) - { - throw no_such_file_error( - "[JSON] Supplied directory is not valid: " + m_handler->directory - ); - } + name += ".json"; + } - std::string name = parameter.name; - if( !auxiliary::ends_with( - name, - ".json" - ) ) - { - name += ".json"; - } + auto file = std::get<0>(getPossiblyExisting(name)); - auto file = std::get< 0 >( getPossiblyExisting( name ) ); + associateWithFile(writable, file); - associateWithFile( - writable, - file - ); + writable->written = true; + writable->abstractFilePosition = std::make_shared(); +} - writable->written = true; - writable->abstractFilePosition = - std::make_shared< JSONFilePosition >( ); +void JSONIOHandlerImpl::closeFile( + Writable *writable, Parameter const &) +{ + auto fileIterator = m_files.find(writable); + if (fileIterator != m_files.end()) + { + putJsonContents(fileIterator->second); + // do not invalidate the file + // it still exists, it is just not open + m_files.erase(fileIterator); } +} +void JSONIOHandlerImpl::openPath( + Writable *writable, Parameter const ¶meters) +{ + auto file = refreshFileFromParent(writable); + + nlohmann::json *j = &obtainJsonContents(writable->parent); + auto path = removeSlashes(parameters.path); + path = path.empty() ? 
filepositionOf(writable->parent) + : filepositionOf(writable->parent) + "/" + path; - void JSONIOHandlerImpl::closeFile( - Writable * writable, - Parameter< Operation::CLOSE_FILE > const & - ) + if (writable->abstractFilePosition) { - auto fileIterator = m_files.find( writable ); - if ( fileIterator != m_files.end( ) ) - { - putJsonContents( fileIterator->second ); - // do not invalidate the file - // it still exists, it is just not open - m_files.erase( fileIterator ); - } + *setAndGetFilePosition(writable, false) = + JSONFilePosition(json::json_pointer(path)); } - - - void JSONIOHandlerImpl::openPath( - Writable * writable, - Parameter< Operation::OPEN_PATH > const & parameters - ) + else { - auto file = refreshFileFromParent( writable ); - - nlohmann::json * j = &obtainJsonContents( writable->parent ); - auto path = removeSlashes( parameters.path ); - path = - path.empty( ) - ? filepositionOf( writable->parent ) - : filepositionOf( writable->parent ) + "/" + path; + writable->abstractFilePosition = + std::make_shared(json::json_pointer(path)); + } - if( writable->abstractFilePosition ) - { - *setAndGetFilePosition( - writable, - false - ) = JSONFilePosition( json::json_pointer( path ) ); - } - else - { - writable->abstractFilePosition = - std::make_shared< JSONFilePosition >( json::json_pointer( path ) ); - } + ensurePath(j, removeSlashes(parameters.path)); - ensurePath( - j, - removeSlashes( parameters.path ) - ); + writable->written = true; +} - writable->written = true; +void JSONIOHandlerImpl::openDataset( + Writable *writable, Parameter ¶meters) +{ + refreshFileFromParent(writable); + auto name = removeSlashes(parameters.name); + auto &datasetJson = obtainJsonContents(writable->parent)[name]; + /* + * If the dataset has been opened previously, the path needs not be + * set again. + */ + if (!writable->abstractFilePosition) + { + setAndGetFilePosition(writable, name); } + *parameters.dtype = + Datatype(stringToDatatype(datasetJson["datatype"].get())); + *parameters.extent = getExtent(datasetJson); + writable->written = true; +} - void JSONIOHandlerImpl::openDataset( - Writable * writable, - Parameter< Operation::OPEN_DATASET > & parameters - ) - { - refreshFileFromParent( writable ); - auto name = removeSlashes( parameters.name ); - auto & datasetJson = obtainJsonContents( writable->parent )[name]; - /* - * If the dataset has been opened previously, the path needs not be - * set again. - */ - if(! writable->abstractFilePosition ) - { - setAndGetFilePosition( - writable, - name - ); - } +void JSONIOHandlerImpl::deleteFile( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Cannot delete files in read-only mode") - *parameters.dtype = - Datatype( stringToDatatype( datasetJson["datatype"].get< std::string >( ) ) ); - *parameters.extent = getExtent( datasetJson ); - writable->written = true; + if (!writable->written) + { + return; } + auto filename = auxiliary::ends_with(parameters.name, ".json") + ? 
parameters.name + : parameters.name + ".json"; - void JSONIOHandlerImpl::deleteFile( - Writable * writable, - Parameter< Operation::DELETE_FILE > const & parameters - ) + auto tuple = getPossiblyExisting(filename); + if (!std::get<2>(tuple)) { - VERIFY_ALWAYS(m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Cannot delete files in read-only mode" ) - - if( !writable->written ) - { - return; - } + // file is already in the system + auto file = std::get<0>(tuple); + m_dirty.erase(file); + m_jsonVals.erase(file); + file.invalidate(); + } - auto filename = auxiliary::ends_with( - parameters.name, - ".json" - ) ? parameters.name : parameters.name + ".json"; + std::remove(fullPath(filename).c_str()); - auto tuple = getPossiblyExisting( filename ); - if( !std::get< 2 >( tuple ) ) - { - // file is already in the system - auto file = std::get< 0 >( tuple ); - m_dirty.erase( file ); - m_jsonVals.erase( file ); - file.invalidate( ); - } + writable->written = false; +} - std::remove( fullPath( filename ).c_str( ) ); +void JSONIOHandlerImpl::deletePath( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Cannot delete paths in read-only mode") - writable->written = false; + if (!writable->written) + { + return; } - - void JSONIOHandlerImpl::deletePath( - Writable * writable, - Parameter< Operation::DELETE_PATH > const & parameters - ) + VERIFY_ALWAYS( + !auxiliary::starts_with(parameters.path, '/'), + "[JSON] Paths passed for deletion should be relative, the given path " + "is absolute (starts with '/')") + auto file = refreshFileFromParent(writable); + auto filepos = setAndGetFilePosition(writable, false); + auto path = removeSlashes(parameters.path); + VERIFY(!path.empty(), "[JSON] No path passed for deletion.") + nlohmann::json *j; + if (path == ".") { - VERIFY_ALWAYS(m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Cannot delete paths in read-only mode" ) - - if( !writable->written ) + auto s = filepos->id.to_string(); + if (s == "/") { - return; + throw std::runtime_error("[JSON] Cannot delete the root group"); } - VERIFY_ALWAYS( !auxiliary::starts_with( - parameters.path, - '/' - ), - "[JSON] Paths passed for deletion should be relative, the given path is absolute (starts with '/')" ) - auto file = refreshFileFromParent( writable ); - auto filepos = setAndGetFilePosition( - writable, - false - ); - auto path = removeSlashes( parameters.path ); - VERIFY( !path.empty( ), - "[JSON] No path passed for deletion." ) - nlohmann::json * j; - if( path == "." 
) - { - auto - s = - filepos->id - .to_string( ); - if( s == "/" ) - { - throw std::runtime_error( "[JSON] Cannot delete the root group" ); - } + auto i = s.rfind('/'); + path = s; + path.replace(0, i + 1, ""); + // path should now be equal to the name of the current group + // go up one group - auto i = s.rfind( '/' ); - path = s; - path.replace( - 0, - i + 1, - "" - ); - // path should now be equal to the name of the current group - // go up one group - - // go to parent directory - // parent exists since we have verified that the current - // directory is != root - parentDir( s ); - j = - &( *obtainJsonContents( file ) )[nlohmann::json::json_pointer( s )]; - } - else + // go to parent directory + // parent exists since we have verified that the current + // directory is != root + parentDir(s); + j = &(*obtainJsonContents(file))[nlohmann::json::json_pointer(s)]; + } + else + { + if (auxiliary::starts_with(path, "./")) { - if( auxiliary::starts_with( - path, - "./" - ) ) - { - path = auxiliary::replace_first( - path, - "./", - "" - ); - } - j = &obtainJsonContents( writable ); + path = auxiliary::replace_first(path, "./", ""); } - nlohmann::json * lastPointer = j; - bool needToDelete = true; - auto splitPath = auxiliary::split( - path, - "/" - ); - // be careful not to create the group by accident - // the loop will execute at least once - for( auto const & folder: splitPath ) + j = &obtainJsonContents(writable); + } + nlohmann::json *lastPointer = j; + bool needToDelete = true; + auto splitPath = auxiliary::split(path, "/"); + // be careful not to create the group by accident + // the loop will execute at least once + for (auto const &folder : splitPath) + { + auto it = j->find(folder); + if (it == j->end()) { - auto it = j->find( folder ); - if( it == j->end( ) ) - { - needToDelete = false; - break; - } - else - { - lastPointer = j; - j = &it.value( ); - } + needToDelete = false; + break; } - if( needToDelete ) + else { - lastPointer->erase( - splitPath[splitPath.size( ) - 1] - ); + lastPointer = j; + j = &it.value(); } - - putJsonContents( file ); - writable->abstractFilePosition - .reset( ); - writable->written = false; } - - - void JSONIOHandlerImpl::deleteDataset( - Writable * writable, - Parameter< Operation::DELETE_DATASET > const & parameters - ) + if (needToDelete) { - VERIFY_ALWAYS(m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Cannot delete datasets in read-only mode" ) + lastPointer->erase(splitPath[splitPath.size() - 1]); + } - if( !writable->written ) - { - return; - } + putJsonContents(file); + writable->abstractFilePosition.reset(); + writable->written = false; +} - auto filepos = setAndGetFilePosition( - writable, - false - ); +void JSONIOHandlerImpl::deleteDataset( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Cannot delete datasets in read-only mode") - auto file = refreshFileFromParent( writable ); - auto dataset = removeSlashes( parameters.name ); - nlohmann::json * parent; - if( dataset == "." ) - { - auto - s = - filepos->id - .to_string( ); - if( s.empty( ) ) - { - throw std::runtime_error( "[JSON] Invalid position for a dataset in the JSON file." 
); - } - dataset = s; - auto i = dataset.rfind( '/' ); - dataset.replace( - 0, - i + 1, - "" - ); - - parentDir( s ); - parent = - &( *obtainJsonContents( file ) )[nlohmann::json::json_pointer( s )]; - } - else - { - parent = &obtainJsonContents( writable ); - } - parent->erase( dataset ); - putJsonContents( file ); - writable->written = false; - writable->abstractFilePosition - .reset( ); + if (!writable->written) + { + return; } + auto filepos = setAndGetFilePosition(writable, false); - void JSONIOHandlerImpl::deleteAttribute( - Writable * writable, - Parameter< Operation::DELETE_ATT > const & parameters - ) + auto file = refreshFileFromParent(writable); + auto dataset = removeSlashes(parameters.name); + nlohmann::json *parent; + if (dataset == ".") { - VERIFY_ALWAYS(m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Cannot delete attributes in read-only mode" ) - if( !writable->written ) + auto s = filepos->id.to_string(); + if (s.empty()) { - return; + throw std::runtime_error( + "[JSON] Invalid position for a dataset in the JSON file."); } - setAndGetFilePosition( writable ); - auto file = refreshFileFromParent( writable ); - auto & j = obtainJsonContents( writable ); - j.erase( parameters.name ); - putJsonContents( file ); - } - + dataset = s; + auto i = dataset.rfind('/'); + dataset.replace(0, i + 1, ""); - void JSONIOHandlerImpl::writeDataset( - Writable * writable, - Parameter< Operation::WRITE_DATASET > const & parameters - ) + parentDir(s); + parent = &(*obtainJsonContents(file))[nlohmann::json::json_pointer(s)]; + } + else { - VERIFY_ALWAYS(m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Cannot write data in read-only mode." ); + parent = &obtainJsonContents(writable); + } + parent->erase(dataset); + putJsonContents(file); + writable->written = false; + writable->abstractFilePosition.reset(); +} + +void JSONIOHandlerImpl::deleteAttribute( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Cannot delete attributes in read-only mode") + if (!writable->written) + { + return; + } + setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable); + auto &j = obtainJsonContents(writable); + j.erase(parameters.name); + putJsonContents(file); +} + +void JSONIOHandlerImpl::writeDataset( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Cannot write data in read-only mode."); - auto pos = setAndGetFilePosition( writable ); - auto file = refreshFileFromParent( writable ); - auto & j = obtainJsonContents( writable ); + auto pos = setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable); + auto &j = obtainJsonContents(writable); - verifyDataset( - parameters, - j - ); + verifyDataset(parameters, j); + DatasetWriter dw; + switchType(parameters.dtype, dw, j, parameters); - DatasetWriter dw; - switchType( - parameters.dtype, - dw, - j, - parameters - ); + writable->written = true; + putJsonContents(file); +} - writable->written = true; - putJsonContents( file ); +void JSONIOHandlerImpl::writeAttribute( + Writable *writable, Parameter const ¶meter) +{ + if (m_handler->m_backendAccess == Access::READ_ONLY) + { + throw std::runtime_error( + "[JSON] Creating a dataset in a file opened as read only is not " + "possible."); } + /* Sanitize name */ + std::string name = removeSlashes(parameter.name); - void JSONIOHandlerImpl::writeAttribute( - Writable * writable, - Parameter< 
Operation::WRITE_ATT > const & parameter - ) + auto file = refreshFileFromParent(writable); + auto jsonVal = obtainJsonContents(file); + auto filePosition = setAndGetFilePosition(writable); + if ((*jsonVal)[filePosition->id]["attributes"].empty()) { - if(m_handler->m_backendAccess == Access::READ_ONLY ) - { - throw std::runtime_error( "[JSON] Creating a dataset in a file opened as read only is not possible." ); - } - - /* Sanitize name */ - std::string name = removeSlashes( parameter.name ); - - auto file = refreshFileFromParent( writable ); - auto jsonVal = obtainJsonContents( file ); - auto filePosition = setAndGetFilePosition( writable ); - if( ( *jsonVal )[filePosition->id]["attributes"].empty( ) ) - { - ( *jsonVal )[filePosition->id]["attributes"] = - nlohmann::json::object( ); - } - nlohmann::json value; - AttributeWriter aw; - switchType( - parameter.dtype, - aw, - value, - parameter.resource - ); - ( *jsonVal )[filePosition->id]["attributes"][parameter.name] = { - { - "datatype", - datatypeToString( parameter.dtype ) - }, - { - "value", - value - } - }; - writable->written = true; - m_dirty.emplace( file ); + (*jsonVal)[filePosition->id]["attributes"] = nlohmann::json::object(); } + nlohmann::json value; + AttributeWriter aw; + switchType(parameter.dtype, aw, value, parameter.resource); + (*jsonVal)[filePosition->id]["attributes"][parameter.name] = { + {"datatype", datatypeToString(parameter.dtype)}, {"value", value}}; + writable->written = true; + m_dirty.emplace(file); +} + +void JSONIOHandlerImpl::readDataset( + Writable *writable, Parameter ¶meters) +{ + refreshFileFromParent(writable); + setAndGetFilePosition(writable); + auto &j = obtainJsonContents(writable); + verifyDataset(parameters, j); - - void JSONIOHandlerImpl::readDataset( - Writable * writable, - Parameter< Operation::READ_DATASET > & parameters - ) + try { - refreshFileFromParent( writable ); - setAndGetFilePosition( writable ); - auto & j = obtainJsonContents( writable ); - verifyDataset( - parameters, - j - ); - - try - { - DatasetReader dr; - switchType( - parameters.dtype, - dr, - j["data"], - parameters - ); - } catch( json::basic_json::type_error & ) - { - throw std::runtime_error( "[JSON] The given path does not contain a valid dataset." ); - } + DatasetReader dr; + switchType(parameters.dtype, dr, j["data"], parameters); } - - - void JSONIOHandlerImpl::readAttribute( - Writable * writable, - Parameter< Operation::READ_ATT > & parameters - ) - { - VERIFY_ALWAYS( writable->written, - "[JSON] Attributes have to be written before reading." 
) - refreshFileFromParent( writable ); - auto name = removeSlashes( parameters.name ); - auto & jsonLoc = obtainJsonContents( writable )["attributes"]; - setAndGetFilePosition( writable ); - std::string error_msg("[JSON] No such attribute '"); - error_msg.append(name) - .append("' in the given location '") - .append(jsonLoc.dump()) - .append("'."); - VERIFY_ALWAYS( hasKey( - jsonLoc, - name - ), error_msg ) - auto & j = jsonLoc[name]; - try - { - *parameters.dtype = - Datatype( stringToDatatype( j["datatype"].get< std::string >( ) ) ); - AttributeReader ar; - switchType( - *parameters.dtype, - ar, - j["value"], - parameters - ); - } catch( json::type_error & ) - { - throw std::runtime_error( "[JSON] The given location does not contain a properly formatted attribute" ); - } + catch (json::basic_json::type_error &) + { + throw std::runtime_error( + "[JSON] The given path does not contain a valid dataset."); } +} - - void JSONIOHandlerImpl::listPaths( - Writable * writable, - Parameter< Operation::LIST_PATHS > & parameters - ) +void JSONIOHandlerImpl::readAttribute( + Writable *writable, Parameter ¶meters) +{ + VERIFY_ALWAYS( + writable->written, + "[JSON] Attributes have to be written before reading.") + refreshFileFromParent(writable); + auto name = removeSlashes(parameters.name); + auto &jsonLoc = obtainJsonContents(writable)["attributes"]; + setAndGetFilePosition(writable); + std::string error_msg("[JSON] No such attribute '"); + error_msg.append(name) + .append("' in the given location '") + .append(jsonLoc.dump()) + .append("'."); + VERIFY_ALWAYS(hasKey(jsonLoc, name), error_msg) + auto &j = jsonLoc[name]; + try { - VERIFY_ALWAYS( writable->written, - "[JSON] Values have to be written before reading a directory" ); - auto & j = obtainJsonContents( writable ); - setAndGetFilePosition( writable ); - refreshFileFromParent( writable ); - parameters.paths - ->clear( ); - for( auto it = j.begin( ); it != j.end( ); it++ ) - { - if( isGroup( it ) ) - { - parameters.paths - ->push_back( it.key( ) ); - } - } + *parameters.dtype = + Datatype(stringToDatatype(j["datatype"].get())); + AttributeReader ar; + switchType(*parameters.dtype, ar, j["value"], parameters); } - - - void JSONIOHandlerImpl::listDatasets( - Writable * writable, - Parameter< Operation::LIST_DATASETS > & parameters - ) + catch (json::type_error &) { - VERIFY_ALWAYS( writable->written, - "[JSON] Datasets have to be written before reading." ) - refreshFileFromParent( writable ); - auto filePosition = setAndGetFilePosition( writable ); - auto & j = obtainJsonContents( writable ); - parameters.datasets - ->clear( ); - for( auto it = j.begin( ); it != j.end( ); it++ ) - { - if( isDataset( it.value() ) ) - { - parameters.datasets - ->push_back( it.key( ) ); - } - } + throw std::runtime_error( + "[JSON] The given location does not contain a properly formatted " + "attribute"); } +} - - void JSONIOHandlerImpl::listAttributes( - Writable * writable, - Parameter< Operation::LIST_ATTS > & parameters - ) +void JSONIOHandlerImpl::listPaths( + Writable *writable, Parameter ¶meters) +{ + VERIFY_ALWAYS( + writable->written, + "[JSON] Values have to be written before reading a directory"); + auto &j = obtainJsonContents(writable); + setAndGetFilePosition(writable); + refreshFileFromParent(writable); + parameters.paths->clear(); + for (auto it = j.begin(); it != j.end(); it++) { - VERIFY_ALWAYS( writable->written, - "[JSON] Attributes have to be written before reading." 
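/*
 * Illustrative sketch (not part of the changeset): writeAttribute() above stores
 * every attribute as an object with a "datatype" string and a "value" entry under
 * the group's "attributes" node, and readAttribute() reads exactly that pair back.
 * This is a standalone nlohmann::json example, not backend code; it assumes that
 * datatypeToString(Datatype::DOUBLE) yields "DOUBLE".
 *
 *     #include <nlohmann/json.hpp>
 *     #include <string>
 *
 *     nlohmann::json attrs;
 *     attrs["timeUnitSI"] = {{"datatype", "DOUBLE"}, {"value", 1.0}};
 *     // reading it back, mirroring readAttribute():
 *     std::string dtype = attrs["timeUnitSI"]["datatype"].get<std::string>();
 *     double value = attrs["timeUnitSI"]["value"].get<double>();
 */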
) - refreshFileFromParent( writable ); - auto filePosition = setAndGetFilePosition( writable ); - auto & j = obtainJsonContents( writable )["attributes"]; - for( auto it = j.begin( ); it != j.end( ); it++ ) + if (isGroup(it)) { - parameters.attributes - ->push_back( it.key( ) ); + parameters.paths->push_back(it.key()); } } +} - - std::shared_ptr< JSONIOHandlerImpl::FILEHANDLE > - JSONIOHandlerImpl::getFilehandle( - File fileName, - Access access - ) +void JSONIOHandlerImpl::listDatasets( + Writable *writable, Parameter ¶meters) +{ + VERIFY_ALWAYS( + writable->written, "[JSON] Datasets have to be written before reading.") + refreshFileFromParent(writable); + auto filePosition = setAndGetFilePosition(writable); + auto &j = obtainJsonContents(writable); + parameters.datasets->clear(); + for (auto it = j.begin(); it != j.end(); it++) { - VERIFY_ALWAYS( fileName.valid( ), - "[JSON] Tried opening a file that has been overwritten or deleted." ) - auto path = fullPath( std::move( fileName ) ); - auto fs = std::make_shared< std::fstream >( ); - switch( access ) + if (isDataset(it.value())) { - case Access::CREATE: - case Access::READ_WRITE: - fs->open( - path, - std::ios_base::out | std::ios_base::trunc - ); - break; - case Access::READ_ONLY: - fs->open( - path, - std::ios_base::in - ); - break; + parameters.datasets->push_back(it.key()); } - VERIFY( fs->good( ), - "[JSON] Failed opening a file" ); - return fs; } +} - - std::string JSONIOHandlerImpl::fullPath( File fileName ) +void JSONIOHandlerImpl::listAttributes( + Writable *writable, Parameter ¶meters) +{ + VERIFY_ALWAYS( + writable->written, + "[JSON] Attributes have to be written before reading.") + refreshFileFromParent(writable); + auto filePosition = setAndGetFilePosition(writable); + auto &j = obtainJsonContents(writable)["attributes"]; + for (auto it = j.begin(); it != j.end(); it++) { - return fullPath( *fileName ); + parameters.attributes->push_back(it.key()); } +} - - std::string JSONIOHandlerImpl::fullPath( std::string const & fileName ) +std::shared_ptr +JSONIOHandlerImpl::getFilehandle(File fileName, Access access) +{ + VERIFY_ALWAYS( + fileName.valid(), + "[JSON] Tried opening a file that has been overwritten or deleted.") + auto path = fullPath(std::move(fileName)); + auto fs = std::make_shared(); + switch (access) { - if( auxiliary::ends_with( - m_handler->directory, - "/" - ) ) - { - return m_handler->directory + fileName; - } - else - { - return m_handler->directory + "/" + fileName; - } + case Access::CREATE: + case Access::READ_WRITE: + fs->open(path, std::ios_base::out | std::ios_base::trunc); + break; + case Access::READ_ONLY: + fs->open(path, std::ios_base::in); + break; } + VERIFY(fs->good(), "[JSON] Failed opening a file '" + path + "'"); + return fs; +} +std::string JSONIOHandlerImpl::fullPath(File fileName) +{ + return fullPath(*fileName); +} - void JSONIOHandlerImpl::parentDir( std::string & s ) +std::string JSONIOHandlerImpl::fullPath(std::string const &fileName) +{ + if (auxiliary::ends_with(m_handler->directory, "/")) { - auto i = s.rfind( '/' ); - if( i != std::string::npos ) - { - s.replace( - i, - s.size( ) - i, - "" - ); - s.shrink_to_fit( ); - } + return m_handler->directory + fileName; } - - - std::string JSONIOHandlerImpl::filepositionOf( Writable * writable ) + else { - return std::dynamic_pointer_cast< JSONFilePosition >( writable->abstractFilePosition )->id - .to_string( ); + return m_handler->directory + "/" + fileName; } +} - - template< - typename T, - typename Visitor - > - void 
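/*
 * Illustrative sketch (not part of the changeset) of the open-mode policy in
 * getFilehandle() above: CREATE and READ_WRITE truncate, because putJsonContents()
 * always rewrites the complete JSON document, while READ_ONLY only needs an input
 * stream. The helper name openJsonStream is made up for illustration.
 *
 *     #include <fstream>
 *     #include <memory>
 *     #include <string>
 *
 *     std::shared_ptr<std::fstream> openJsonStream(std::string const &path, bool readOnly)
 *     {
 *         auto fs = std::make_shared<std::fstream>();
 *         fs->open(
 *             path,
 *             readOnly ? std::ios_base::in
 *                      : std::ios_base::out | std::ios_base::trunc);
 *         return fs;
 *     }
 */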
JSONIOHandlerImpl::syncMultidimensionalJson( - nlohmann::json & j, - Offset const & offset, - Extent const & extent, - Extent const & multiplicator, - Visitor visitor, - T * data, - size_t currentdim - ) +void JSONIOHandlerImpl::parentDir(std::string &s) +{ + auto i = s.rfind('/'); + if (i != std::string::npos) { - // Offset only relevant for JSON, the array data is contiguous - auto off = offset[currentdim]; - // maybe rewrite iteratively, using a stack that stores for each level the - // current iteration value i - - if( currentdim == offset.size( ) - 1 ) - { - for( std::size_t i = 0; i < extent[currentdim]; ++i ) - { - visitor( - j[i + off], - data[i] - ); - } - } - else - { - for( std::size_t i = 0; i < extent[currentdim]; ++i ) - { - syncMultidimensionalJson< - T, - Visitor - >( - j[i + off], - offset, - extent, - multiplicator, - visitor, - data + i * multiplicator[currentdim], - currentdim + 1 - ); - } - } + s.replace(i, s.size() - i, ""); + s.shrink_to_fit(); } +} +std::string JSONIOHandlerImpl::filepositionOf(Writable *writable) +{ + return std::dynamic_pointer_cast( + writable->abstractFilePosition) + ->id.to_string(); +} + +template +void JSONIOHandlerImpl::syncMultidimensionalJson( + nlohmann::json &j, + Offset const &offset, + Extent const &extent, + Extent const &multiplicator, + Visitor visitor, + T *data, + size_t currentdim) +{ + // Offset only relevant for JSON, the array data is contiguous + auto off = offset[currentdim]; + // maybe rewrite iteratively, using a stack that stores for each level the + // current iteration value i - // multiplicators: an array [m_0,...,m_n] s.t. - // data[i_0]...[i_n] = data[m_0*i_0+...+m_n*i_n] - // (m_n = 1) - Extent JSONIOHandlerImpl::getMultiplicators( Extent const & extent ) + if (currentdim == offset.size() - 1) { - Extent res( extent ); - Extent::value_type n = 1; - size_t i = extent.size( ); - do + for (std::size_t i = 0; i < extent[currentdim]; ++i) { - --i; - res[i] = n; - n *= extent[i]; + visitor(j[i + off], data[i]); } - while( i > 0 ); - return res; } - - - nlohmann::json JSONIOHandlerImpl::initializeNDArray( Extent const & extent ) + else { - // idea: begin from the innermost shale and copy the result into the - // outer shales - nlohmann::json accum; - nlohmann::json old; - auto * accum_ptr = & accum; - auto * old_ptr = & old; - for( auto it = extent.rbegin( ); it != extent.rend( ); it++ ) + for (std::size_t i = 0; i < extent[currentdim]; ++i) { - std::swap(old_ptr, accum_ptr); - *accum_ptr = nlohmann::json {}; - for( Extent::value_type i = 0; i < *it; i++ ) - { - (*accum_ptr)[i] = *old_ptr; // copy boi - } + syncMultidimensionalJson( + j[i + off], + offset, + extent, + multiplicator, + visitor, + data + i * multiplicator[currentdim], + currentdim + 1); } - return *accum_ptr; } +} - - Extent JSONIOHandlerImpl::getExtent( nlohmann::json & j ) +// multiplicators: an array [m_0,...,m_n] s.t. 
+// data[i_0]...[i_n] = data[m_0*i_0+...+m_n*i_n] +// (m_n = 1) +Extent JSONIOHandlerImpl::getMultiplicators(Extent const &extent) +{ + Extent res(extent); + Extent::value_type n = 1; + size_t i = extent.size(); + do { - Extent res; - nlohmann::json * ptr = &j["data"]; - while( ptr->is_array( ) ) - { - res.push_back( ptr->size( ) ); - ptr = &( *ptr )[0]; - } - switch( stringToDatatype( j["datatype"].get() ) ) + --i; + res[i] = n; + n *= extent[i]; + } while (i > 0); + return res; +} + +nlohmann::json JSONIOHandlerImpl::initializeNDArray(Extent const &extent) +{ + // idea: begin from the innermost shale and copy the result into the + // outer shales + nlohmann::json accum; + nlohmann::json old; + auto *accum_ptr = &accum; + auto *old_ptr = &old; + for (auto it = extent.rbegin(); it != extent.rend(); it++) + { + std::swap(old_ptr, accum_ptr); + *accum_ptr = nlohmann::json{}; + for (Extent::value_type i = 0; i < *it; i++) { - case Datatype::CFLOAT: - case Datatype::CDOUBLE: - case Datatype::CLONG_DOUBLE: - // the last "dimension" is only the two entries for the complex - // number, so remove that again - res.erase( res.end() - 1 ); - break; - default: - break; + (*accum_ptr)[i] = *old_ptr; // copy boi } - return res; } + return *accum_ptr; +} - - std::string JSONIOHandlerImpl::removeSlashes( std::string s ) +Extent JSONIOHandlerImpl::getExtent(nlohmann::json &j) +{ + Extent res; + nlohmann::json *ptr = &j["data"]; + while (ptr->is_array()) { - if( auxiliary::starts_with( - s, - '/' - ) ) - { - s = auxiliary::replace_first( - s, - "/", - "" - ); - } - if( auxiliary::ends_with( - s, - '/' - ) ) - { - s = auxiliary::replace_last( - s, - "/", - "" - ); - } - return s; + res.push_back(ptr->size()); + ptr = &(*ptr)[0]; } - - - template< typename KeyT > - bool JSONIOHandlerImpl::hasKey( - nlohmann::json & j, - KeyT && key - ) + switch (stringToDatatype(j["datatype"].get())) { - return j.find( std::forward< KeyT >( key ) ) != j.end( ); + case Datatype::CFLOAT: + case Datatype::CDOUBLE: + case Datatype::CLONG_DOUBLE: + // the last "dimension" is only the two entries for the complex + // number, so remove that again + res.erase(res.end() - 1); + break; + default: + break; } + return res; +} - - void JSONIOHandlerImpl::ensurePath( - nlohmann::json * jsonp, - std::string path - ) +std::string JSONIOHandlerImpl::removeSlashes(std::string s) +{ + if (auxiliary::starts_with(s, '/')) { - auto groups = auxiliary::split( - path, - "/" - ); - for( std::string & group: groups ) - { - // Enforce a JSON object - // the library will automatically create a list if the first - // key added to it is parseable as an int - jsonp = &( *jsonp )[group]; - if (jsonp->is_null()) - { - *jsonp = nlohmann::json::object(); - } - } + s = auxiliary::replace_first(s, "/", ""); } - - - std::tuple< - File, - std::unordered_map< - Writable *, - File - >::iterator, - bool - > JSONIOHandlerImpl::getPossiblyExisting( std::string file ) + if (auxiliary::ends_with(s, '/')) { + s = auxiliary::replace_last(s, "/", ""); + } + return s; +} - auto it = std::find_if( - m_files.begin( ), - m_files.end( ), - [file]( - std::unordered_map< - Writable *, - File - >::value_type const & entry - ) - { - return *entry.second == file && - entry.second - .valid( ); - } - ); +template +bool JSONIOHandlerImpl::hasKey(nlohmann::json &j, KeyT &&key) +{ + return j.find(std::forward(key)) != j.end(); +} - bool newlyCreated; - File name; - if( it == m_files.end( ) ) - { - name = file; - newlyCreated = true; - } - else - { - name = it->second; - newlyCreated = 
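/*
 * Illustrative sketch (not part of the changeset) of the stride arithmetic that
 * getMultiplicators() above provides for syncMultidimensionalJson(): the strides
 * of a row-major extent {2, 3, 4} are {12, 4, 1}, so element (i0, i1, i2) of the
 * contiguous buffer sits at data[12*i0 + 4*i1 + i2]. The helper below is a
 * standalone reimplementation for clarity, not backend code.
 *
 *     #include <cstddef>
 *     #include <vector>
 *
 *     std::vector<std::size_t> strides(std::vector<std::size_t> const &extent)
 *     {
 *         std::vector<std::size_t> res(extent.size(), 1); // innermost stride is 1
 *         for (std::size_t i = extent.size(); i > 1; --i)
 *             res[i - 2] = res[i - 1] * extent[i - 1];
 *         return res;
 *     }
 *
 *     // strides({2, 3, 4}) == {12, 4, 1}
 */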
false; - } - return std::tuple< - File, - std::unordered_map< - Writable *, - File - >::iterator, - bool - >( - std::move( name ), - it, - newlyCreated - ); - } - - - std::shared_ptr< nlohmann::json > - JSONIOHandlerImpl::obtainJsonContents( File file ) - { - VERIFY_ALWAYS( file.valid( ), - "[JSON] File has been overwritten or deleted before reading" ); - auto it = m_jsonVals.find( file ); - if( it != m_jsonVals.end( ) ) +void JSONIOHandlerImpl::ensurePath(nlohmann::json *jsonp, std::string path) +{ + auto groups = auxiliary::split(path, "/"); + for (std::string &group : groups) + { + // Enforce a JSON object + // the library will automatically create a list if the first + // key added to it is parseable as an int + jsonp = &(*jsonp)[group]; + if (jsonp->is_null()) { - return it->second; + *jsonp = nlohmann::json::object(); } - // read from file - auto fh = getFilehandle( - file, - Access::READ_ONLY - ); - std::shared_ptr< nlohmann::json > - res = std::make_shared< nlohmann::json >( ); - *fh >> *res; - VERIFY( fh->good( ), - "[JSON] Failed reading from a file." ); - m_jsonVals.emplace( - file, - res - ); - return res; } +} + +std::tuple::iterator, bool> +JSONIOHandlerImpl::getPossiblyExisting(std::string file) +{ + auto it = std::find_if( + m_files.begin(), + m_files.end(), + [file](std::unordered_map::value_type const &entry) { + return *entry.second == file && entry.second.valid(); + }); - nlohmann::json & - JSONIOHandlerImpl::obtainJsonContents( Writable * writable ) + bool newlyCreated; + File name; + if (it == m_files.end()) { - auto file = refreshFileFromParent( writable ); - auto filePosition = setAndGetFilePosition( - writable, - false - ); - return ( *obtainJsonContents( file ) )[filePosition->id]; + name = file; + newlyCreated = true; } + else + { + name = it->second; + newlyCreated = false; + } + return std:: + tuple::iterator, bool>( + std::move(name), it, newlyCreated); +} - - void JSONIOHandlerImpl::putJsonContents( - File filename, - bool unsetDirty // = true - ) +std::shared_ptr JSONIOHandlerImpl::obtainJsonContents(File file) +{ + VERIFY_ALWAYS( + file.valid(), + "[JSON] File has been overwritten or deleted before reading"); + auto it = m_jsonVals.find(file); + if (it != m_jsonVals.end()) { - VERIFY_ALWAYS( filename.valid( ), - "[JSON] File has been overwritten/deleted before writing" ); - auto it = m_jsonVals.find( filename ); - if( it != m_jsonVals.end( ) ) + return it->second; + } + // read from file + auto fh = getFilehandle(file, Access::READ_ONLY); + std::shared_ptr res = std::make_shared(); + *fh >> *res; + VERIFY(fh->good(), "[JSON] Failed reading from a file."); + m_jsonVals.emplace(file, res); + return res; +} + +nlohmann::json &JSONIOHandlerImpl::obtainJsonContents(Writable *writable) +{ + auto file = refreshFileFromParent(writable); + auto filePosition = setAndGetFilePosition(writable, false); + return (*obtainJsonContents(file))[filePosition->id]; +} + +void JSONIOHandlerImpl::putJsonContents( + File filename, + bool unsetDirty // = true +) +{ + VERIFY_ALWAYS( + filename.valid(), + "[JSON] File has been overwritten/deleted before writing"); + auto it = m_jsonVals.find(filename); + if (it != m_jsonVals.end()) + { + auto fh = getFilehandle(filename, Access::CREATE); + (*it->second)["platform_byte_widths"] = platformSpecifics(); + *fh << *it->second << std::endl; + VERIFY(fh->good(), "[JSON] Failed writing data to disk.") + m_jsonVals.erase(it); + if (unsetDirty) { - auto fh = getFilehandle( - filename, - Access::CREATE - ); - ( *it->second 
)["platform_byte_widths"] = platformSpecifics( ); - *fh << *it->second << std::endl; - VERIFY( fh->good( ), - "[JSON] Failed writing data to disk." ) - m_jsonVals.erase( it ); - if( unsetDirty ) - { - m_dirty.erase( filename ); - } + m_dirty.erase(filename); } - } +} - - std::shared_ptr< JSONFilePosition > - JSONIOHandlerImpl::setAndGetFilePosition( - Writable * writable, - std::string extend - ) +std::shared_ptr +JSONIOHandlerImpl::setAndGetFilePosition(Writable *writable, std::string extend) +{ + std::string path; + if (writable->abstractFilePosition) { - std::string path; - if( writable->abstractFilePosition ) - { - // do NOT reuse the old pointer, we want to change the file position - // only for the writable! - path = filepositionOf( writable ) + "/" + extend; - } - else if( writable->parent ) + // do NOT reuse the old pointer, we want to change the file position + // only for the writable! + path = filepositionOf(writable) + "/" + extend; + } + else if (writable->parent) + { + path = filepositionOf(writable->parent) + "/" + extend; + } + else + { // we are root + path = extend; + if (!auxiliary::starts_with(path, "/")) { - path = filepositionOf( writable->parent ) + "/" + extend; + path = "/" + path; } - else - { // we are root - path = extend; - if( !auxiliary::starts_with( - path, - "/" - ) ) - { - path = "/" + path; - } - } - auto - res = - std::make_shared< JSONFilePosition >( json::json_pointer( path ) ); - - writable->abstractFilePosition = res; - - return res; } + auto res = std::make_shared(json::json_pointer(path)); + writable->abstractFilePosition = res; - std::shared_ptr< JSONFilePosition > - JSONIOHandlerImpl::setAndGetFilePosition( - Writable * writable, - bool write - ) - { - std::shared_ptr< AbstractFilePosition > res; + return res; +} +std::shared_ptr +JSONIOHandlerImpl::setAndGetFilePosition(Writable *writable, bool write) +{ + std::shared_ptr res; - if( writable->abstractFilePosition ) - { - res = writable->abstractFilePosition; - } - else if( writable->parent ) - { - res = - writable->parent - ->abstractFilePosition; - } - else - { // we are root - res = std::make_shared< JSONFilePosition >( ); - } - if( write ) - { - writable->abstractFilePosition = res; - } - return std::dynamic_pointer_cast< JSONFilePosition >( res ); + if (writable->abstractFilePosition) + { + res = writable->abstractFilePosition; } + else if (writable->parent) + { + res = writable->parent->abstractFilePosition; + } + else + { // we are root + res = std::make_shared(); + } + if (write) + { + writable->abstractFilePosition = res; + } + return std::dynamic_pointer_cast(res); +} - - File JSONIOHandlerImpl::refreshFileFromParent( Writable * writable ) +File JSONIOHandlerImpl::refreshFileFromParent(Writable *writable) +{ + if (writable->parent) { - if( writable->parent ) - { - auto - file = - m_files.find( writable->parent ) - ->second; - associateWithFile( - writable, - file - ); - return file; - } - else - { - return m_files.find( writable ) - ->second; - } + auto file = m_files.find(writable->parent)->second; + associateWithFile(writable, file); + return file; + } + else + { + return m_files.find(writable)->second; } +} +void JSONIOHandlerImpl::associateWithFile(Writable *writable, File file) +{ + // make sure to overwrite + m_files[writable] = std::move(file); +} - void JSONIOHandlerImpl::associateWithFile( - Writable * writable, - File file - ) +bool JSONIOHandlerImpl::isDataset(nlohmann::json const &j) +{ + if (!j.is_object()) { - // make sure to overwrite - m_files[writable] = std::move( file 
); + return false; } + auto i = j.find("data"); + return i != j.end() && i.value().is_array(); +} - - bool JSONIOHandlerImpl::isDataset( nlohmann::json const & j ) +bool JSONIOHandlerImpl::isGroup(nlohmann::json::const_iterator it) +{ + auto &j = it.value(); + if (it.key() == "attributes" || it.key() == "platform_byte_widths" || + !j.is_object()) { - if( !j.is_object( ) ) - { - return false; - } - auto i = j.find( "data" ); - return i != j.end( ) && i.value( ).is_array(); + return false; } + auto i = j.find("data"); + return i == j.end() || !i.value().is_array(); +} +template +void JSONIOHandlerImpl::verifyDataset( + Param const ¶meters, nlohmann::json &j) +{ + VERIFY_ALWAYS( + isDataset(j), + "[JSON] Specified dataset does not exist or is not a dataset."); - bool JSONIOHandlerImpl::isGroup( nlohmann::json::const_iterator it ) + try { - auto & j = it.value(); - if( it.key() == "attributes" || it.key() == "platform_byte_widths" || !j.is_object( ) ) + auto datasetExtent = getExtent(j); + VERIFY_ALWAYS( + datasetExtent.size() == parameters.extent.size(), + "[JSON] Read/Write request does not fit the dataset's dimension"); + for (unsigned int dimension = 0; dimension < parameters.extent.size(); + dimension++) { - return false; + VERIFY_ALWAYS( + parameters.offset[dimension] + parameters.extent[dimension] <= + datasetExtent[dimension], + "[JSON] Read/Write request exceeds the dataset's size"); } - auto i = j.find( "data" ); - return i == j.end( ) || !i.value( ).is_array(); + Datatype dt = stringToDatatype(j["datatype"].get()); + VERIFY_ALWAYS( + dt == parameters.dtype, + "[JSON] Read/Write request does not fit the dataset's type"); } - - - template< typename Param > - void JSONIOHandlerImpl::verifyDataset( - Param const & parameters, - nlohmann::json & j - ) + catch (json::basic_json::type_error &) { - VERIFY_ALWAYS( isDataset(j), - "[JSON] Specified dataset does not exist or is not a dataset." ); + throw std::runtime_error( + "[JSON] The given path does not contain a valid dataset."); + } +} - try - { - auto datasetExtent = getExtent( j ); - VERIFY_ALWAYS( datasetExtent.size( ) == - parameters.extent - .size( ), - "[JSON] Read/Write request does not fit the dataset's dimension" ); - for( unsigned int dimension = 0; - dimension < - parameters.extent - .size( ); - dimension++ ) - { - VERIFY_ALWAYS( parameters.offset[dimension] + - parameters.extent[dimension] <= - datasetExtent[dimension], - "[JSON] Read/Write request exceeds the dataset's size" ); - } - Datatype - dt = stringToDatatype( j["datatype"].get< std::string >( ) ); - VERIFY_ALWAYS( dt == parameters.dtype, - "[JSON] Read/Write request does not fit the dataset's type" ); - } catch( json::basic_json::type_error & ) - { - throw std::runtime_error( "[JSON] The given path does not contain a valid dataset." 
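/*
 * For reference (illustrative, not part of the changeset): the shape that
 * isDataset() and verifyDataset() above test for is an object carrying the nested
 * "data" array together with its "datatype" string; a 2x3 dataset of doubles would
 * look roughly like
 *
 *     {
 *       "datatype": "DOUBLE",
 *       "data": [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
 *       "attributes": {}
 *     }
 *
 * while isGroup() treats any other object as a group, except for the reserved
 * "attributes" and "platform_byte_widths" keys.
 */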
); - } +nlohmann::json JSONIOHandlerImpl::platformSpecifics() +{ + nlohmann::json res; + static Datatype datatypes[] = { + Datatype::CHAR, + Datatype::UCHAR, + Datatype::SHORT, + Datatype::INT, + Datatype::LONG, + Datatype::LONGLONG, + Datatype::USHORT, + Datatype::UINT, + Datatype::ULONG, + Datatype::ULONGLONG, + Datatype::FLOAT, + Datatype::DOUBLE, + Datatype::LONG_DOUBLE, + Datatype::CFLOAT, + Datatype::CDOUBLE, + Datatype::CLONG_DOUBLE, + Datatype::BOOL}; + for (auto it = std::begin(datatypes); it != std::end(datatypes); it++) + { + res[datatypeToString(*it)] = toBytes(*it); } + return res; +} +template +void JSONIOHandlerImpl::DatasetWriter::operator()( + nlohmann::json &json, const Parameter ¶meters) +{ + CppToJSON ctj; + syncMultidimensionalJson( + json["data"], + parameters.offset, + parameters.extent, + getMultiplicators(parameters.extent), + [&ctj](nlohmann::json &j, T const &data) { j = ctj(data); }, + static_cast(parameters.data.get())); +} + +template +void JSONIOHandlerImpl::DatasetReader::operator()( + nlohmann::json &json, Parameter ¶meters) +{ + JsonToCpp jtc; + syncMultidimensionalJson( + json, + parameters.offset, + parameters.extent, + getMultiplicators(parameters.extent), + [&jtc](nlohmann::json &j, T &data) { data = jtc(j); }, + static_cast(parameters.data.get())); +} + +template +void JSONIOHandlerImpl::AttributeWriter::operator()( + nlohmann::json &value, Attribute::resource const &resource) +{ + CppToJSON ctj; + value = ctj(variantSrc::get(resource)); +} - nlohmann::json JSONIOHandlerImpl::platformSpecifics( ) - { - nlohmann::json res; - static Datatype datatypes[] = { - Datatype::CHAR, - Datatype::UCHAR, - Datatype::SHORT, - Datatype::INT, - Datatype::LONG, - Datatype::LONGLONG, - Datatype::USHORT, - Datatype::UINT, - Datatype::ULONG, - Datatype::ULONGLONG, - Datatype::FLOAT, - Datatype::DOUBLE, - Datatype::LONG_DOUBLE, - Datatype::CFLOAT, - Datatype::CDOUBLE, - Datatype::CLONG_DOUBLE, - Datatype::BOOL - }; - for( auto it = std::begin( datatypes ); - it != std::end( datatypes ); - it++ ) - { - res[datatypeToString( *it )] = toBytes( *it ); - } - return res; - } +template +void JSONIOHandlerImpl::AttributeReader::operator()( + nlohmann::json &json, Parameter ¶meters) +{ + JsonToCpp jtc; + *parameters.resource = jtc(json); +} +template +nlohmann::json JSONIOHandlerImpl::CppToJSON::operator()(const T &val) +{ + return nlohmann::json(val); +} - template< typename T > - void JSONIOHandlerImpl::DatasetWriter::operator()( - nlohmann::json & json, - const Parameter< Operation::WRITE_DATASET > & parameters - ) +template +nlohmann::json JSONIOHandlerImpl::CppToJSON >::operator()( + const std::vector &v) +{ + nlohmann::json j; + CppToJSON ctj; + for (auto const &a : v) { - CppToJSON< T > ctj; - syncMultidimensionalJson( - json["data"], - parameters.offset, - parameters.extent, - getMultiplicators( parameters.extent ), - [&ctj]( - nlohmann::json & j, - T const & data - ) - { - j = ctj( data ); - }, - static_cast(parameters.data - .get( )) - ); - } - - - template< typename T > - void JSONIOHandlerImpl::DatasetReader::operator()( - nlohmann::json & json, - Parameter< Operation::READ_DATASET > & parameters - ) - { - JsonToCpp< - T - > jtc; - syncMultidimensionalJson( - json, - parameters.offset, - parameters.extent, - getMultiplicators( parameters.extent ), - [&jtc]( - nlohmann::json & j, - T & data - ) - { - data = jtc( j ); - }, - static_cast(parameters.data - .get( )) - ); + j.emplace_back(ctj(a)); } + return j; +} - - template< typename T > - void 
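/*
 * For reference (illustrative, not part of the changeset): platformSpecifics()
 * above records the byte width of every fundamental Datatype, and putJsonContents()
 * stamps the result into each file as "platform_byte_widths". On a typical LP64
 * build this looks roughly like
 *
 *     "platform_byte_widths": { "CHAR": 1, "INT": 4, "LONG": 8, "DOUBLE": 8, ... }
 *
 * with the exact keys and values being whatever datatypeToString() and toBytes()
 * report.
 */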
JSONIOHandlerImpl::AttributeWriter::operator()( - nlohmann::json & value, - Attribute::resource const & resource - ) +template +nlohmann::json JSONIOHandlerImpl::CppToJSON >::operator()( + const std::array &v) +{ + nlohmann::json j; + CppToJSON ctj; + for (auto const &a : v) { - CppToJSON< T > ctj; - value = ctj( variantSrc::get< T >( resource ) ); + j.emplace_back(ctj(a)); } + return j; +} +template +T JSONIOHandlerImpl::JsonToCpp::operator()(nlohmann::json const &json) +{ + return json.get(); +} - template< typename T > - void JSONIOHandlerImpl::AttributeReader::operator()( - nlohmann::json & json, - Parameter< Operation::READ_ATT > & parameters - ) +template +std::vector JSONIOHandlerImpl::JsonToCpp >::operator()( + nlohmann::json const &json) +{ + std::vector v; + JsonToCpp jtp; + for (auto const &j : json) { - JsonToCpp< - T - > jtc; - *parameters.resource = jtc( - json - ); + v.emplace_back(jtp(j)); } + return v; +} - - template< typename T > - nlohmann::json - JSONIOHandlerImpl::CppToJSON< T >::operator()( const T & val ) +template +std::array JSONIOHandlerImpl::JsonToCpp >::operator()( + nlohmann::json const &json) +{ + std::array a; + JsonToCpp jtp; + size_t i = 0; + for (auto const &j : json) { - return nlohmann::json( val ); + a[i] = jtp(j); + i++; } - - - template< typename T > - nlohmann::json - JSONIOHandlerImpl::CppToJSON< std::vector< T > >::operator()( const std::vector< T > & v ) + return a; +} + +template +T JSONIOHandlerImpl::JsonToCpp< + T, + typename std::enable_if::value>::type>:: +operator()(nlohmann::json const &j) +{ + try { - nlohmann::json j; - CppToJSON< T > ctj; - for( auto const & a: v ) - { - j.emplace_back( ctj( a ) ); - } - return j; - } - - - template< typename T, int n > - nlohmann::json JSONIOHandlerImpl::CppToJSON< - std::array< - T, - n - > - >::operator()( - const std::array< - T, - n - > & v - ) - { - nlohmann::json j; - CppToJSON< T > ctj; - for( auto const & a: v ) - { - j.emplace_back( ctj( a ) ); - } - return j; + return j.get(); } - - - template< - typename T, - typename Dummy - > - T JSONIOHandlerImpl::JsonToCpp< - T, - Dummy - >::operator()( nlohmann::json const & json ) - { return json.get< T >( ); } - - - template< typename T > - std::vector< T > - JSONIOHandlerImpl::JsonToCpp< std::vector< T > >::operator()( nlohmann::json const & json ) + catch (...) { - std::vector< T > v; - JsonToCpp< T > jtp; - for( auto const & j: json ) - { - v.emplace_back( jtp( j ) ); - } - return v; - } - - - template< typename T, int n > - std::array< - T, - n - > JSONIOHandlerImpl::JsonToCpp< - std::array< - T, - n - > - >::operator()( nlohmann::json const & json ) - { - std::array< - T, - n - > a; - JsonToCpp< T > jtp; - size_t i = 0; - for( auto const & j: json ) - { - a[i] = jtp( j ); - i++; - } - return a; - } - - - template< - typename T - > - T JSONIOHandlerImpl::JsonToCpp< - T, - typename std::enable_if< - std::is_floating_point< - T - >::value - >::type - >::operator()( nlohmann::json const & j ) { - try { - return j.get(); - } catch (...) 
{ - return std::numeric_limits::quiet_NaN(); - } + return std::numeric_limits::quiet_NaN(); } +} } // namespace openPMD diff --git a/src/Iteration.cpp b/src/Iteration.cpp index 5b932b59cd..7026dd90e3 100644 --- a/src/Iteration.cpp +++ b/src/Iteration.cpp @@ -30,165 +30,160 @@ #include #include - namespace openPMD { Iteration::Iteration() - : meshes{Container< Mesh >()}, - particles{Container< ParticleSpecies >()} + : meshes{Container()}, particles{Container()} { - setTime(static_cast< double >(0)); - setDt(static_cast< double >(1)); + setTime(static_cast(0)); + setDt(static_cast(1)); setTimeUnitSI(1); - meshes.writable().ownKeyWithinParent = { "meshes" }; - particles.writable().ownKeyWithinParent = { "particles" }; + meshes.writable().ownKeyWithinParent = {"meshes"}; + particles.writable().ownKeyWithinParent = {"particles"}; } -template< typename T > -Iteration& -Iteration::setTime(T newTime) +template +Iteration &Iteration::setTime(T newTime) { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); setAttribute("time", newTime); return *this; } -template< typename T > -Iteration& -Iteration::setDt(T newDt) +template +Iteration &Iteration::setDt(T newDt) { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); setAttribute("dt", newDt); return *this; } -double -Iteration::timeUnitSI() const +double Iteration::timeUnitSI() const { - return getAttribute("timeUnitSI").get< double >(); + return getAttribute("timeUnitSI").get(); } -Iteration& -Iteration::setTimeUnitSI(double newTimeUnitSI) +Iteration &Iteration::setTimeUnitSI(double newTimeUnitSI) { setAttribute("timeUnitSI", newTimeUnitSI); return *this; } -using iterator_t = Container< Iteration, uint64_t >::iterator; +using iterator_t = Container::iterator; -Iteration & -Iteration::close( bool _flush ) +Iteration &Iteration::close(bool _flush) { using bool_type = unsigned char; - if( this->IOHandler()->m_frontendAccess != Access::READ_ONLY ) + if (this->IOHandler()->m_frontendAccess != Access::READ_ONLY) { - setAttribute< bool_type >( "closed", 1u ); + setAttribute("closed", 1u); } StepStatus flag = getStepStatus(); // update close status - switch( *m_closed ) - { - case CloseStatus::Open: - case CloseStatus::ClosedInFrontend: + switch (*m_closed) + { + case CloseStatus::Open: + case CloseStatus::ClosedInFrontend: + *m_closed = CloseStatus::ClosedInFrontend; + break; + case CloseStatus::ClosedTemporarily: + // should we bother to reopen? + if (dirtyRecursive()) + { + // let's reopen *m_closed = CloseStatus::ClosedInFrontend; - break; - case CloseStatus::ClosedTemporarily: - // should we bother to reopen? 
- if( dirtyRecursive() ) - { - // let's reopen - *m_closed = CloseStatus::ClosedInFrontend; - } - else - { - // don't reopen - *m_closed = CloseStatus::ClosedInBackend; - } - break; - case CloseStatus::ParseAccessDeferred: - case CloseStatus::ClosedInBackend: - // just keep it like it is - // (this means that closing an iteration that has not been parsed - // yet keeps it re-openable) - break; + } + else + { + // don't reopen + *m_closed = CloseStatus::ClosedInBackend; + } + break; + case CloseStatus::ParseAccessDeferred: + case CloseStatus::ClosedInBackend: + // just keep it like it is + // (this means that closing an iteration that has not been parsed + // yet keeps it re-openable) + break; } - if( _flush ) + if (_flush) { - if( flag == StepStatus::DuringStep ) + if (flag == StepStatus::DuringStep) { endStep(); - setStepStatus( StepStatus::NoStep ); + setStepStatus(StepStatus::NoStep); } else { // flush things manually - internal::SeriesInternal * s = &retrieveSeries(); + internal::SeriesInternal *s = &retrieveSeries(); // figure out my iteration number - auto begin = s->indexOf( *this ); + auto begin = s->indexOf(*this); auto end = begin; ++end; - s->flush_impl( begin, end, FlushLevel::UserFlush ); + s->flush_impl(begin, end, {FlushLevel::UserFlush}); } } else { - if( flag == StepStatus::DuringStep ) + if (flag == StepStatus::DuringStep) { - throw std::runtime_error( "Using deferred Iteration::close " - "unimplemented in auto-stepping mode." ); + throw std::runtime_error( + "Using deferred Iteration::close " + "unimplemented in auto-stepping mode."); } } return *this; } -Iteration & -Iteration::open() +Iteration &Iteration::open() { - if( *m_closed == CloseStatus::ParseAccessDeferred ) + if (*m_closed == CloseStatus::ParseAccessDeferred) { *m_closed = CloseStatus::Open; + runDeferredParseAccess(); } - runDeferredParseAccess(); - internal::SeriesInternal * s = &retrieveSeries(); + internal::SeriesInternal *s = &retrieveSeries(); // figure out my iteration number - auto begin = s->indexOf( *this ); - s->openIteration( begin->first, *this ); - IOHandler()->flush(); + auto begin = s->indexOf(*this); + s->openIteration(begin->first, *this); + IOHandler()->flush(internal::defaultFlushParams); return *this; } -bool -Iteration::closed() const +bool Iteration::closed() const { - switch( *m_closed ) - { - case CloseStatus::ParseAccessDeferred: - case CloseStatus::Open: - /* - * Temporarily closing a file is something that the openPMD API - * does for optimization purposes. - * Logically to the user, it is still open. - */ - case CloseStatus::ClosedTemporarily: - return false; - case CloseStatus::ClosedInFrontend: - case CloseStatus::ClosedInBackend: - return true; + switch (*m_closed) + { + case CloseStatus::ParseAccessDeferred: + case CloseStatus::Open: + /* + * Temporarily closing a file is something that the openPMD API + * does for optimization purposes. + * Logically to the user, it is still open. + */ + case CloseStatus::ClosedTemporarily: + return false; + case CloseStatus::ClosedInFrontend: + case CloseStatus::ClosedInBackend: + return true; } - throw std::runtime_error( "Unreachable!" ); + throw std::runtime_error("Unreachable!"); } -bool -Iteration::closedByWriter() const +bool Iteration::closedByWriter() const { using bool_type = unsigned char; - if( containsAttribute( "closed" ) ) + if (containsAttribute("closed")) { - return getAttribute( "closed" ).get< bool_type >() == 0u ? false : true; + return getAttribute("closed").get() == 0u ? 
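/*
 * Illustrative usage sketch (not part of the changeset) of the close/open life
 * cycle that the CloseStatus switch above implements; names follow the public
 * openPMD-api, the file name is made up.
 *
 *     Series series("sample_%T.json", Access::CREATE);
 *     auto it = series.iterations[0];
 *     // ... declare records, store chunks ...
 *     it.close();   // marks the iteration ClosedInFrontend and, since flush
 *                   // defaults to true, performs the backend flush right away
 *     it.closed();  // -> true from now on
 *     // it.open() re-opens an iteration whose parsing was deferred or that the
 *     // library had only closed temporarily.
 */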
false : true; } else { @@ -196,309 +191,338 @@ Iteration::closedByWriter() const } } -void -Iteration::flushFileBased(std::string const& filename, uint64_t i) +void Iteration::flushFileBased( + std::string const &filename, + uint64_t i, + internal::FlushParams const &flushParams) { /* Find the root point [Series] of this file, * meshesPath and particlesPath are stored there */ - internal::SeriesInternal * s = &retrieveSeries(); - if( s == nullptr ) - throw std::runtime_error("[Iteration::flushFileBased] Series* is a nullptr"); + internal::SeriesInternal *s = &retrieveSeries(); + if (s == nullptr) + throw std::runtime_error( + "[Iteration::flushFileBased] Series* is a nullptr"); - if( !written() ) + if (!written()) { /* create file */ - Parameter< Operation::CREATE_FILE > fCreate; + Parameter fCreate; fCreate.name = filename; IOHandler()->enqueue(IOTask(s, fCreate)); /* create basePath */ - Parameter< Operation::CREATE_PATH > pCreate; + Parameter pCreate; pCreate.path = auxiliary::replace_first(s->basePath(), "%T/", ""); IOHandler()->enqueue(IOTask(&s->iterations, pCreate)); /* create iteration path */ pCreate.path = std::to_string(i); IOHandler()->enqueue(IOTask(this, pCreate)); - } else + } + else { // operations for create mode - if((IOHandler()->m_frontendAccess == Access::CREATE ) && - ( (IOHandler()->backendName() == "MPI_ADIOS1") || (IOHandler()->backendName() == "ADIOS1") ) ) + if ((IOHandler()->m_frontendAccess == Access::CREATE) && + ((IOHandler()->backendName() == "MPI_ADIOS1") || + (IOHandler()->backendName() == "ADIOS1"))) { - Parameter< Operation::OPEN_FILE > fOpen; + Parameter fOpen; fOpen.name = filename; fOpen.encoding = IterationEncoding::fileBased; IOHandler()->enqueue(IOTask(s, fOpen)); - flush(); + flush(flushParams); return; } // operations for read/read-write mode /* open file */ - s->openIteration( i, *this ); + s->openIteration(i, *this); } - flush(); + switch (flushParams.flushLevel) + { + case FlushLevel::CreateOrOpenFiles: + break; + case FlushLevel::SkeletonOnly: + case FlushLevel::InternalFlush: + case FlushLevel::UserFlush: + flush(flushParams); + break; + } } -void -Iteration::flushGroupBased(uint64_t i) +void Iteration::flushGroupBased( + uint64_t i, internal::FlushParams const &flushParams) { - if( !written() ) + if (!written()) { /* create iteration path */ - Parameter< Operation::CREATE_PATH > pCreate; + Parameter pCreate; pCreate.path = std::to_string(i); IOHandler()->enqueue(IOTask(this, pCreate)); } - flush(); + switch (flushParams.flushLevel) + { + case FlushLevel::CreateOrOpenFiles: + break; + case FlushLevel::SkeletonOnly: + case FlushLevel::InternalFlush: + case FlushLevel::UserFlush: + flush(flushParams); + break; + } } -void -Iteration::flushVariableBased( uint64_t i ) +void Iteration::flushVariableBased( + uint64_t i, internal::FlushParams const &flushParams) { - if( !written() ) + if (!written()) { /* create iteration path */ - Parameter< Operation::OPEN_PATH > pOpen; + Parameter pOpen; pOpen.path = ""; - IOHandler()->enqueue( IOTask( this, pOpen ) ); - this->setAttribute( "snapshot", i ); + IOHandler()->enqueue(IOTask(this, pOpen)); + this->setAttribute("snapshot", i); } - flush(); + switch (flushParams.flushLevel) + { + case FlushLevel::CreateOrOpenFiles: + break; + case FlushLevel::SkeletonOnly: + case FlushLevel::InternalFlush: + case FlushLevel::UserFlush: + flush(flushParams); + break; + } } -void -Iteration::flush() +void Iteration::flush(internal::FlushParams const &flushParams) { - if(IOHandler()->m_frontendAccess == Access::READ_ONLY 
) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) { - for( auto& m : meshes ) - m.second.flush(m.first); - for( auto& species : particles ) - species.second.flush(species.first); - } else + for (auto &m : meshes) + m.second.flush(m.first, flushParams); + for (auto &species : particles) + species.second.flush(species.first, flushParams); + } + else { /* Find the root point [Series] of this file, * meshesPath and particlesPath are stored there */ - internal::SeriesInternal * s = &retrieveSeries(); + internal::SeriesInternal *s = &retrieveSeries(); - if( !meshes.empty() || s->containsAttribute("meshesPath") ) + if (!meshes.empty() || s->containsAttribute("meshesPath")) { - if( !s->containsAttribute("meshesPath") ) + if (!s->containsAttribute("meshesPath")) { s->setMeshesPath("meshes/"); s->flushMeshesPath(); } - meshes.flush(s->meshesPath()); - for( auto& m : meshes ) - m.second.flush(m.first); + meshes.flush(s->meshesPath(), flushParams); + for (auto &m : meshes) + m.second.flush(m.first, flushParams); } else { meshes.dirty() = false; } - if( !particles.empty() || s->containsAttribute("particlesPath") ) + if (!particles.empty() || s->containsAttribute("particlesPath")) { - if( !s->containsAttribute("particlesPath") ) + if (!s->containsAttribute("particlesPath")) { s->setParticlesPath("particles/"); s->flushParticlesPath(); } - particles.flush(s->particlesPath()); - for( auto& species : particles ) - species.second.flush(species.first); + particles.flush(s->particlesPath(), flushParams); + for (auto &species : particles) + species.second.flush(species.first, flushParams); } else { particles.dirty() = false; } - flushAttributes(); + flushAttributes(flushParams); } } -void Iteration::deferParseAccess( DeferredParseAccess dr ) +void Iteration::deferParseAccess(DeferredParseAccess dr) { *m_deferredParseAccess = - auxiliary::makeOption< DeferredParseAccess >( std::move( dr ) ); -} - -void Iteration::read() -{ - if( !m_deferredParseAccess->has_value() ) - { - return; - } - auto const & deferred = m_deferredParseAccess->get(); - if( deferred.fileBased ) - { - readFileBased( deferred.filename, deferred.path ); - } - else - { - readGorVBased( deferred.path ); - } - // reset this thing - *m_deferredParseAccess = auxiliary::Option< DeferredParseAccess >(); + auxiliary::makeOption(std::move(dr)); } -void Iteration::reread( std::string const & path ) +void Iteration::reread(std::string const &path) { - if( m_deferredParseAccess->has_value() ) + if (m_deferredParseAccess->has_value()) { throw std::runtime_error( "[Iteration] Internal control flow error: Trying to reread an " - "iteration that has not yet been read for its first time." 
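/*
 * Note (illustrative, not part of the changeset): the flushParams.flushLevel
 * switches added above mean that a FlushLevel::CreateOrOpenFiles pass only creates
 * or opens files and iteration paths, while SkeletonOnly, InternalFlush and
 * UserFlush still descend into meshes, particles and attributes via flush().
 */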
); + "iteration that has not yet been read for its first time."); } - read_impl( path ); + read_impl(path); } void Iteration::readFileBased( - std::string filePath, std::string const & groupPath ) + std::string filePath, std::string const &groupPath, bool doBeginStep) { - auto & series = retrieveSeries(); + if (doBeginStep) + { + /* + * beginStep() must take care to open files + */ + beginStep(/* reread = */ false); + } + auto &series = retrieveSeries(); - series.readOneIterationFileBased( filePath ); + series.readOneIterationFileBased(filePath); + *m_overrideFilebasedFilename = + auxiliary::makeOption(std::move(filePath)); - read_impl( groupPath ); + read_impl(groupPath); } -void Iteration::readGorVBased( std::string const & groupPath ) +void Iteration::readGorVBased(std::string const &groupPath, bool doBeginStep) { - - read_impl(groupPath ); + if (doBeginStep) + { + /* + * beginStep() must take care to open files + */ + beginStep(/* reread = */ false); + } + read_impl(groupPath); } -void Iteration::read_impl( std::string const & groupPath ) +void Iteration::read_impl(std::string const &groupPath) { - Parameter< Operation::OPEN_PATH > pOpen; + Parameter pOpen; pOpen.path = groupPath; - IOHandler()->enqueue( IOTask( this, pOpen ) ); + IOHandler()->enqueue(IOTask(this, pOpen)); using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "dt"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::FLOAT ) - setDt(Attribute(*aRead.resource).get< float >()); - else if( *aRead.dtype == DT::DOUBLE ) - setDt(Attribute(*aRead.resource).get< double >()); - else if( *aRead.dtype == DT::LONG_DOUBLE ) - setDt(Attribute(*aRead.resource).get< long double >()); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::FLOAT) + setDt(Attribute(*aRead.resource).get()); + else if (*aRead.dtype == DT::DOUBLE) + setDt(Attribute(*aRead.resource).get()); + else if (*aRead.dtype == DT::LONG_DOUBLE) + setDt(Attribute(*aRead.resource).get()); else throw std::runtime_error("Unexpected Attribute datatype for 'dt'"); aRead.name = "time"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::FLOAT ) - setTime(Attribute(*aRead.resource).get< float >()); - else if( *aRead.dtype == DT::DOUBLE ) - setTime(Attribute(*aRead.resource).get< double >()); - else if( *aRead.dtype == DT::LONG_DOUBLE ) - setTime(Attribute(*aRead.resource).get< long double >()); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::FLOAT) + setTime(Attribute(*aRead.resource).get()); + else if (*aRead.dtype == DT::DOUBLE) + setTime(Attribute(*aRead.resource).get()); + else if (*aRead.dtype == DT::LONG_DOUBLE) + setTime(Attribute(*aRead.resource).get()); else throw std::runtime_error("Unexpected Attribute datatype for 'time'"); aRead.name = "timeUnitSI"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::DOUBLE ) - setTimeUnitSI(Attribute(*aRead.resource).get< double >()); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::DOUBLE) + setTimeUnitSI(Attribute(*aRead.resource).get()); else - throw std::runtime_error("Unexpected Attribute datatype for 'timeUnitSI'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'timeUnitSI'"); /* Find the root point [Series] of this file, * meshesPath and particlesPath are stored there */ - internal::SeriesInternal * s = &retrieveSeries(); + internal::SeriesInternal *s = 
&retrieveSeries(); - Parameter< Operation::LIST_PATHS > pList; + Parameter pList; std::string version = s->openPMD(); bool hasMeshes = false; bool hasParticles = false; - if( version == "1.0.0" || version == "1.0.1" ) + if (version == "1.0.0" || version == "1.0.1") { IOHandler()->enqueue(IOTask(this, pList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); hasMeshes = std::count( - pList.paths->begin(), - pList.paths->end(), - auxiliary::replace_last(s->meshesPath(), "/", "") - ) == 1; - hasParticles = std::count( - pList.paths->begin(), - pList.paths->end(), - auxiliary::replace_last(s->particlesPath(), "/", "") - ) == 1; + pList.paths->begin(), + pList.paths->end(), + auxiliary::replace_last(s->meshesPath(), "/", "")) == 1; + hasParticles = + std::count( + pList.paths->begin(), + pList.paths->end(), + auxiliary::replace_last(s->particlesPath(), "/", "")) == 1; pList.paths->clear(); - } else + } + else { hasMeshes = s->containsAttribute("meshesPath"); hasParticles = s->containsAttribute("particlesPath"); } - if( hasMeshes ) + if (hasMeshes) { pOpen.path = s->meshesPath(); IOHandler()->enqueue(IOTask(&meshes, pOpen)); - meshes.readAttributes( ReadMode::FullyReread ); + meshes.readAttributes(ReadMode::FullyReread); auto map = meshes.eraseStaleEntries(); /* obtain all non-scalar meshes */ IOHandler()->enqueue(IOTask(&meshes, pList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - Parameter< Operation::LIST_ATTS > aList; - for( auto const& mesh_name : *pList.paths ) + Parameter aList; + for (auto const &mesh_name : *pList.paths) { - Mesh& m = map[mesh_name]; + Mesh &m = map[mesh_name]; pOpen.path = mesh_name; aList.attributes->clear(); IOHandler()->enqueue(IOTask(&m, pOpen)); IOHandler()->enqueue(IOTask(&m, aList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); auto att_begin = aList.attributes->begin(); auto att_end = aList.attributes->end(); auto value = std::find(att_begin, att_end, "value"); auto shape = std::find(att_begin, att_end, "shape"); - if( value != att_end && shape != att_end ) + if (value != att_end && shape != att_end) { - MeshRecordComponent& mrc = m[MeshRecordComponent::SCALAR]; + MeshRecordComponent &mrc = m[MeshRecordComponent::SCALAR]; mrc.parent() = m.parent(); IOHandler()->enqueue(IOTask(&mrc, pOpen)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); *mrc.m_isConstant = true; } m.read(); } /* obtain all scalar meshes */ - Parameter< Operation::LIST_DATASETS > dList; + Parameter dList; IOHandler()->enqueue(IOTask(&meshes, dList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - Parameter< Operation::OPEN_DATASET > dOpen; - for( auto const& mesh_name : *dList.datasets ) + Parameter dOpen; + for (auto const &mesh_name : *dList.datasets) { - Mesh& m = map[mesh_name]; + Mesh &m = map[mesh_name]; dOpen.name = mesh_name; IOHandler()->enqueue(IOTask(&m, dOpen)); - IOHandler()->flush(); - MeshRecordComponent& mrc = m[MeshRecordComponent::SCALAR]; + IOHandler()->flush(internal::defaultFlushParams); + MeshRecordComponent &mrc = m[MeshRecordComponent::SCALAR]; mrc.parent() = m.parent(); IOHandler()->enqueue(IOTask(&mrc, dOpen)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); mrc.written() = false; mrc.resetDataset(Dataset(*dOpen.dtype, *dOpen.extent)); mrc.written() = true; @@ -510,25 +534,25 @@ void Iteration::read_impl( std::string const & groupPath ) meshes.dirty() = false; } - if( hasParticles ) + if 
(hasParticles) { pOpen.path = s->particlesPath(); IOHandler()->enqueue(IOTask(&particles, pOpen)); - particles.readAttributes( ReadMode::FullyReread ); + particles.readAttributes(ReadMode::FullyReread); /* obtain all particle species */ pList.paths->clear(); IOHandler()->enqueue(IOTask(&particles, pList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); auto map = particles.eraseStaleEntries(); - for( auto const& species_name : *pList.paths ) + for (auto const &species_name : *pList.paths) { - ParticleSpecies& p = map[species_name]; + ParticleSpecies &p = map[species_name]; pOpen.path = species_name; IOHandler()->enqueue(IOTask(&p, pOpen)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); p.read(); } } @@ -537,47 +561,47 @@ void Iteration::read_impl( std::string const & groupPath ) particles.dirty() = false; } - readAttributes( ReadMode::FullyReread ); + readAttributes(ReadMode::FullyReread); } -AdvanceStatus -Iteration::beginStep() +AdvanceStatus Iteration::beginStep(bool reread) { using IE = IterationEncoding; - auto & series = retrieveSeries(); + auto &series = retrieveSeries(); // Initialize file with this to quiet warnings // The following switch is comprehensive - internal::AttributableData * file = nullptr; - switch( series.iterationEncoding() ) + internal::AttributableData *file = nullptr; + switch (series.iterationEncoding()) { - case IE::fileBased: - file = m_attributableData.get(); - break; - case IE::groupBased: - case IE::variableBased: - file = &series; - break; + case IE::fileBased: + file = m_attributableData.get(); + break; + case IE::groupBased: + case IE::variableBased: + file = &series; + break; } AdvanceStatus status = series.advance( - AdvanceMode::BEGINSTEP, *file, series.indexOf( *this ), *this ); - if( status != AdvanceStatus::OK ) + AdvanceMode::BEGINSTEP, *file, series.indexOf(*this), *this); + if (status != AdvanceStatus::OK) { return status; } // re-read -> new datasets might be available - if( ( series.iterationEncoding() == IE::groupBased || - series.iterationEncoding() == IE::variableBased ) && - ( this->IOHandler()->m_frontendAccess == Access::READ_ONLY || - this->IOHandler()->m_frontendAccess == Access::READ_WRITE ) ) + if (reread && + (series.iterationEncoding() == IE::groupBased || + series.iterationEncoding() == IE::variableBased) && + (this->IOHandler()->m_frontendAccess == Access::READ_ONLY || + this->IOHandler()->m_frontendAccess == Access::READ_WRITE)) { bool previous = series.iterations.written(); series.iterations.written() = false; auto oldType = this->IOHandler()->m_frontendAccess; auto newType = - const_cast< Access * >( &this->IOHandler()->m_frontendAccess ); + const_cast(&this->IOHandler()->m_frontendAccess); *newType = Access::READ_WRITE; - series.readGorVBased( false ); + series.readGorVBased(false); *newType = oldType; series.iterations.written() = previous; } @@ -585,86 +609,81 @@ Iteration::beginStep() return status; } -void -Iteration::endStep() +void Iteration::endStep() { using IE = IterationEncoding; - auto & series = retrieveSeries(); + auto &series = retrieveSeries(); // Initialize file with this to quiet warnings // The following switch is comprehensive - internal::AttributableData * file = nullptr; - switch( series.iterationEncoding() ) + internal::AttributableData *file = nullptr; + switch (series.iterationEncoding()) { - case IE::fileBased: - file = m_attributableData.get(); - break; - case IE::groupBased: - case IE::variableBased: - file = &series; - break; + case 
IE::fileBased: + file = m_attributableData.get(); + break; + case IE::groupBased: + case IE::variableBased: + file = &series; + break; } // @todo filebased check - series.advance( - AdvanceMode::ENDSTEP, *file, series.indexOf( *this ), *this ); + series.advance(AdvanceMode::ENDSTEP, *file, series.indexOf(*this), *this); } -StepStatus -Iteration::getStepStatus() +StepStatus Iteration::getStepStatus() { - internal::SeriesInternal * s = &retrieveSeries(); - switch( s->iterationEncoding() ) + internal::SeriesInternal *s = &retrieveSeries(); + switch (s->iterationEncoding()) { using IE = IterationEncoding; - case IE::fileBased: - return *this->m_stepStatus; - case IE::groupBased: - case IE::variableBased: - return s->m_stepStatus; - default: - throw std::runtime_error( "[Iteration] unreachable" ); + case IE::fileBased: + return *this->m_stepStatus; + case IE::groupBased: + case IE::variableBased: + return s->m_stepStatus; + default: + throw std::runtime_error("[Iteration] unreachable"); } } -void -Iteration::setStepStatus( StepStatus status ) +void Iteration::setStepStatus(StepStatus status) { - internal::SeriesInternal * s = &retrieveSeries(); - switch( s->iterationEncoding() ) + internal::SeriesInternal *s = &retrieveSeries(); + switch (s->iterationEncoding()) { using IE = IterationEncoding; - case IE::fileBased: - *this->m_stepStatus = status; - break; - case IE::groupBased: - case IE::variableBased: - s->m_stepStatus = status; - break; - default: - throw std::runtime_error( "[Iteration] unreachable" ); + case IE::fileBased: + *this->m_stepStatus = status; + break; + case IE::groupBased: + case IE::variableBased: + s->m_stepStatus = status; + break; + default: + throw std::runtime_error("[Iteration] unreachable"); } } -bool -Iteration::dirtyRecursive() const +bool Iteration::dirtyRecursive() const { - if( dirty() ) + if (dirty()) { return true; } - if( particles.dirty() || meshes.dirty() ) + if (particles.dirty() || meshes.dirty()) { return true; } - for( auto const & pair : particles ) + for (auto const &pair : particles) { - if( pair.second.dirtyRecursive() ) + if (pair.second.dirtyRecursive()) { return true; } } - for( auto const & pair : meshes ) + for (auto const &pair : meshes) { - if( pair.second.dirtyRecursive() ) + if (pair.second.dirtyRecursive()) { return true; } @@ -672,8 +691,7 @@ Iteration::dirtyRecursive() const return false; } -void -Iteration::linkHierarchy(Writable& w) +void Iteration::linkHierarchy(Writable &w) { AttributableInterface::linkHierarchy(w); meshes.linkHierarchy(this->writable()); @@ -682,51 +700,56 @@ Iteration::linkHierarchy(Writable& w) void Iteration::runDeferredParseAccess() { - if( IOHandler()->m_frontendAccess == Access::CREATE ) + if (IOHandler()->m_frontendAccess == Access::CREATE) + { + return; + } + + if (!m_deferredParseAccess->has_value()) { return; } + auto const &deferred = m_deferredParseAccess->get(); + auto oldAccess = IOHandler()->m_frontendAccess; - auto newAccess = - const_cast< Access * >( &IOHandler()->m_frontendAccess ); + auto newAccess = const_cast(&IOHandler()->m_frontendAccess); *newAccess = Access::READ_WRITE; try { - read(); + if (deferred.fileBased) + { + readFileBased(deferred.filename, deferred.path, deferred.beginStep); + } + else + { + readGorVBased(deferred.path, deferred.beginStep); + } } - catch( ... ) + catch (...) 
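/*
 * Note (illustrative, not part of the changeset): runDeferredParseAccess() now
 * dispatches directly to readFileBased()/readGorVBased() and forwards
 * deferred.beginStep, so an iteration whose parsing was deferred opens its own
 * step, and hence its file, only when it is actually accessed; the deferred state
 * is reset afterwards whether or not the read throws.
 */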
{ + // reset this thing + *m_deferredParseAccess = auxiliary::Option(); *newAccess = oldAccess; throw; } + // reset this thing + *m_deferredParseAccess = auxiliary::Option(); *newAccess = oldAccess; } -template float -Iteration::time< float >() const; -template double -Iteration::time< double >() const; -template long double -Iteration::time< long double >() const; - -template float -Iteration::dt< float >() const; -template double -Iteration::dt< double >() const; -template long double -Iteration::dt< long double >() const; - -template -Iteration& Iteration::setTime< float >(float time); -template -Iteration& Iteration::setTime< double >(double time); -template -Iteration& Iteration::setTime< long double >(long double time); - -template -Iteration& Iteration::setDt< float >(float dt); -template -Iteration& Iteration::setDt< double >(double dt); -template -Iteration& Iteration::setDt< long double >(long double dt); -} // openPMD +template float Iteration::time() const; +template double Iteration::time() const; +template long double Iteration::time() const; + +template float Iteration::dt() const; +template double Iteration::dt() const; +template long double Iteration::dt() const; + +template Iteration &Iteration::setTime(float time); +template Iteration &Iteration::setTime(double time); +template Iteration &Iteration::setTime(long double time); + +template Iteration &Iteration::setDt(float dt); +template Iteration &Iteration::setDt(double dt); +template Iteration &Iteration::setDt(long double dt); +} // namespace openPMD diff --git a/src/IterationEncoding.cpp b/src/IterationEncoding.cpp index e463233b42..b86bb1c672 100644 --- a/src/IterationEncoding.cpp +++ b/src/IterationEncoding.cpp @@ -22,21 +22,20 @@ #include - -std::ostream& -openPMD::operator<<(std::ostream& os, openPMD::IterationEncoding const& ie) +std::ostream & +openPMD::operator<<(std::ostream &os, openPMD::IterationEncoding const &ie) { - switch( ie ) + switch (ie) { - case openPMD::IterationEncoding::fileBased: - os << "fileBased"; - break; - case openPMD::IterationEncoding::groupBased: - os << "groupBased"; - break; - case openPMD::IterationEncoding::variableBased: - os << "variableBased"; - break; + case openPMD::IterationEncoding::fileBased: + os << "fileBased"; + break; + case openPMD::IterationEncoding::groupBased: + os << "groupBased"; + break; + case openPMD::IterationEncoding::variableBased: + os << "variableBased"; + break; } return os; } diff --git a/src/Mesh.cpp b/src/Mesh.cpp index 0e09cfc06b..35c1e4d61a 100644 --- a/src/Mesh.cpp +++ b/src/Mesh.cpp @@ -36,353 +36,358 @@ Mesh::Mesh() setGeometry(Geometry::cartesian); setDataOrder(DataOrder::C); - setAxisLabels({"x"}); //empty strings are not allowed in HDF5 - setGridSpacing(std::vector< double >{1}); + setAxisLabels({"x"}); // empty strings are not allowed in HDF5 + setGridSpacing(std::vector{1}); setGridGlobalOffset({0}); setGridUnitSI(1); } -Mesh::Geometry -Mesh::geometry() const +Mesh::Geometry Mesh::geometry() const { std::string ret = geometryString(); - if( "cartesian" == ret ) { return Geometry::cartesian; } - else if( "thetaMode" == ret ) { return Geometry::thetaMode; } - else if( "cylindrical" == ret ) { return Geometry::cylindrical; } - else if( "spherical" == ret ) { return Geometry::spherical; } - else { return Geometry::other; } + if ("cartesian" == ret) + { + return Geometry::cartesian; + } + else if ("thetaMode" == ret) + { + return Geometry::thetaMode; + } + else if ("cylindrical" == ret) + { + return Geometry::cylindrical; + } + else if ("spherical" 
== ret) + { + return Geometry::spherical; + } + else + { + return Geometry::other; + } } std::string Mesh::geometryString() const { - return getAttribute( "geometry" ).get< std::string >(); + return getAttribute("geometry").get(); } -Mesh& -Mesh::setGeometry(Mesh::Geometry g) +Mesh &Mesh::setGeometry(Mesh::Geometry g) { - switch( g ) + switch (g) { - case Geometry::cartesian: - setAttribute("geometry", std::string("cartesian")); - break; - case Geometry::thetaMode: - setAttribute("geometry", std::string("thetaMode")); - break; - case Geometry::cylindrical: - setAttribute("geometry", std::string("cylindrical")); - break; - case Geometry::spherical: - setAttribute("geometry", std::string("spherical")); - break; - case Geometry::other: - // use the std::string overload to be more specific - setAttribute("geometry", std::string("other")); - break; + case Geometry::cartesian: + setAttribute("geometry", std::string("cartesian")); + break; + case Geometry::thetaMode: + setAttribute("geometry", std::string("thetaMode")); + break; + case Geometry::cylindrical: + setAttribute("geometry", std::string("cylindrical")); + break; + case Geometry::spherical: + setAttribute("geometry", std::string("spherical")); + break; + case Geometry::other: + // use the std::string overload to be more specific + setAttribute("geometry", std::string("other")); + break; } return *this; } -Mesh & Mesh::setGeometry( std::string geometry ) +Mesh &Mesh::setGeometry(std::string geometry) { std::string knownGeometries[] = { - "cartesian", "thetaMode", "cylindrical", "spherical", "other" }; - if( // 1. condition: geometry is not one of the known geometries + "cartesian", "thetaMode", "cylindrical", "spherical", "other"}; + if ( // 1. condition: geometry is not one of the known geometries std::find( - std::begin( knownGeometries ), - std::end( knownGeometries ), - geometry ) == std::end( knownGeometries ) + std::begin(knownGeometries), std::end(knownGeometries), geometry) == + std::end(knownGeometries) // 2. 
condition: prefix is not already there - && !auxiliary::starts_with( geometry, std::string( "other:" ) ) ) + && !auxiliary::starts_with(geometry, std::string("other:"))) { geometry = "other:" + geometry; } - setAttribute( "geometry", std::move( geometry ) ); + setAttribute("geometry", std::move(geometry)); return *this; } -std::string -Mesh::geometryParameters() const +std::string Mesh::geometryParameters() const { - return getAttribute("geometryParameters").get< std::string >(); + return getAttribute("geometryParameters").get(); } -Mesh& -Mesh::setGeometryParameters(std::string const& gp) +Mesh &Mesh::setGeometryParameters(std::string const &gp) { setAttribute("geometryParameters", gp); return *this; } -Mesh::DataOrder -Mesh::dataOrder() const +Mesh::DataOrder Mesh::dataOrder() const { - return Mesh::DataOrder(getAttribute("dataOrder").get< std::string >().c_str()[0]); + return Mesh::DataOrder( + getAttribute("dataOrder").get().c_str()[0]); } -Mesh& -Mesh::setDataOrder(Mesh::DataOrder dor) +Mesh &Mesh::setDataOrder(Mesh::DataOrder dor) { - setAttribute( - "dataOrder", - std::string(1u, static_cast(dor))); + setAttribute("dataOrder", std::string(1u, static_cast(dor))); return *this; } -std::vector< std::string > -Mesh::axisLabels() const +std::vector Mesh::axisLabels() const { - return getAttribute("axisLabels").get< std::vector< std::string > >(); + return getAttribute("axisLabels").get >(); } -Mesh& -Mesh::setAxisLabels(std::vector< std::string > const & als) +Mesh &Mesh::setAxisLabels(std::vector const &als) { setAttribute("axisLabels", als); return *this; } -template< typename T, typename > -Mesh& -Mesh::setGridSpacing(std::vector< T > const & gs) +template +Mesh &Mesh::setGridSpacing(std::vector const &gs) { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); setAttribute("gridSpacing", gs); return *this; } -template -Mesh& -Mesh::setGridSpacing(std::vector< float > const & gs); -template -Mesh& -Mesh::setGridSpacing(std::vector< double > const & gs); -template -Mesh& -Mesh::setGridSpacing(std::vector< long double > const & gs); - -std::vector< double > -Mesh::gridGlobalOffset() const +template Mesh &Mesh::setGridSpacing(std::vector const &gs); +template Mesh &Mesh::setGridSpacing(std::vector const &gs); +template Mesh &Mesh::setGridSpacing(std::vector const &gs); + +std::vector Mesh::gridGlobalOffset() const { - return getAttribute("gridGlobalOffset").get< std::vector< double> >(); + return getAttribute("gridGlobalOffset").get >(); } -Mesh& -Mesh::setGridGlobalOffset(std::vector< double > const & ggo) +Mesh &Mesh::setGridGlobalOffset(std::vector const &ggo) { setAttribute("gridGlobalOffset", ggo); return *this; } -double -Mesh::gridUnitSI() const +double Mesh::gridUnitSI() const { - return getAttribute("gridUnitSI").get< double >(); + return getAttribute("gridUnitSI").get(); } -Mesh& -Mesh::setGridUnitSI(double gusi) +Mesh &Mesh::setGridUnitSI(double gusi) { setAttribute("gridUnitSI", gusi); return *this; } -Mesh& -Mesh::setUnitDimension(std::map< UnitDimension, double > const& udim) +Mesh &Mesh::setUnitDimension(std::map const &udim) { - if( !udim.empty() ) + if (!udim.empty()) { - std::array< double, 7 > tmpUnitDimension = this->unitDimension(); - for( auto const& entry : udim ) + std::array tmpUnitDimension = this->unitDimension(); + for (auto const &entry : udim) tmpUnitDimension[static_cast(entry.first)] = entry.second; 
setAttribute("unitDimension", tmpUnitDimension); } return *this; } -template< typename T, typename > -Mesh& -Mesh::setTimeOffset(T to) +template +Mesh &Mesh::setTimeOffset(T to) { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); setAttribute("timeOffset", to); return *this; } -template -Mesh& -Mesh::setTimeOffset( long double ); +template Mesh &Mesh::setTimeOffset(long double); -template -Mesh& -Mesh::setTimeOffset( double ); +template Mesh &Mesh::setTimeOffset(double); -template -Mesh& -Mesh::setTimeOffset( float ); +template Mesh &Mesh::setTimeOffset(float); -void -Mesh::flush_impl(std::string const& name) +void Mesh::flush_impl( + std::string const &name, internal::FlushParams const &flushParams) { - if(IOHandler()->m_frontendAccess == Access::READ_ONLY ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) { - for( auto& comp : *this ) - comp.second.flush(comp.first); - } else + for (auto &comp : *this) + comp.second.flush(comp.first, flushParams); + } + else { - if( !written() ) + if (!written()) { - if( scalar() ) + if (scalar()) { - MeshRecordComponent& mrc = at(RecordComponent::SCALAR); + MeshRecordComponent &mrc = at(RecordComponent::SCALAR); mrc.parent() = parent(); - mrc.flush(name); - IOHandler()->flush(); - writable().abstractFilePosition = mrc.writable().abstractFilePosition; + mrc.flush(name, flushParams); + IOHandler()->flush(flushParams); + writable().abstractFilePosition = + mrc.writable().abstractFilePosition; written() = true; - } else + } + else { - Parameter< Operation::CREATE_PATH > pCreate; + Parameter pCreate; pCreate.path = name; IOHandler()->enqueue(IOTask(this, pCreate)); - for( auto& comp : *this ) + for (auto &comp : *this) comp.second.parent() = &this->writable(); } } - if( scalar() ) + if (scalar()) { - for( auto& comp : *this ) + for (auto &comp : *this) { - comp.second.flush(name); + comp.second.flush(name, flushParams); writable().abstractFilePosition = comp.second.writable().abstractFilePosition; } } else { - for( auto& comp : *this ) - comp.second.flush(comp.first); + for (auto &comp : *this) + comp.second.flush(comp.first, flushParams); } - flushAttributes(); + flushAttributes(flushParams); } } -void -Mesh::read() +void Mesh::read() { auto map = eraseStaleEntries(); using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "geometry"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::STRING) { - std::string tmpGeometry = Attribute(*aRead.resource).get< std::string >(); - if( "cartesian" == tmpGeometry ) + std::string tmpGeometry = Attribute(*aRead.resource).get(); + if ("cartesian" == tmpGeometry) setGeometry(Geometry::cartesian); - else if( "thetaMode" == tmpGeometry ) + else if ("thetaMode" == tmpGeometry) setGeometry(Geometry::thetaMode); - else if( "cylindrical" == tmpGeometry ) + else if ("cylindrical" == tmpGeometry) setGeometry(Geometry::cylindrical); - else if( "spherical" == tmpGeometry ) + else if ("spherical" == tmpGeometry) setGeometry(Geometry::spherical); else setGeometry(tmpGeometry); } else - throw std::runtime_error("Unexpected Attribute datatype for 'geometry'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'geometry'"); aRead.name = "dataOrder"; IOHandler()->enqueue(IOTask(this, aRead)); - 
IOHandler()->flush(); - if( *aRead.dtype == DT::CHAR ) - setDataOrder(static_cast(Attribute(*aRead.resource).get< char >())); - else if( *aRead.dtype == DT::STRING ) + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::CHAR) + setDataOrder( + static_cast(Attribute(*aRead.resource).get())); + else if (*aRead.dtype == DT::STRING) { - std::string tmpDataOrder = Attribute(*aRead.resource).get< std::string >(); - if( tmpDataOrder.size() == 1 ) + std::string tmpDataOrder = + Attribute(*aRead.resource).get(); + if (tmpDataOrder.size() == 1) setDataOrder(static_cast(tmpDataOrder[0])); else - throw std::runtime_error("Unexpected Attribute value for 'dataOrder': " + tmpDataOrder); + throw std::runtime_error( + "Unexpected Attribute value for 'dataOrder': " + tmpDataOrder); } else - throw std::runtime_error("Unexpected Attribute datatype for 'dataOrder'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'dataOrder'"); aRead.name = "axisLabels"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::VEC_STRING || *aRead.dtype == DT::STRING) - setAxisLabels(Attribute(*aRead.resource).get< std::vector< std::string > >()); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::VEC_STRING || *aRead.dtype == DT::STRING) + setAxisLabels( + Attribute(*aRead.resource).get >()); else - throw std::runtime_error("Unexpected Attribute datatype for 'axisLabels'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'axisLabels'"); aRead.name = "gridSpacing"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); Attribute a = Attribute(*aRead.resource); - if( *aRead.dtype == DT::VEC_FLOAT || *aRead.dtype == DT::FLOAT ) - setGridSpacing(a.get< std::vector< float > >()); - else if( *aRead.dtype == DT::VEC_DOUBLE || *aRead.dtype == DT::DOUBLE ) - setGridSpacing(a.get< std::vector< double > >()); - else if( *aRead.dtype == DT::VEC_LONG_DOUBLE || *aRead.dtype == DT::LONG_DOUBLE ) - setGridSpacing(a.get< std::vector< long double > >()); + if (*aRead.dtype == DT::VEC_FLOAT || *aRead.dtype == DT::FLOAT) + setGridSpacing(a.get >()); + else if (*aRead.dtype == DT::VEC_DOUBLE || *aRead.dtype == DT::DOUBLE) + setGridSpacing(a.get >()); + else if ( + *aRead.dtype == DT::VEC_LONG_DOUBLE || *aRead.dtype == DT::LONG_DOUBLE) + setGridSpacing(a.get >()); else - throw std::runtime_error("Unexpected Attribute datatype for 'gridSpacing'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'gridSpacing'"); aRead.name = "gridGlobalOffset"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::VEC_DOUBLE || *aRead.dtype == DT::DOUBLE ) - setGridGlobalOffset(Attribute(*aRead.resource).get< std::vector< double > >()); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::VEC_DOUBLE || *aRead.dtype == DT::DOUBLE) + setGridGlobalOffset( + Attribute(*aRead.resource).get >()); else - throw std::runtime_error("Unexpected Attribute datatype for 'gridGlobalOffset'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'gridGlobalOffset'"); aRead.name = "gridUnitSI"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::DOUBLE ) - setGridUnitSI(Attribute(*aRead.resource).get< double >()); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::DOUBLE) + setGridUnitSI(Attribute(*aRead.resource).get()); else - throw 
std::runtime_error("Unexpected Attribute datatype for 'gridUnitSI'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'gridUnitSI'"); - if( scalar() ) + if (scalar()) { /* using operator[] will incorrectly update parent */ map.at(MeshRecordComponent::SCALAR).read(); - } else + } + else { - Parameter< Operation::LIST_PATHS > pList; + Parameter pList; IOHandler()->enqueue(IOTask(this, pList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - Parameter< Operation::OPEN_PATH > pOpen; - for( auto const& component : *pList.paths ) + Parameter pOpen; + for (auto const &component : *pList.paths) { - MeshRecordComponent& rc = map[ component ]; + MeshRecordComponent &rc = map[component]; pOpen.path = component; IOHandler()->enqueue(IOTask(&rc, pOpen)); *rc.m_isConstant = true; rc.read(); } - Parameter< Operation::LIST_DATASETS > dList; + Parameter dList; IOHandler()->enqueue(IOTask(this, dList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - Parameter< Operation::OPEN_DATASET > dOpen; - for( auto const& component : *dList.datasets ) + Parameter dOpen; + for (auto const &component : *dList.datasets) { - MeshRecordComponent & rc = map[ component ]; + MeshRecordComponent &rc = map[component]; dOpen.name = component; IOHandler()->enqueue(IOTask(&rc, dOpen)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); rc.written() = false; rc.resetDataset(Dataset(*dOpen.dtype, *dOpen.extent)); rc.written() = true; @@ -392,45 +397,45 @@ Mesh::read() readBase(); - readAttributes( ReadMode::FullyReread ); + readAttributes(ReadMode::FullyReread); } -} // openPMD +} // namespace openPMD -std::ostream& -openPMD::operator<<(std::ostream& os, openPMD::Mesh::Geometry const& go) +std::ostream & +openPMD::operator<<(std::ostream &os, openPMD::Mesh::Geometry const &go) { - switch( go ) + switch (go) { - case openPMD::Mesh::Geometry::cartesian: - os<<"cartesian"; - break; - case openPMD::Mesh::Geometry::thetaMode: - os<<"thetaMode"; - break; - case openPMD::Mesh::Geometry::cylindrical: - os<<"cylindrical"; - break; - case openPMD::Mesh::Geometry::spherical: - os<<"spherical"; - break; - case openPMD::Mesh::Geometry::other: - os<<"other"; - break; + case openPMD::Mesh::Geometry::cartesian: + os << "cartesian"; + break; + case openPMD::Mesh::Geometry::thetaMode: + os << "thetaMode"; + break; + case openPMD::Mesh::Geometry::cylindrical: + os << "cylindrical"; + break; + case openPMD::Mesh::Geometry::spherical: + os << "spherical"; + break; + case openPMD::Mesh::Geometry::other: + os << "other"; + break; } return os; } -std::ostream& -openPMD::operator<<(std::ostream& os, openPMD::Mesh::DataOrder const& dor) +std::ostream & +openPMD::operator<<(std::ostream &os, openPMD::Mesh::DataOrder const &dor) { - switch( dor ) + switch (dor) { - case openPMD::Mesh::DataOrder::C: - os<<'C'; - break; - case openPMD::Mesh::DataOrder::F: - os<<'F'; - break; + case openPMD::Mesh::DataOrder::C: + os << 'C'; + break; + case openPMD::Mesh::DataOrder::F: + os << 'F'; + break; } return os; } diff --git a/src/ParticlePatches.cpp b/src/ParticlePatches.cpp index 0319f408df..76017bbf94 100644 --- a/src/ParticlePatches.cpp +++ b/src/ParticlePatches.cpp @@ -20,55 +20,56 @@ */ #include "openPMD/ParticlePatches.hpp" - namespace openPMD { -size_t -ParticlePatches::numPatches() const +size_t ParticlePatches::numPatches() const { - if( this->empty() ) + if (this->empty()) return 0; return this->at("numParticles").at(RecordComponent::SCALAR).getExtent()[0]; } 
-void -ParticlePatches::read() +void ParticlePatches::read() { - Parameter< Operation::LIST_PATHS > pList; + Parameter pList; IOHandler()->enqueue(IOTask(this, pList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - Parameter< Operation::OPEN_PATH > pOpen; - for( auto const& record_name : *pList.paths ) + Parameter pOpen; + for (auto const &record_name : *pList.paths) { - PatchRecord& pr = (*this)[record_name]; + PatchRecord &pr = (*this)[record_name]; pOpen.path = record_name; IOHandler()->enqueue(IOTask(&pr, pOpen)); pr.read(); } - Parameter< Operation::LIST_DATASETS > dList; + Parameter dList; IOHandler()->enqueue(IOTask(this, dList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - Parameter< Operation::OPEN_DATASET > dOpen; - for( auto const& component_name : *dList.datasets ) + Parameter dOpen; + for (auto const &component_name : *dList.datasets) { - if( !("numParticles" == component_name || "numParticlesOffset" == component_name) ) - throw std::runtime_error("Unexpected record component" + component_name + "in particlePatch"); + if (!("numParticles" == component_name || + "numParticlesOffset" == component_name)) + throw std::runtime_error( + "Unexpected record component" + component_name + + "in particlePatch"); - PatchRecord& pr = Container< PatchRecord >::operator[](component_name); - PatchRecordComponent& prc = pr[RecordComponent::SCALAR]; + PatchRecord &pr = Container::operator[](component_name); + PatchRecordComponent &prc = pr[RecordComponent::SCALAR]; prc.parent() = pr.parent(); dOpen.name = component_name; IOHandler()->enqueue(IOTask(&pr, dOpen)); IOHandler()->enqueue(IOTask(&prc, dOpen)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - if( determineDatatype< uint64_t >() != *dOpen.dtype ) - throw std::runtime_error("Unexpected datatype for " + component_name); + if (determineDatatype() != *dOpen.dtype) + throw std::runtime_error( + "Unexpected datatype for " + component_name); /* allow all attributes to be set */ prc.written() = false; @@ -79,4 +80,4 @@ ParticlePatches::read() prc.read(); } } -} // openPMD +} // namespace openPMD diff --git a/src/ParticleSpecies.cpp b/src/ParticleSpecies.cpp index 61861b4a1a..3e85bb546e 100644 --- a/src/ParticleSpecies.cpp +++ b/src/ParticleSpecies.cpp @@ -26,168 +26,169 @@ #include #include - namespace openPMD { ParticleSpecies::ParticleSpecies() { - particlePatches.writable().ownKeyWithinParent = { "particlePatches" }; + particlePatches.writable().ownKeyWithinParent = {"particlePatches"}; } -void -ParticleSpecies::read() +void ParticleSpecies::read() { /* obtain all non-scalar records */ - Parameter< Operation::LIST_PATHS > pList; + Parameter pList; IOHandler()->enqueue(IOTask(this, pList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); auto map = eraseStaleEntries(); - Parameter< Operation::OPEN_PATH > pOpen; - Parameter< Operation::LIST_ATTS > aList; + Parameter pOpen; + Parameter aList; bool hasParticlePatches = false; - for( auto const& record_name : *pList.paths ) + for (auto const &record_name : *pList.paths) { - if( record_name == "particlePatches" ) + if (record_name == "particlePatches") { hasParticlePatches = true; pOpen.path = "particlePatches"; IOHandler()->enqueue(IOTask(&particlePatches, pOpen)); particlePatches.read(); - } else + } + else { - Record& r = map[record_name]; + Record &r = map[record_name]; pOpen.path = record_name; aList.attributes->clear(); IOHandler()->enqueue(IOTask(&r, pOpen)); 
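// both the OPEN_PATH task (pOpen) and the attribute listing (aList) for this
// record are queued here; the single flush a few lines below executes them
// together before the 'value'/'shape' attributes are inspected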
IOHandler()->enqueue(IOTask(&r, aList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); auto att_begin = aList.attributes->begin(); auto att_end = aList.attributes->end(); auto value = std::find(att_begin, att_end, "value"); auto shape = std::find(att_begin, att_end, "shape"); - if( value != att_end && shape != att_end ) + if (value != att_end && shape != att_end) { auto scalarMap = r.eraseStaleEntries(); - RecordComponent& rc = scalarMap[RecordComponent::SCALAR]; + RecordComponent &rc = scalarMap[RecordComponent::SCALAR]; rc.parent() = r.parent(); IOHandler()->enqueue(IOTask(&rc, pOpen)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); *rc.m_isConstant = true; } r.read(); } } - if( !hasParticlePatches ) + if (!hasParticlePatches) { - auto & container = *particlePatches.m_container; - container.erase( "numParticles" ); - container.erase( "numParticlesOffset" ); + auto &container = *particlePatches.m_container; + container.erase("numParticles"); + container.erase("numParticlesOffset"); } /* obtain all scalar records */ - Parameter< Operation::LIST_DATASETS > dList; + Parameter dList; IOHandler()->enqueue(IOTask(this, dList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - Parameter< Operation::OPEN_DATASET > dOpen; - for( auto const& record_name : *dList.datasets ) + Parameter dOpen; + for (auto const &record_name : *dList.datasets) { - try { - Record& r = map[record_name]; + try + { + Record &r = map[record_name]; dOpen.name = record_name; IOHandler()->enqueue(IOTask(&r, dOpen)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); auto scalarMap = r.eraseStaleEntries(); - RecordComponent& rc = scalarMap[RecordComponent::SCALAR]; + RecordComponent &rc = scalarMap[RecordComponent::SCALAR]; rc.parent() = r.parent(); IOHandler()->enqueue(IOTask(&rc, dOpen)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); rc.written() = false; rc.resetDataset(Dataset(*dOpen.dtype, *dOpen.extent)); rc.written() = true; r.read(); - } catch( std::runtime_error const & ) + } + catch (std::runtime_error const &) { std::cerr << "WARNING: Skipping invalid openPMD record '" - << record_name << "'" - << std::endl; - while( ! 
IOHandler()->m_work.empty() ) + << record_name << "'" << std::endl; + while (!IOHandler()->m_work.empty()) IOHandler()->m_work.pop(); - map.forget( record_name ); + map.forget(record_name); //(*this)[record_name].erase(RecordComponent::SCALAR); - //this->erase(record_name); + // this->erase(record_name); } } - readAttributes( ReadMode::FullyReread ); + readAttributes(ReadMode::FullyReread); } namespace { - bool flushParticlePatches( ParticlePatches const & particlePatches ) + bool flushParticlePatches(ParticlePatches const &particlePatches) { - return particlePatches.find("numParticles") != particlePatches.end() - && particlePatches.find("numParticlesOffset") != particlePatches.end() - && particlePatches.size() >= 3; + return particlePatches.find("numParticles") != particlePatches.end() && + particlePatches.find("numParticlesOffset") != + particlePatches.end() && + particlePatches.size() >= 3; } -} +} // namespace -void -ParticleSpecies::flush(std::string const& path) +void ParticleSpecies::flush( + std::string const &path, internal::FlushParams const &flushParams) { - if(IOHandler()->m_frontendAccess == Access::READ_ONLY ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) { - for( auto& record : *this ) - record.second.flush(record.first); - for( auto& patch : particlePatches ) - patch.second.flush(patch.first); - } else + for (auto &record : *this) + record.second.flush(record.first, flushParams); + for (auto &patch : particlePatches) + patch.second.flush(patch.first, flushParams); + } + else { auto it = find("position"); - if ( it != end() ) + if (it != end()) it->second.setUnitDimension({{UnitDimension::L, 1}}); it = find("positionOffset"); - if ( it != end() ) + if (it != end()) it->second.setUnitDimension({{UnitDimension::L, 1}}); - Container< Record >::flush(path); + Container::flush(path, flushParams); - for( auto& record : *this ) - record.second.flush(record.first); + for (auto &record : *this) + record.second.flush(record.first, flushParams); - if( flushParticlePatches( particlePatches ) ) + if (flushParticlePatches(particlePatches)) { - particlePatches.flush("particlePatches"); - for( auto& patch : particlePatches ) - patch.second.flush(patch.first); + particlePatches.flush("particlePatches", flushParams); + for (auto &patch : particlePatches) + patch.second.flush(patch.first, flushParams); } } } -bool -ParticleSpecies::dirtyRecursive() const +bool ParticleSpecies::dirtyRecursive() const { - if( dirty() ) + if (dirty()) { return true; } - for( auto const & pair : *this ) + for (auto const &pair : *this) { - if( pair.second.dirtyRecursive() ) + if (pair.second.dirtyRecursive()) { return true; } } - if( flushParticlePatches( particlePatches ) ) + if (flushParticlePatches(particlePatches)) { - for( auto const & pair : particlePatches ) + for (auto const &pair : particlePatches) { - if( pair.second.dirtyRecursive() ) + if (pair.second.dirtyRecursive()) { return true; } diff --git a/src/ReadIterations.cpp b/src/ReadIterations.cpp index 14329abf5d..08dc4f9545 100644 --- a/src/ReadIterations.cpp +++ b/src/ReadIterations.cpp @@ -27,36 +27,32 @@ namespace openPMD { SeriesIterator::SeriesIterator() : m_series() -{ -} +{} -SeriesIterator::SeriesIterator( Series series ) - : m_series( std::move( series ) ) +SeriesIterator::SeriesIterator(Series series) : m_series(std::move(series)) { auto it = series.get().iterations.begin(); - if( it == series.get().iterations.end() ) + if (it == series.get().iterations.end()) { *this = end(); return; } else { - auto openIteration = [ &it ]() - { 
+ auto openIteration = [&it]() { /* * @todo * Is that really clean? * Use case: See Python ApiTest testListSeries: * Call listSeries twice. */ - if( *it->second.m_closed != - Iteration::CloseStatus::ClosedInBackend ) + if (*it->second.m_closed != Iteration::CloseStatus::ClosedInBackend) { it->second.open(); } }; AdvanceStatus status{}; - switch( series.iterationEncoding() ) + switch (series.iterationEncoding()) { case IterationEncoding::fileBased: /* @@ -66,7 +62,7 @@ SeriesIterator::SeriesIterator( Series series ) * the step after parsing the file is ok. */ openIteration(); - status = it->second.beginStep(); + status = it->second.beginStep(/* reread = */ true); break; case IterationEncoding::groupBased: case IterationEncoding::variableBased: @@ -75,83 +71,85 @@ SeriesIterator::SeriesIterator( Series series ) * access to the file until now. Better to begin a step right away, * otherwise we might get another step's data. */ - status = it->second.beginStep(); + status = it->second.beginStep(/* reread = */ true); openIteration(); break; } - if( status == AdvanceStatus::OVER ) + if (status == AdvanceStatus::OVER) { *this = end(); return; } - it->second.setStepStatus( StepStatus::DuringStep ); + it->second.setStepStatus(StepStatus::DuringStep); } m_currentIteration = it->first; } -SeriesIterator & SeriesIterator::operator++() +SeriesIterator &SeriesIterator::operator++() { - if( !m_series.has_value() ) + if (!m_series.has_value()) { *this = end(); return *this; } - Series & series = m_series.get(); - auto & iterations = series.iterations; - auto & currentIteration = iterations[ m_currentIteration ]; - if( !currentIteration.closed() ) + Series &series = m_series.get(); + auto &iterations = series.iterations; + auto ¤tIteration = iterations[m_currentIteration]; + if (!currentIteration.closed()) { currentIteration.close(); } - switch( series.iterationEncoding() ) + switch (series.iterationEncoding()) { using IE = IterationEncoding; case IE::groupBased: case IE::variableBased: { // since we are in group-based iteration layout, it does not // matter which iteration we begin a step upon - AdvanceStatus status = currentIteration.beginStep(); - if( status == AdvanceStatus::OVER ) + AdvanceStatus status{}; + status = currentIteration.beginStep(/* reread = */ true); + if (status == AdvanceStatus::OVER) { *this = end(); return *this; } - currentIteration.setStepStatus( StepStatus::DuringStep ); + currentIteration.setStepStatus(StepStatus::DuringStep); break; } default: break; } - auto it = iterations.find( m_currentIteration ); + auto it = iterations.find(m_currentIteration); auto itEnd = iterations.end(); - if( it == itEnd ) + if (it == itEnd) { *this = end(); return *this; } ++it; - if( it == itEnd ) + if (it == itEnd) { *this = end(); return *this; } m_currentIteration = it->first; - if( *it->second.m_closed != Iteration::CloseStatus::ClosedInBackend ) + if (*it->second.m_closed != Iteration::CloseStatus::ClosedInBackend) { it->second.open(); } - switch( series.iterationEncoding() ) + switch (series.iterationEncoding()) { using IE = IterationEncoding; case IE::fileBased: { - auto & iteration = series.iterations[ m_currentIteration ]; - AdvanceStatus status = iteration.beginStep(); - if( status == AdvanceStatus::OVER ) + auto &iteration = series.iterations[m_currentIteration]; + AdvanceStatus status{}; + status = iteration.beginStep(/* reread = */ true); + if (status == AdvanceStatus::OVER) { *this = end(); return *this; } - iteration.setStepStatus( StepStatus::DuringStep ); + 
iteration.setStepStatus(StepStatus::DuringStep); break; } default: @@ -163,18 +161,18 @@ SeriesIterator & SeriesIterator::operator++() IndexedIteration SeriesIterator::operator*() { return IndexedIteration( - m_series.get().iterations[ m_currentIteration ], m_currentIteration ); + m_series.get().iterations[m_currentIteration], m_currentIteration); } -bool SeriesIterator::operator==( SeriesIterator const & other ) const +bool SeriesIterator::operator==(SeriesIterator const &other) const { return this->m_currentIteration == other.m_currentIteration && this->m_series.has_value() == other.m_series.has_value(); } -bool SeriesIterator::operator!=( SeriesIterator const & other ) const +bool SeriesIterator::operator!=(SeriesIterator const &other) const { - return !operator==( other ); + return !operator==(other); } SeriesIterator SeriesIterator::end() @@ -182,14 +180,12 @@ SeriesIterator SeriesIterator::end() return SeriesIterator{}; } -ReadIterations::ReadIterations( Series series ) - : m_series( std::move( series ) ) -{ -} +ReadIterations::ReadIterations(Series series) : m_series(std::move(series)) +{} ReadIterations::iterator_t ReadIterations::begin() { - return iterator_t{ m_series }; + return iterator_t{m_series}; } ReadIterations::iterator_t ReadIterations::end() diff --git a/src/Record.cpp b/src/Record.cpp index 989eea975b..3f50ef510c 100644 --- a/src/Record.cpp +++ b/src/Record.cpp @@ -24,7 +24,6 @@ #include - namespace openPMD { Record::Record() @@ -32,101 +31,103 @@ Record::Record() setTimeOffset(0.f); } -Record& -Record::setUnitDimension(std::map< UnitDimension, double > const& udim) +Record &Record::setUnitDimension(std::map const &udim) { - if( !udim.empty() ) + if (!udim.empty()) { - std::array< double, 7 > tmpUnitDimension = this->unitDimension(); - for( auto const& entry : udim ) + std::array tmpUnitDimension = this->unitDimension(); + for (auto const &entry : udim) tmpUnitDimension[static_cast(entry.first)] = entry.second; setAttribute("unitDimension", tmpUnitDimension); } return *this; } -void -Record::flush_impl(std::string const& name) +void Record::flush_impl( + std::string const &name, internal::FlushParams const &flushParams) { - if(IOHandler()->m_frontendAccess == Access::READ_ONLY ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) { - for( auto& comp : *this ) - comp.second.flush(comp.first); - } else + for (auto &comp : *this) + comp.second.flush(comp.first, flushParams); + } + else { - if( !written() ) + if (!written()) { - if( scalar() ) + if (scalar()) { - RecordComponent& rc = at(RecordComponent::SCALAR); + RecordComponent &rc = at(RecordComponent::SCALAR); rc.parent() = parent(); - rc.flush(name); - IOHandler()->flush(); - writable().abstractFilePosition = rc.writable().abstractFilePosition; + rc.flush(name, flushParams); + IOHandler()->flush(flushParams); + writable().abstractFilePosition = + rc.writable().abstractFilePosition; written() = true; - } else + } + else { - Parameter< Operation::CREATE_PATH > pCreate; + Parameter pCreate; pCreate.path = name; IOHandler()->enqueue(IOTask(this, pCreate)); - for( auto& comp : *this ) + for (auto &comp : *this) comp.second.parent() = getWritable(this); } } - if( scalar() ) + if (scalar()) { - for( auto& comp : *this ) + for (auto &comp : *this) { - comp.second.flush(name); + comp.second.flush(name, flushParams); writable().abstractFilePosition = comp.second.writable().abstractFilePosition; } } else { - for( auto& comp : *this ) - comp.second.flush(comp.first); + for (auto &comp : *this) + 
comp.second.flush(comp.first, flushParams); } - flushAttributes(); + flushAttributes(flushParams); } } -void -Record::read() +void Record::read() { - if( scalar() ) + if (scalar()) { /* using operator[] will incorrectly update parent */ this->at(RecordComponent::SCALAR).read(); - } else + } + else { - Parameter< Operation::LIST_PATHS > pList; + Parameter pList; IOHandler()->enqueue(IOTask(this, pList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - Parameter< Operation::OPEN_PATH > pOpen; - for( auto const& component : *pList.paths ) + Parameter pOpen; + for (auto const &component : *pList.paths) { - RecordComponent& rc = (*this)[component]; + RecordComponent &rc = (*this)[component]; pOpen.path = component; IOHandler()->enqueue(IOTask(&rc, pOpen)); *rc.m_isConstant = true; rc.read(); } - Parameter< Operation::LIST_DATASETS > dList; + Parameter dList; IOHandler()->enqueue(IOTask(this, dList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - Parameter< Operation::OPEN_DATASET > dOpen; - for( auto const& component : *dList.datasets ) + Parameter dOpen; + for (auto const &component : *dList.datasets) { - RecordComponent & rc = ( *this )[ component ]; + RecordComponent &rc = (*this)[component]; dOpen.name = component; IOHandler()->enqueue(IOTask(&rc, dOpen)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); rc.written() = false; rc.resetDataset(Dataset(*dOpen.dtype, *dOpen.extent)); rc.written() = true; @@ -136,10 +137,10 @@ Record::read() readBase(); - readAttributes( ReadMode::FullyReread ); + readAttributes(ReadMode::FullyReread); } template <> -BaseRecord::mapped_type& -BaseRecord::operator[](std::string&& key); -} // openPMD +BaseRecord::mapped_type & +BaseRecord::operator[](std::string &&key); +} // namespace openPMD diff --git a/src/RecordComponent.cpp b/src/RecordComponent.cpp index 05342462e7..47390c3ba5 100644 --- a/src/RecordComponent.cpp +++ b/src/RecordComponent.cpp @@ -18,191 +18,184 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . 
*/ -#include "openPMD/auxiliary/Memory.hpp" #include "openPMD/RecordComponent.hpp" #include "openPMD/Dataset.hpp" #include "openPMD/DatatypeHelpers.hpp" -#include "openPMD/Series.hpp" #include "openPMD/IO/Format.hpp" +#include "openPMD/Series.hpp" +#include "openPMD/auxiliary/Memory.hpp" #include +#include +#include #include #include #include -#include -#include - namespace openPMD { // We need to instantiate this somewhere otherwise there might be linker issues // despite this thing actually being constepxr -constexpr char const * const RecordComponent::SCALAR; +constexpr char const *const RecordComponent::SCALAR; RecordComponent::RecordComponent() - : m_chunks{std::make_shared< std::queue< IOTask > >()}, - m_constantValue{std::make_shared< Attribute >(-1)} + : m_chunks{std::make_shared >()} + , m_constantValue{std::make_shared(-1)} { setUnitSI(1); resetDataset(Dataset(Datatype::CHAR, {1})); } -RecordComponent& -RecordComponent::setUnitSI(double usi) +RecordComponent &RecordComponent::setUnitSI(double usi) { setAttribute("unitSI", usi); return *this; } -RecordComponent & -RecordComponent::resetDataset( Dataset d ) +RecordComponent &RecordComponent::resetDataset(Dataset d) { - if( written() ) + if (written()) { - if( d.dtype == Datatype::UNDEFINED ) + if (d.dtype == Datatype::UNDEFINED) { d.dtype = m_dataset->dtype; } - else if( d.dtype != m_dataset->dtype ) + else if (d.dtype != m_dataset->dtype) { throw std::runtime_error( - "Cannot change the datatype of a dataset." ); + "Cannot change the datatype of a dataset."); } *m_hasBeenExtended = true; } // if( d.extent.empty() ) // throw std::runtime_error("Dataset extent must be at least 1D."); - if( std::any_of( - d.extent.begin(), - d.extent.end(), - []( Extent::value_type const & i ) { return i == 0u; } ) ) - return makeEmpty( std::move( d ) ); + if (std::any_of( + d.extent.begin(), d.extent.end(), [](Extent::value_type const &i) { + return i == 0u; + })) + return makeEmpty(std::move(d)); *m_isEmpty = false; - if( written() ) + if (written()) { - m_dataset->extend( std::move( d.extent ) ); + m_dataset->extend(std::move(d.extent)); } else { - *m_dataset = std::move( d ); + *m_dataset = std::move(d); } dirty() = true; return *this; } -uint8_t -RecordComponent::getDimensionality() const +uint8_t RecordComponent::getDimensionality() const { return m_dataset->rank; } -Extent -RecordComponent::getExtent() const +Extent RecordComponent::getExtent() const { return m_dataset->extent; } namespace detail { -struct MakeEmpty -{ - template< typename T > - RecordComponent& operator()( RecordComponent & rc, uint8_t dimensions ) + struct MakeEmpty { - return rc.makeEmpty< T >( dimensions ); - } + template + RecordComponent &operator()(RecordComponent &rc, uint8_t dimensions) + { + return rc.makeEmpty(dimensions); + } - template< unsigned int N > - RecordComponent& operator()( RecordComponent &, uint8_t ) - { - throw std::runtime_error( - "RecordComponent::makeEmpty: Unknown datatype." 
); - } -}; -} + template + RecordComponent &operator()(RecordComponent &, uint8_t) + { + throw std::runtime_error( + "RecordComponent::makeEmpty: Unknown datatype."); + } + }; +} // namespace detail -RecordComponent& -RecordComponent::makeEmpty( Datatype dt, uint8_t dimensions ) +RecordComponent &RecordComponent::makeEmpty(Datatype dt, uint8_t dimensions) { static detail::MakeEmpty me; - return switchType( dt, me, *this, dimensions ); + return switchType(dt, me, *this, dimensions); } -RecordComponent& -RecordComponent::makeEmpty( Dataset d ) +RecordComponent &RecordComponent::makeEmpty(Dataset d) { - if( written() ) + if (written()) { - if( !constant() ) + if (!constant()) { throw std::runtime_error( "An empty record component's extent can only be changed" " in case it has been initialized as an empty or constant" - " record component." ); + " record component."); } - if( d.dtype == Datatype::UNDEFINED ) + if (d.dtype == Datatype::UNDEFINED) { d.dtype = m_dataset->dtype; } - else if( d.dtype != m_dataset->dtype ) + else if (d.dtype != m_dataset->dtype) { throw std::runtime_error( - "Cannot change the datatype of a dataset." ); + "Cannot change the datatype of a dataset."); } - m_dataset->extend( std::move( d.extent ) ); + m_dataset->extend(std::move(d.extent)); *m_hasBeenExtended = true; } else { - *m_dataset = std::move( d ); + *m_dataset = std::move(d); } - if( m_dataset->extent.size() == 0 ) - throw std::runtime_error( "Dataset extent must be at least 1D." ); + if (m_dataset->extent.size() == 0) + throw std::runtime_error("Dataset extent must be at least 1D."); *m_isEmpty = true; dirty() = true; - if( !written() ) + if (!written()) { - static detail::DefaultValue< RecordComponent > dv; - switchType( m_dataset->dtype, dv, *this ); + static detail::DefaultValue dv; + switchType(m_dataset->dtype, dv, *this); } return *this; } -bool -RecordComponent::empty() const +bool RecordComponent::empty() const { return *m_isEmpty; } -void -RecordComponent::flush(std::string const& name) +void RecordComponent::flush( + std::string const &name, internal::FlushParams const &flushParams) { - if( IOHandler()->m_flushLevel == FlushLevel::SkeletonOnly ) + if (flushParams.flushLevel == FlushLevel::SkeletonOnly) { *this->m_name = name; return; } - if(IOHandler()->m_frontendAccess == Access::READ_ONLY ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) { - while( !m_chunks->empty() ) + while (!m_chunks->empty()) { IOHandler()->enqueue(m_chunks->front()); m_chunks->pop(); } - } else + } + else { - if( !written() ) + if (!written()) { - if( constant() ) + if (constant()) { - Parameter< Operation::CREATE_PATH > pCreate; + Parameter pCreate; pCreate.path = name; IOHandler()->enqueue(IOTask(this, pCreate)); - Parameter< Operation::WRITE_ATT > aWrite; + Parameter aWrite; aWrite.name = "value"; aWrite.dtype = m_constantValue->dtype; aWrite.resource = m_constantValue->getResource(); @@ -216,9 +209,10 @@ RecordComponent::flush(std::string const& name) aWrite.dtype = a.dtype; aWrite.resource = a.getResource(); IOHandler()->enqueue(IOTask(this, aWrite)); - } else + } + else { - Parameter< Operation::CREATE_DATASET > dCreate; + Parameter dCreate; dCreate.name = name; dCreate.extent = getExtent(); dCreate.dtype = getDatatype(); @@ -230,134 +224,130 @@ RecordComponent::flush(std::string const& name) } } - if( *m_hasBeenExtended ) + if (*m_hasBeenExtended) { - if( constant() ) + if (constant()) { - Parameter< Operation::WRITE_ATT > aWrite; + Parameter aWrite; aWrite.name = "shape"; - Attribute a( getExtent() ); + 
Attribute a(getExtent()); aWrite.dtype = a.dtype; aWrite.resource = a.getResource(); - IOHandler()->enqueue( IOTask( this, aWrite ) ); + IOHandler()->enqueue(IOTask(this, aWrite)); } else { - Parameter< Operation::EXTEND_DATASET > pExtend; + Parameter pExtend; pExtend.extent = m_dataset->extent; - IOHandler()->enqueue( IOTask( this, std::move( pExtend ) ) ); + IOHandler()->enqueue(IOTask(this, std::move(pExtend))); *m_hasBeenExtended = false; } } - while( !m_chunks->empty() ) + while (!m_chunks->empty()) { IOHandler()->enqueue(m_chunks->front()); m_chunks->pop(); } - flushAttributes(); + flushAttributes(flushParams); } } -void -RecordComponent::read() +void RecordComponent::read() { readBase(); } -void -RecordComponent::readBase() +void RecordComponent::readBase() { using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; - if( constant() && !empty() ) + if (constant() && !empty()) { aRead.name = "value"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); Attribute a(*aRead.resource); DT dtype = *aRead.dtype; written() = false; - switch( dtype ) + switch (dtype) { - case DT::LONG_DOUBLE: - makeConstant(a.get< long double >()); - break; - case DT::DOUBLE: - makeConstant(a.get< double >()); - break; - case DT::FLOAT: - makeConstant(a.get< float >()); - break; - case DT::CLONG_DOUBLE: - makeConstant(a.get< std::complex< long double > >()); - break; - case DT::CDOUBLE: - makeConstant(a.get< std::complex< double > >()); - break; - case DT::CFLOAT: - makeConstant(a.get< std::complex< float > >()); - break; - case DT::SHORT: - makeConstant(a.get< short >()); - break; - case DT::INT: - makeConstant(a.get< int >()); - break; - case DT::LONG: - makeConstant(a.get< long >()); - break; - case DT::LONGLONG: - makeConstant(a.get< long long >()); - break; - case DT::USHORT: - makeConstant(a.get< unsigned short >()); - break; - case DT::UINT: - makeConstant(a.get< unsigned int >()); - break; - case DT::ULONG: - makeConstant(a.get< unsigned long >()); - break; - case DT::ULONGLONG: - makeConstant(a.get< unsigned long long >()); - break; - case DT::CHAR: - makeConstant(a.get< char >()); - break; - case DT::UCHAR: - makeConstant(a.get< unsigned char >()); - break; - case DT::BOOL: - makeConstant(a.get< bool >()); - break; - default: - throw std::runtime_error("Unexpected constant datatype"); + case DT::LONG_DOUBLE: + makeConstant(a.get()); + break; + case DT::DOUBLE: + makeConstant(a.get()); + break; + case DT::FLOAT: + makeConstant(a.get()); + break; + case DT::CLONG_DOUBLE: + makeConstant(a.get >()); + break; + case DT::CDOUBLE: + makeConstant(a.get >()); + break; + case DT::CFLOAT: + makeConstant(a.get >()); + break; + case DT::SHORT: + makeConstant(a.get()); + break; + case DT::INT: + makeConstant(a.get()); + break; + case DT::LONG: + makeConstant(a.get()); + break; + case DT::LONGLONG: + makeConstant(a.get()); + break; + case DT::USHORT: + makeConstant(a.get()); + break; + case DT::UINT: + makeConstant(a.get()); + break; + case DT::ULONG: + makeConstant(a.get()); + break; + case DT::ULONGLONG: + makeConstant(a.get()); + break; + case DT::CHAR: + makeConstant(a.get()); + break; + case DT::UCHAR: + makeConstant(a.get()); + break; + case DT::BOOL: + makeConstant(a.get()); + break; + default: + throw std::runtime_error("Unexpected constant datatype"); } written() = true; aRead.name = "shape"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); a = 
Attribute(*aRead.resource); Extent e; // uint64_t check Datatype const attrDtype = *aRead.dtype; - if( isSame( attrDtype, determineDatatype< std::vector< uint64_t > >() ) - || isSame( attrDtype, determineDatatype< uint64_t >() ) ) - for( auto const& val : a.get< std::vector< uint64_t > >() ) - e.push_back( val ); + if (isSame(attrDtype, determineDatatype >()) || + isSame(attrDtype, determineDatatype())) + for (auto const &val : a.get >()) + e.push_back(val); else { std::ostringstream oss; - oss << "Unexpected datatype (" - << *aRead.dtype - << ") for attribute 'shape' (" - << determineDatatype< uint64_t >() + oss << "Unexpected datatype (" << *aRead.dtype + << ") for attribute 'shape' (" << determineDatatype() << " aka uint64_t)"; throw std::runtime_error(oss.str()); } @@ -369,19 +359,18 @@ RecordComponent::readBase() aRead.name = "unitSI"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::DOUBLE ) - setUnitSI(Attribute(*aRead.resource).get< double >()); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::DOUBLE) + setUnitSI(Attribute(*aRead.resource).get()); else throw std::runtime_error("Unexpected Attribute datatype for 'unitSI'"); - readAttributes( ReadMode::FullyReread ); + readAttributes(ReadMode::FullyReread); } -bool -RecordComponent::dirtyRecursive() const +bool RecordComponent::dirtyRecursive() const { - if( this->dirty() ) + if (this->dirty()) { return true; } diff --git a/src/Series.cpp b/src/Series.cpp index e3491adf5a..433d65cb46 100644 --- a/src/Series.cpp +++ b/src/Series.cpp @@ -18,15 +18,15 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . */ -#include "openPMD/auxiliary/Date.hpp" -#include "openPMD/auxiliary/Filesystem.hpp" -#include "openPMD/auxiliary/JSON.hpp" -#include "openPMD/auxiliary/StringManip.hpp" +#include "openPMD/Series.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IO/AbstractIOHandlerHelper.hpp" #include "openPMD/IO/Format.hpp" #include "openPMD/ReadIterations.hpp" -#include "openPMD/Series.hpp" +#include "openPMD/auxiliary/Date.hpp" +#include "openPMD/auxiliary/Filesystem.hpp" +#include "openPMD/auxiliary/JSON.hpp" +#include "openPMD/auxiliary/StringManip.hpp" #include "openPMD/version.hpp" #include @@ -35,17 +35,18 @@ #include #include #include +#include #include #include - namespace openPMD { namespace { /** Remove the filename extension of a given storage format. * - * @param filename String containing the filename, possibly with filename extension. + * @param filename String containing the filename, possibly with + * filename extension. * @param f File format to remove filename extension for. * @return String containing the filename without filename extension. */ @@ -59,29 +60,34 @@ namespace uint64_t iteration{}; //! iteration found in regex pattern (default: 0) // support for std::tie - operator std::tuple< bool &, int &, uint64_t & >() + operator std::tuple() { - return std::tuple< bool &, int &, uint64_t & >{ - isContained, padding, iteration }; + return std::tuple{ + isContained, padding, iteration}; } }; - /** Create a functor to determine if a file can be of a format and matches an iterationEncoding, given the filename on disk. + /** Create a functor to determine if a file can be of a format and matches + * an iterationEncoding, given the filename on disk. * - * @param prefix String containing head (i.e. before %T) of desired filename without filename extension. 
- * @param padding Amount of padding allowed in iteration number %T. If zero, any amount of padding is matched. - * @param postfix String containing tail (i.e. after %T) of desired filename without filename extension. + * @param prefix String containing head (i.e. before %T) of desired + * filename without filename extension. + * @param padding Amount of padding allowed in iteration number %T. If + * zero, any amount of padding is matched. + * @param postfix String containing tail (i.e. after %T) of desired + * filename without filename extension. * @param f File format to check backend applicability for. * @return Functor returning tuple of bool and int. - * bool is True if file could be of type f and matches the iterationEncoding. False otherwise. - * int is the amount of padding present in the iteration number %T. Is 0 if bool is False. + * bool is True if file could be of type f and matches the + * iterationEncoding. False otherwise. int is the amount of padding present + * in the iteration number %T. Is 0 if bool is False. */ std::function matcher( std::string const &prefix, int padding, std::string const &postfix, Format f); -} // namespace [anonymous] +} // namespace struct SeriesInterface::ParsedInput { @@ -92,73 +98,70 @@ struct SeriesInterface::ParsedInput std::string filenamePrefix; std::string filenamePostfix; int filenamePadding; -}; //ParsedInput +}; // ParsedInput SeriesInterface::SeriesInterface( - internal::SeriesData * series, internal::AttributableData * attri ) - : AttributableInterface{ attri } - , m_series{ series } -{ -} + internal::SeriesData *series, internal::AttributableData *attri) + : AttributableInterface{attri}, m_series{series} +{} -std::string -SeriesInterface::openPMD() const +std::string SeriesInterface::openPMD() const { - return getAttribute("openPMD").get< std::string >(); + return getAttribute("openPMD").get(); } -SeriesInterface& -SeriesInterface::setOpenPMD(std::string const& o) +SeriesInterface &SeriesInterface::setOpenPMD(std::string const &o) { setAttribute("openPMD", o); return *this; } -uint32_t -SeriesInterface::openPMDextension() const +uint32_t SeriesInterface::openPMDextension() const { - return getAttribute("openPMDextension").get< uint32_t >(); + return getAttribute("openPMDextension").get(); } -SeriesInterface& -SeriesInterface::setOpenPMDextension(uint32_t oe) +SeriesInterface &SeriesInterface::setOpenPMDextension(uint32_t oe) { setAttribute("openPMDextension", oe); return *this; } -std::string -SeriesInterface::basePath() const +std::string SeriesInterface::basePath() const { - return getAttribute("basePath").get< std::string >(); + return getAttribute("basePath").get(); } -SeriesInterface& -SeriesInterface::setBasePath(std::string const& bp) +SeriesInterface &SeriesInterface::setBasePath(std::string const &bp) { std::string version = openPMD(); - if( version == "1.0.0" || version == "1.0.1" || version == "1.1.0" ) - throw std::runtime_error("Custom basePath not allowed in openPMD <=1.1.0"); + if (version == "1.0.0" || version == "1.0.1" || version == "1.1.0") + throw std::runtime_error( + "Custom basePath not allowed in openPMD <=1.1.0"); setAttribute("basePath", bp); return *this; } -std::string -SeriesInterface::meshesPath() const +std::string SeriesInterface::meshesPath() const { - return getAttribute("meshesPath").get< std::string >(); + return getAttribute("meshesPath").get(); } -SeriesInterface& -SeriesInterface::setMeshesPath(std::string const& mp) +SeriesInterface &SeriesInterface::setMeshesPath(std::string const &mp) { - auto 
& series = get(); - if( std::any_of(series.iterations.begin(), series.iterations.end(), - [](Container< Iteration, uint64_t >::value_type const& i){ return i.second.meshes.written(); }) ) - throw std::runtime_error("A files meshesPath can not (yet) be changed after it has been written."); + auto &series = get(); + if (std::any_of( + series.iterations.begin(), + series.iterations.end(), + [](Container::value_type const &i) { + return i.second.meshes.written(); + })) + throw std::runtime_error( + "A files meshesPath can not (yet) be changed after it has been " + "written."); - if( auxiliary::ends_with(mp, '/') ) + if (auxiliary::ends_with(mp, '/')) setAttribute("meshesPath", mp); else setAttribute("meshesPath", mp + "/"); @@ -166,21 +169,25 @@ SeriesInterface::setMeshesPath(std::string const& mp) return *this; } -std::string -SeriesInterface::particlesPath() const +std::string SeriesInterface::particlesPath() const { - return getAttribute("particlesPath").get< std::string >(); + return getAttribute("particlesPath").get(); } -SeriesInterface& -SeriesInterface::setParticlesPath(std::string const& pp) +SeriesInterface &SeriesInterface::setParticlesPath(std::string const &pp) { - auto & series = get(); - if( std::any_of(series.iterations.begin(), series.iterations.end(), - [](Container< Iteration, uint64_t >::value_type const& i){ return i.second.particles.written(); }) ) - throw std::runtime_error("A files particlesPath can not (yet) be changed after it has been written."); + auto &series = get(); + if (std::any_of( + series.iterations.begin(), + series.iterations.end(), + [](Container::value_type const &i) { + return i.second.particles.written(); + })) + throw std::runtime_error( + "A files particlesPath can not (yet) be changed after it has been " + "written."); - if( auxiliary::ends_with(pp, '/') ) + if (auxiliary::ends_with(pp, '/')) setAttribute("particlesPath", pp); else setAttribute("particlesPath", pp + "/"); @@ -188,198 +195,192 @@ SeriesInterface::setParticlesPath(std::string const& pp) return *this; } -std::string -SeriesInterface::author() const +std::string SeriesInterface::author() const { - return getAttribute("author").get< std::string >(); + return getAttribute("author").get(); } -SeriesInterface& -SeriesInterface::setAuthor(std::string const& a) +SeriesInterface &SeriesInterface::setAuthor(std::string const &a) { setAttribute("author", a); return *this; } -std::string -SeriesInterface::software() const +std::string SeriesInterface::software() const { - return getAttribute("software").get< std::string >(); + return getAttribute("software").get(); } -SeriesInterface& -SeriesInterface::setSoftware( std::string const& newName, std::string const& newVersion ) +SeriesInterface &SeriesInterface::setSoftware( + std::string const &newName, std::string const &newVersion) { - setAttribute( "software", newName ); - setAttribute( "softwareVersion", newVersion ); + setAttribute("software", newName); + setAttribute("softwareVersion", newVersion); return *this; } -std::string -SeriesInterface::softwareVersion() const +std::string SeriesInterface::softwareVersion() const { - return getAttribute("softwareVersion").get< std::string >(); + return getAttribute("softwareVersion").get(); } -SeriesInterface& -SeriesInterface::setSoftwareVersion(std::string const& sv) +SeriesInterface &SeriesInterface::setSoftwareVersion(std::string const &sv) { setAttribute("softwareVersion", sv); return *this; } -std::string -SeriesInterface::date() const +std::string SeriesInterface::date() const { - return 
getAttribute("date").get< std::string >(); + return getAttribute("date").get(); } -SeriesInterface& -SeriesInterface::setDate(std::string const& d) +SeriesInterface &SeriesInterface::setDate(std::string const &d) { setAttribute("date", d); return *this; } -std::string -SeriesInterface::softwareDependencies() const +std::string SeriesInterface::softwareDependencies() const { - return getAttribute("softwareDependencies").get< std::string >(); + return getAttribute("softwareDependencies").get(); } -SeriesInterface& -SeriesInterface::setSoftwareDependencies(std::string const &newSoftwareDependencies) +SeriesInterface &SeriesInterface::setSoftwareDependencies( + std::string const &newSoftwareDependencies) { setAttribute("softwareDependencies", newSoftwareDependencies); return *this; } -std::string -SeriesInterface::machine() const +std::string SeriesInterface::machine() const { - return getAttribute("machine").get< std::string >(); + return getAttribute("machine").get(); } -SeriesInterface& -SeriesInterface::setMachine(std::string const &newMachine) +SeriesInterface &SeriesInterface::setMachine(std::string const &newMachine) { setAttribute("machine", newMachine); return *this; } -IterationEncoding -SeriesInterface::iterationEncoding() const +IterationEncoding SeriesInterface::iterationEncoding() const { return get().m_iterationEncoding; } -SeriesInterface& -SeriesInterface::setIterationEncoding(IterationEncoding ie) +SeriesInterface &SeriesInterface::setIterationEncoding(IterationEncoding ie) { - auto & series = get(); - if( written() ) - throw std::runtime_error("A files iterationEncoding can not (yet) be changed after it has been written."); + auto &series = get(); + if (written()) + throw std::runtime_error( + "A files iterationEncoding can not (yet) be changed after it has " + "been written."); series.m_iterationEncoding = ie; - switch( ie ) + switch (ie) { - case IterationEncoding::fileBased: - setIterationFormat(series.m_name); - setAttribute("iterationEncoding", std::string("fileBased")); - break; - case IterationEncoding::groupBased: - setIterationFormat(BASEPATH); - setAttribute("iterationEncoding", std::string("groupBased")); - break; - case IterationEncoding::variableBased: - setIterationFormat( - auxiliary::replace_first(basePath(), "/%T/", "")); - setAttribute("iterationEncoding", std::string("variableBased")); - break; + case IterationEncoding::fileBased: + setIterationFormat(series.m_name); + setAttribute("iterationEncoding", std::string("fileBased")); + break; + case IterationEncoding::groupBased: + setIterationFormat(BASEPATH); + setAttribute("iterationEncoding", std::string("groupBased")); + break; + case IterationEncoding::variableBased: + setIterationFormat(auxiliary::replace_first(basePath(), "/%T/", "")); + setAttribute("iterationEncoding", std::string("variableBased")); + break; } return *this; } -std::string -SeriesInterface::iterationFormat() const +std::string SeriesInterface::iterationFormat() const { - return getAttribute("iterationFormat").get< std::string >(); + return getAttribute("iterationFormat").get(); } -SeriesInterface& -SeriesInterface::setIterationFormat(std::string const& i) +SeriesInterface &SeriesInterface::setIterationFormat(std::string const &i) { - if( written() ) - throw std::runtime_error("A files iterationFormat can not (yet) be changed after it has been written."); + if (written()) + throw std::runtime_error( + "A files iterationFormat can not (yet) be changed after it has " + "been written."); - if( iterationEncoding() == 
IterationEncoding::groupBased || - iterationEncoding() == IterationEncoding::variableBased ) - if( basePath() != i && (openPMD() == "1.0.1" || openPMD() == "1.0.0") ) - throw std::invalid_argument("iterationFormat must not differ from basePath " + basePath() + " for group- or variableBased data"); + if (iterationEncoding() == IterationEncoding::groupBased || + iterationEncoding() == IterationEncoding::variableBased) + if (basePath() != i && (openPMD() == "1.0.1" || openPMD() == "1.0.0")) + throw std::invalid_argument( + "iterationFormat must not differ from basePath " + basePath() + + " for group- or variableBased data"); setAttribute("iterationFormat", i); return *this; } -std::string -SeriesInterface::name() const +std::string SeriesInterface::name() const { return get().m_name; } -SeriesInterface& -SeriesInterface::setName(std::string const& n) +SeriesInterface &SeriesInterface::setName(std::string const &n) { - auto & series = get(); - if( written() ) - throw std::runtime_error("A files name can not (yet) be changed after it has been written."); + auto &series = get(); + if (written()) + throw std::runtime_error( + "A files name can not (yet) be changed after it has been written."); - if( series.m_iterationEncoding == IterationEncoding::fileBased && !auxiliary::contains(series.m_name, "%T") ) - throw std::runtime_error("For fileBased formats the iteration regex %T must be included in the file name"); + if (series.m_iterationEncoding == IterationEncoding::fileBased && + !auxiliary::contains(series.m_name, "%T")) + throw std::runtime_error( + "For fileBased formats the iteration regex %T must be included in " + "the file name"); series.m_name = n; dirty() = true; return *this; } -std::string -SeriesInterface::backend() const +std::string SeriesInterface::backend() const { return IOHandler()->backendName(); } -void -SeriesInterface::flush() +void SeriesInterface::flush() { - auto & series = get(); + auto &series = get(); flush_impl( series.iterations.begin(), series.iterations.end(), - FlushLevel::UserFlush ); + {FlushLevel::UserFlush}); } -std::unique_ptr< SeriesInterface::ParsedInput > +std::unique_ptr SeriesInterface::parseInput(std::string filepath) { - std::unique_ptr< SeriesInterface::ParsedInput > input{new SeriesInterface::ParsedInput}; + std::unique_ptr input{ + new SeriesInterface::ParsedInput}; #ifdef _WIN32 - if( auxiliary::contains(filepath, '/') ) + if (auxiliary::contains(filepath, '/')) { - std::cerr << "Filepaths on WINDOWS platforms may not contain slashes '/'! " - << "Replacing with backslashes '\\' unconditionally!" << std::endl; + std::cerr + << "Filepaths on WINDOWS platforms may not contain slashes '/'! " + << "Replacing with backslashes '\\' unconditionally!" << std::endl; filepath = auxiliary::replace_all(filepath, "/", "\\"); } #else - if( auxiliary::contains(filepath, '\\') ) + if (auxiliary::contains(filepath, '\\')) { - std::cerr << "Filepaths on UNIX platforms may not include backslashes '\\'! " - << "Replacing with slashes '/' unconditionally!" << std::endl; + std::cerr + << "Filepaths on UNIX platforms may not include backslashes '\\'! " + << "Replacing with slashes '/' unconditionally!" 
<< std::endl; filepath = auxiliary::replace_all(filepath, "\\", "/"); } #endif auto const pos = filepath.find_last_of(auxiliary::directory_separator); - if( std::string::npos == pos ) + if (std::string::npos == pos) { input->path = "."; input->path.append(1, auxiliary::directory_separator); @@ -396,26 +397,30 @@ SeriesInterface::parseInput(std::string filepath) std::regex pattern("(.*)%(0[[:digit:]]+)?T(.*)"); std::smatch regexMatch; std::regex_match(input->name, regexMatch, pattern); - if( regexMatch.empty() ) + if (regexMatch.empty()) input->iterationEncoding = IterationEncoding::groupBased; - else if( regexMatch.size() == 4 ) + else if (regexMatch.size() == 4) { input->iterationEncoding = IterationEncoding::fileBased; input->filenamePrefix = regexMatch[1].str(); - std::string const& pad = regexMatch[2]; - if( pad.empty() ) + std::string const &pad = regexMatch[2]; + if (pad.empty()) input->filenamePadding = 0; else { - if( pad.front() != '0' ) - throw std::runtime_error("Invalid iterationEncoding " + input->name); + if (pad.front() != '0') + throw std::runtime_error( + "Invalid iterationEncoding " + input->name); input->filenamePadding = std::stoi(pad); } input->filenamePostfix = regexMatch[3].str(); - } else - throw std::runtime_error("Can not determine iterationFormat from filename " + input->name); + } + else + throw std::runtime_error( + "Can not determine iterationFormat from filename " + input->name); - input->filenamePostfix = cleanFilename(input->filenamePostfix, input->format); + input->filenamePostfix = + cleanFilename(input->filenamePostfix, input->format); input->name = cleanFilename(input->name, input->format); @@ -423,13 +428,13 @@ SeriesInterface::parseInput(std::string filepath) } void SeriesInterface::init( - std::shared_ptr< AbstractIOHandler > ioHandler, - std::unique_ptr< SeriesInterface::ParsedInput > input ) + std::shared_ptr ioHandler, + std::unique_ptr input) { - auto & series = get(); + auto &series = get(); writable().IOHandler = ioHandler; series.iterations.linkHierarchy(writable()); - series.iterations.writable().ownKeyWithinParent = { "iterations" }; + series.iterations.writable().ownKeyWithinParent = {"iterations"}; series.m_name = input->name; @@ -439,10 +444,10 @@ void SeriesInterface::init( series.m_filenamePostfix = input->filenamePostfix; series.m_filenamePadding = input->filenamePadding; - if( series.m_iterationEncoding == IterationEncoding::fileBased && + if (series.m_iterationEncoding == IterationEncoding::fileBased && !series.m_filenamePrefix.empty() && - std::isdigit( static_cast< unsigned char >( - *series.m_filenamePrefix.rbegin() ) ) ) + std::isdigit( + static_cast(*series.m_filenamePrefix.rbegin()))) { std::cerr << R"END( [Warning] In file-based iteration encoding, it is strongly recommended to avoid @@ -454,49 +459,50 @@ Given file pattern: ')END" << series.m_name << "'" << std::endl; } - if(IOHandler()->m_frontendAccess == Access::READ_ONLY || IOHandler()->m_frontendAccess == Access::READ_WRITE ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY || + IOHandler()->m_frontendAccess == Access::READ_WRITE) { /* Allow creation of values in Containers and setting of Attributes * Would throw for Access::READ_ONLY */ auto oldType = IOHandler()->m_frontendAccess; - auto newType = const_cast< Access* >(&IOHandler()->m_frontendAccess); + auto newType = const_cast(&IOHandler()->m_frontendAccess); *newType = Access::READ_WRITE; - if( input->iterationEncoding == IterationEncoding::fileBased ) + if (input->iterationEncoding == 
IterationEncoding::fileBased) readFileBased(); else readGorVBased(); - if( series.iterations.empty() ) + if (series.iterations.empty()) { /* Access::READ_WRITE can be used to create a new Series * allow setting attributes in that case */ written() = false; - initDefaults( input->iterationEncoding ); + initDefaults(input->iterationEncoding); setIterationEncoding(input->iterationEncoding); written() = true; } *newType = oldType; - } else + } + else { - initDefaults( input->iterationEncoding ); + initDefaults(input->iterationEncoding); setIterationEncoding(input->iterationEncoding); } } -void -SeriesInterface::initDefaults( IterationEncoding ie ) +void SeriesInterface::initDefaults(IterationEncoding ie) { - if( !containsAttribute("openPMD")) - setOpenPMD( getStandard() ); - if( !containsAttribute("openPMDextension")) + if (!containsAttribute("openPMD")) + setOpenPMD(getStandard()); + if (!containsAttribute("openPMDextension")) setOpenPMDextension(0); - if( !containsAttribute("basePath")) + if (!containsAttribute("basePath")) { - if( ie == IterationEncoding::variableBased ) + if (ie == IterationEncoding::variableBased) { setAttribute( "basePath", auxiliary::replace_first(BASEPATH, "/%T/", "")); @@ -506,97 +512,92 @@ SeriesInterface::initDefaults( IterationEncoding ie ) setAttribute("basePath", std::string(BASEPATH)); } } - if( !containsAttribute("date")) - setDate( auxiliary::getDateString() ); - if( !containsAttribute("software")) - setSoftware( "openPMD-api", getVersion() ); + if (!containsAttribute("date")) + setDate(auxiliary::getDateString()); + if (!containsAttribute("software")) + setSoftware("openPMD-api", getVersion()); } -std::future< void > -SeriesInterface::flush_impl( +std::future SeriesInterface::flush_impl( iterations_iterator begin, iterations_iterator end, - FlushLevel level, - bool flushIOHandler ) + internal::FlushParams flushParams, + bool flushIOHandler) { - IOHandler()->m_flushLevel = level; - auto & series = get(); + auto &series = get(); series.m_lastFlushSuccessful = true; try { - switch( iterationEncoding() ) + switch (iterationEncoding()) { using IE = IterationEncoding; - case IE::fileBased: - flushFileBased( begin, end ); - break; - case IE::groupBased: - case IE::variableBased: - flushGorVBased( begin, end ); - break; + case IE::fileBased: + flushFileBased(begin, end, flushParams); + break; + case IE::groupBased: + case IE::variableBased: + flushGorVBased(begin, end, flushParams); + break; } - if( flushIOHandler ) + if (flushIOHandler) { - auto res = IOHandler()->flush(); - IOHandler()->m_flushLevel = FlushLevel::InternalFlush; - return res; + return IOHandler()->flush(flushParams); } else { - IOHandler()->m_flushLevel = FlushLevel::InternalFlush; return {}; } } - catch( ... ) + catch (...) { - IOHandler()->m_flushLevel = FlushLevel::InternalFlush; series.m_lastFlushSuccessful = false; throw; } } -void -SeriesInterface::flushFileBased( iterations_iterator begin, iterations_iterator end ) +void SeriesInterface::flushFileBased( + iterations_iterator begin, + iterations_iterator end, + internal::FlushParams flushParams) { - auto & series = get(); - if( end == begin ) + auto &series = get(); + if (end == begin) throw std::runtime_error( - "fileBased output can not be written with no iterations." 
); + "fileBased output can not be written with no iterations."); - if( IOHandler()->m_frontendAccess == Access::READ_ONLY ) - for( auto it = begin; it != end; ++it ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) + for (auto it = begin; it != end; ++it) { // Phase 1 - switch( openIterationIfDirty( it->first, it->second ) ) + switch (openIterationIfDirty(it->first, it->second)) { using IO = IterationOpened; case IO::HasBeenOpened: - it->second.flush(); + it->second.flush(flushParams); break; case IO::RemainsClosed: break; } // Phase 2 - if( *it->second.m_closed == - Iteration::CloseStatus::ClosedInFrontend ) + if (*it->second.m_closed == + Iteration::CloseStatus::ClosedInFrontend) { - Parameter< Operation::CLOSE_FILE > fClose; - IOHandler()->enqueue( - IOTask( &it->second, std::move( fClose ) ) ); + Parameter fClose; + IOHandler()->enqueue(IOTask(&it->second, std::move(fClose))); *it->second.m_closed = Iteration::CloseStatus::ClosedInBackend; } // Phase 3 - IOHandler()->flush(); + IOHandler()->flush(flushParams); } else { bool allDirty = dirty(); - for( auto it = begin; it != end; ++it ) + for (auto it = begin; it != end; ++it) { // Phase 1 - switch( openIterationIfDirty( it->first, it->second ) ) + switch (openIterationIfDirty(it->first, it->second)) { using IO = IterationOpened; case IO::HasBeenOpened: { @@ -608,13 +609,14 @@ SeriesInterface::flushFileBased( iterations_iterator begin, iterations_iterator series.iterations.written() = false; dirty() |= it->second.dirty(); - std::string filename = iterationFilename( it->first ); - it->second.flushFileBased( filename, it->first ); + std::string filename = iterationFilename(it->first); + it->second.flushFileBased(filename, it->first, flushParams); series.iterations.flush( - auxiliary::replace_first( basePath(), "%T/", "" ) ); + auxiliary::replace_first(basePath(), "%T/", ""), + flushParams); - flushAttributes(); + flushAttributes(flushParams); break; } case IO::RemainsClosed: @@ -622,91 +624,93 @@ SeriesInterface::flushFileBased( iterations_iterator begin, iterations_iterator } // Phase 2 - if( *it->second.m_closed == - Iteration::CloseStatus::ClosedInFrontend ) + if (*it->second.m_closed == + Iteration::CloseStatus::ClosedInFrontend) { - Parameter< Operation::CLOSE_FILE > fClose; - IOHandler()->enqueue( - IOTask( &it->second, std::move( fClose ) ) ); + Parameter fClose; + IOHandler()->enqueue(IOTask(&it->second, std::move(fClose))); *it->second.m_closed = Iteration::CloseStatus::ClosedInBackend; } // Phase 3 - IOHandler()->flush(); + IOHandler()->flush(flushParams); /* reset the dirty bit for every iteration (i.e. 
file) - * otherwise only the first iteration will have updates attributes */ + * otherwise only the first iteration will have updates attributes + */ dirty() = allDirty; } dirty() = false; } } -void -SeriesInterface::flushGorVBased( iterations_iterator begin, iterations_iterator end ) +void SeriesInterface::flushGorVBased( + iterations_iterator begin, + iterations_iterator end, + internal::FlushParams flushParams) { - auto & series = get(); - if( IOHandler()->m_frontendAccess == Access::READ_ONLY ) - for( auto it = begin; it != end; ++it ) + auto &series = get(); + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) + for (auto it = begin; it != end; ++it) { // Phase 1 - switch( openIterationIfDirty( it->first, it->second ) ) + switch (openIterationIfDirty(it->first, it->second)) { using IO = IterationOpened; case IO::HasBeenOpened: - it->second.flush(); + it->second.flush(flushParams); break; case IO::RemainsClosed: break; } // Phase 2 - if( *it->second.m_closed == - Iteration::CloseStatus::ClosedInFrontend ) + if (*it->second.m_closed == + Iteration::CloseStatus::ClosedInFrontend) { // the iteration has no dedicated file in group-based mode *it->second.m_closed = Iteration::CloseStatus::ClosedInBackend; } // Phase 3 - IOHandler()->flush(); + IOHandler()->flush(flushParams); } else { - if( !written() ) + if (!written()) { - Parameter< Operation::CREATE_FILE > fCreate; + Parameter fCreate; fCreate.name = series.m_name; fCreate.encoding = iterationEncoding(); IOHandler()->enqueue(IOTask(this, fCreate)); } series.iterations.flush( - auxiliary::replace_first( basePath(), "%T/", "" ) ); + auxiliary::replace_first(basePath(), "%T/", ""), flushParams); - for( auto it = begin; it != end; ++it ) + for (auto it = begin; it != end; ++it) { // Phase 1 - switch( openIterationIfDirty( it->first, it->second ) ) + switch (openIterationIfDirty(it->first, it->second)) { using IO = IterationOpened; case IO::HasBeenOpened: - if( !it->second.written() ) + if (!it->second.written()) { - it->second.parent() = getWritable( &series.iterations ); + it->second.parent() = getWritable(&series.iterations); } - switch( iterationEncoding() ) + switch (iterationEncoding()) { using IE = IterationEncoding; case IE::groupBased: - it->second.flushGroupBased( it->first ); + it->second.flushGroupBased(it->first, flushParams); break; case IE::variableBased: - it->second.flushVariableBased( it->first ); + it->second.flushVariableBased(it->first, flushParams); break; default: throw std::runtime_error( - "[Series] Internal control flow error" ); + "[Series] Internal control flow error"); } break; case IO::RemainsClosed: @@ -714,23 +718,22 @@ SeriesInterface::flushGorVBased( iterations_iterator begin, iterations_iterator } // Phase 2 - if( *it->second.m_closed == - Iteration::CloseStatus::ClosedInFrontend ) + if (*it->second.m_closed == + Iteration::CloseStatus::ClosedInFrontend) { // the iteration has no dedicated file in group-based mode *it->second.m_closed = Iteration::CloseStatus::ClosedInBackend; } } - flushAttributes(); - IOHandler()->flush(); + flushAttributes(flushParams); + IOHandler()->flush(flushParams); } } -void -SeriesInterface::flushMeshesPath() +void SeriesInterface::flushMeshesPath() { - Parameter< Operation::WRITE_ATT > aWrite; + Parameter aWrite; aWrite.name = "meshesPath"; Attribute a = getAttribute("meshesPath"); aWrite.resource = a.getResource(); @@ -738,10 +741,9 @@ SeriesInterface::flushMeshesPath() IOHandler()->enqueue(IOTask(this, aWrite)); } -void -SeriesInterface::flushParticlesPath() +void 
SeriesInterface::flushParticlesPath() { - Parameter< Operation::WRITE_ATT > aWrite; + Parameter aWrite; aWrite.name = "particlesPath"; Attribute a = getAttribute("particlesPath"); aWrite.resource = a.getResource(); @@ -749,61 +751,62 @@ SeriesInterface::flushParticlesPath() IOHandler()->enqueue(IOTask(this, aWrite)); } -void -SeriesInterface::readFileBased( ) +void SeriesInterface::readFileBased() { - auto & series = get(); - Parameter< Operation::OPEN_FILE > fOpen; - Parameter< Operation::READ_ATT > aRead; + auto &series = get(); + Parameter fOpen; + Parameter aRead; fOpen.encoding = iterationEncoding(); - if( !auxiliary::directory_exists(IOHandler()->directory) ) - throw no_such_file_error("Supplied directory is not valid: " + IOHandler()->directory); + if (!auxiliary::directory_exists(IOHandler()->directory)) + throw no_such_file_error( + "Supplied directory is not valid: " + IOHandler()->directory); auto isPartOfSeries = matcher( - series.m_filenamePrefix, series.m_filenamePadding, - series.m_filenamePostfix, series.m_format); + series.m_filenamePrefix, + series.m_filenamePadding, + series.m_filenamePostfix, + series.m_format); bool isContained; int padding; uint64_t iterationIndex; - std::set< int > paddings; - for( auto const& entry : auxiliary::list_directory(IOHandler()->directory) ) + std::set paddings; + for (auto const &entry : auxiliary::list_directory(IOHandler()->directory)) { std::tie(isContained, padding, iterationIndex) = isPartOfSeries(entry); - if( isContained ) + if (isContained) { - Iteration & i = series.iterations[ iterationIndex ]; - i.deferParseAccess( { - std::to_string( iterationIndex ), - iterationIndex, - true, - entry } ); - // TODO skip if the padding is exact the number of chars in an iteration? + Iteration &i = series.iterations[iterationIndex]; + i.deferParseAccess( + {std::to_string(iterationIndex), iterationIndex, true, entry}); + // TODO skip if the padding is exact the number of chars in an + // iteration? paddings.insert(padding); } } - if( series.iterations.empty() ) + if (series.iterations.empty()) { - /* Frontend access type might change during SeriesInterface::read() to allow parameter modification. - * Backend access type stays unchanged for the lifetime of a Series. */ - if(IOHandler()->m_backendAccess == Access::READ_ONLY ) + /* Frontend access type might change during SeriesInterface::read() to + * allow parameter modification. Backend access type stays unchanged for + * the lifetime of a Series. 
*/ + if (IOHandler()->m_backendAccess == Access::READ_ONLY) throw no_such_file_error("No matching iterations found: " + name()); else - std::cerr << "No matching iterations found: " << name() << std::endl; + std::cerr << "No matching iterations found: " << name() + << std::endl; } - auto readIterationEagerly = []( Iteration & iteration ) - { + auto readIterationEagerly = [](Iteration &iteration) { iteration.runDeferredParseAccess(); - Parameter< Operation::CLOSE_FILE > fClose; - iteration.IOHandler()->enqueue( IOTask( &iteration, fClose ) ); - iteration.IOHandler()->flush(); + Parameter fClose; + iteration.IOHandler()->enqueue(IOTask(&iteration, fClose)); + iteration.IOHandler()->flush(internal::defaultFlushParams); *iteration.m_closed = Iteration::CloseStatus::ClosedTemporarily; }; - if( series.m_parseLazily ) + if (series.m_parseLazily) { - for( auto & iteration : series.iterations ) + for (auto &iteration : series.iterations) { *iteration.second.m_closed = Iteration::CloseStatus::ParseAccessDeferred; @@ -811,60 +814,62 @@ SeriesInterface::readFileBased( ) // open the last iteration, just to parse Series attributes auto getLastIteration = series.iterations.end(); getLastIteration--; - auto & lastIteration = getLastIteration->second; - readIterationEagerly( lastIteration ); + auto &lastIteration = getLastIteration->second; + readIterationEagerly(lastIteration); } else { - for( auto & iteration : series.iterations ) + for (auto &iteration : series.iterations) { - readIterationEagerly( iteration.second ); + readIterationEagerly(iteration.second); } } - if( paddings.size() == 1u ) + if (paddings.size() == 1u) series.m_filenamePadding = *paddings.begin(); - /* Frontend access type might change during SeriesInterface::read() to allow parameter modification. - * Backend access type stays unchanged for the lifetime of a Series. */ - if( paddings.size() > 1u && IOHandler()->m_backendAccess == Access::READ_WRITE ) - throw std::runtime_error("Cannot write to a series with inconsistent iteration padding. " - "Please specify '%0T' or open as read-only."); + /* Frontend access type might change during SeriesInterface::read() to allow + * parameter modification. Backend access type stays unchanged for the + * lifetime of a Series. */ + if (paddings.size() > 1u && + IOHandler()->m_backendAccess == Access::READ_WRITE) + throw std::runtime_error( + "Cannot write to a series with inconsistent iteration padding. 
" + "Please specify '%0T' or open as read-only."); } -void SeriesInterface::readOneIterationFileBased( std::string const & filePath ) +void SeriesInterface::readOneIterationFileBased(std::string const &filePath) { - auto & series = get(); + auto &series = get(); - Parameter< Operation::OPEN_FILE > fOpen; - Parameter< Operation::READ_ATT > aRead; + Parameter fOpen; + Parameter aRead; fOpen.name = filePath; IOHandler()->enqueue(IOTask(this, fOpen)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); series.iterations.parent() = getWritable(this); readBase(); using DT = Datatype; aRead.name = "iterationEncoding"; - IOHandler()->enqueue( IOTask( this, aRead ) ); - IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + IOHandler()->enqueue(IOTask(this, aRead)); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::STRING) { - std::string encoding = - Attribute( *aRead.resource ).get< std::string >(); - if( encoding == "fileBased" ) + std::string encoding = Attribute(*aRead.resource).get(); + if (encoding == "fileBased") series.m_iterationEncoding = IterationEncoding::fileBased; - else if( encoding == "groupBased" ) + else if (encoding == "groupBased") { series.m_iterationEncoding = IterationEncoding::groupBased; std::cerr << "Series constructor called with iteration " - "regex '%T' suggests loading a " - << "time series with fileBased iteration " - "encoding. Loaded file is groupBased.\n"; + "regex '%T' suggests loading a " + << "time series with fileBased iteration " + "encoding. Loaded file is groupBased.\n"; } - else if( encoding == "variableBased" ) + else if (encoding == "variableBased") { /* * Unlike if the file were group-based, this one doesn't work @@ -874,145 +879,151 @@ void SeriesInterface::readOneIterationFileBased( std::string const & filePath ) "Series constructor called with iteration " "regex '%T' suggests loading a " "time series with fileBased iteration " - "encoding. Loaded file is variableBased." ); + "encoding. 
Loaded file is variableBased."); } else - throw std::runtime_error( - "Unknown iterationEncoding: " + encoding ); - setAttribute( "iterationEncoding", encoding ); + throw std::runtime_error("Unknown iterationEncoding: " + encoding); + setAttribute("iterationEncoding", encoding); } else - throw std::runtime_error( "Unexpected Attribute datatype " - "for 'iterationEncoding'" ); + throw std::runtime_error( + "Unexpected Attribute datatype " + "for 'iterationEncoding'"); aRead.name = "iterationFormat"; - IOHandler()->enqueue( IOTask( this, aRead ) ); - IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + IOHandler()->enqueue(IOTask(this, aRead)); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::STRING) { written() = false; - setIterationFormat( - Attribute( *aRead.resource ).get< std::string >() ); + setIterationFormat(Attribute(*aRead.resource).get()); written() = true; } else throw std::runtime_error( - "Unexpected Attribute datatype for 'iterationFormat'" ); + "Unexpected Attribute datatype for 'iterationFormat'"); - Parameter< Operation::OPEN_PATH > pOpen; + Parameter pOpen; std::string version = openPMD(); - if( version == "1.0.0" || version == "1.0.1" || version == "1.1.0" ) + if (version == "1.0.0" || version == "1.0.1" || version == "1.1.0") pOpen.path = auxiliary::replace_first(basePath(), "/%T/", ""); else throw std::runtime_error("Unknown openPMD version - " + version); IOHandler()->enqueue(IOTask(&series.iterations, pOpen)); - readAttributes( ReadMode::IgnoreExisting ); - series.iterations.readAttributes(ReadMode::OverrideExisting ); + readAttributes(ReadMode::IgnoreExisting); + series.iterations.readAttributes(ReadMode::OverrideExisting); } -void -SeriesInterface::readGorVBased( bool do_init ) +void SeriesInterface::readGorVBased(bool do_init) { - auto & series = get(); - Parameter< Operation::OPEN_FILE > fOpen; + auto &series = get(); + Parameter fOpen; fOpen.name = series.m_name; fOpen.encoding = iterationEncoding(); IOHandler()->enqueue(IOTask(this, fOpen)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - if( do_init ) + if (do_init) { readBase(); using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "iterationEncoding"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::STRING) { - std::string encoding = Attribute(*aRead.resource).get< std::string >(); - if( encoding == "groupBased" ) + std::string encoding = + Attribute(*aRead.resource).get(); + if (encoding == "groupBased") series.m_iterationEncoding = IterationEncoding::groupBased; - else if( encoding == "variableBased" ) + else if (encoding == "variableBased") series.m_iterationEncoding = IterationEncoding::variableBased; - else if( encoding == "fileBased" ) + else if (encoding == "fileBased") { series.m_iterationEncoding = IterationEncoding::fileBased; - std::cerr << "Series constructor called with explicit iteration suggests loading a " - << "single file with groupBased iteration encoding. Loaded file is fileBased.\n"; + std::cerr << "Series constructor called with explicit " + "iteration suggests loading a " + << "single file with groupBased iteration encoding. " + "Loaded file is fileBased.\n"; /* * We'll want the openPMD API to continue series.m_name to open * the file instead of piecing the name together via * prefix-padding-postfix things. 
*/ series.m_overrideFilebasedFilename = series.m_name; - } else - throw std::runtime_error("Unknown iterationEncoding: " + encoding); + } + else + throw std::runtime_error( + "Unknown iterationEncoding: " + encoding); setAttribute("iterationEncoding", encoding); } else - throw std::runtime_error("Unexpected Attribute datatype for 'iterationEncoding'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'iterationEncoding'"); aRead.name = "iterationFormat"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::STRING) { written() = false; - setIterationFormat(Attribute(*aRead.resource).get< std::string >()); + setIterationFormat(Attribute(*aRead.resource).get()); written() = true; } else - throw std::runtime_error("Unexpected Attribute datatype for 'iterationFormat'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'iterationFormat'"); } - Parameter< Operation::OPEN_PATH > pOpen; + Parameter pOpen; std::string version = openPMD(); - if( version == "1.0.0" || version == "1.0.1" || version == "1.1.0" ) + if (version == "1.0.0" || version == "1.0.1" || version == "1.1.0") pOpen.path = auxiliary::replace_first(basePath(), "/%T/", ""); else throw std::runtime_error("Unknown openPMD version - " + version); IOHandler()->enqueue(IOTask(&series.iterations, pOpen)); - readAttributes( ReadMode::IgnoreExisting ); + readAttributes(ReadMode::IgnoreExisting); /* * 'snapshot' changes over steps, so reread that. */ - series.iterations.readAttributes( ReadMode::OverrideExisting ); + series.iterations.readAttributes(ReadMode::OverrideExisting); /* obtain all paths inside the basepath (i.e. all iterations) */ - Parameter< Operation::LIST_PATHS > pList; + Parameter pList; IOHandler()->enqueue(IOTask(&series.iterations, pList)); - IOHandler()->flush(); - - auto readSingleIteration = - [&series, &pOpen, this] - (uint64_t index, std::string path, bool guardAgainstRereading ) - { - if( series.iterations.contains( index ) ) + IOHandler()->flush(internal::defaultFlushParams); + + auto readSingleIteration = [&series, &pOpen, this]( + uint64_t index, + std::string path, + bool guardAgainstRereading, + bool beginStep) { + if (series.iterations.contains(index)) { // maybe re-read - auto & i = series.iterations.at( index ); + auto &i = series.iterations.at(index); // i.written(): the iteration has already been parsed // reparsing is not needed - if( guardAgainstRereading && i.written() ) + if (guardAgainstRereading && i.written()) { return; } - if( *i.m_closed != Iteration::CloseStatus::ParseAccessDeferred ) + if (*i.m_closed != Iteration::CloseStatus::ParseAccessDeferred) { pOpen.path = path; - IOHandler()->enqueue( IOTask( &i, pOpen ) ); - i.reread( path ); + IOHandler()->enqueue(IOTask(&i, pOpen)); + i.reread(path); } } else { // parse for the first time, resp. delay the parsing process - Iteration & i = series.iterations[ index ]; - i.deferParseAccess( { path, index, false, "" } ); - if( !series.m_parseLazily ) + Iteration &i = series.iterations[index]; + i.deferParseAccess({path, index, false, "", beginStep}); + if (!series.m_parseLazily) { i.runDeferredParseAccess(); *i.m_closed = Iteration::CloseStatus::Open; @@ -1024,149 +1035,179 @@ SeriesInterface::readGorVBased( bool do_init ) } }; - switch( iterationEncoding() ) + switch (iterationEncoding()) { case IterationEncoding::groupBased: /* * Sic! 
This happens when a file-based Series is opened in group-based mode. */ case IterationEncoding::fileBased: - for( auto const & it : *pList.paths ) + for (auto const &it : *pList.paths) { - uint64_t index = std::stoull( it ); - readSingleIteration( index, it, true ); + uint64_t index = std::stoull(it); + /* + * For now: parse a Series in RandomAccess mode. + * (beginStep = false) + * A streaming read mode might come in a future API addition. + */ + readSingleIteration(index, it, true, false); } break; - case IterationEncoding::variableBased: - { + case IterationEncoding::variableBased: { uint64_t index = 0; - if( series.iterations.containsAttribute( "snapshot" ) ) + if (series.iterations.containsAttribute("snapshot")) { - index = series.iterations - .getAttribute( "snapshot" ) - .get< uint64_t >(); + index = series.iterations.getAttribute("snapshot").get(); } - readSingleIteration( index, "", false ); + /* + * Variable-based iteration encoding relies on steps, so parsing must + * happen after opening the first step. + */ + readSingleIteration(index, "", false, true); break; } } } -void -SeriesInterface::readBase() +void SeriesInterface::readBase() { - auto & series = get(); + auto &series = get(); using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "openPMD"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) - setOpenPMD(Attribute(*aRead.resource).get< std::string >()); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::STRING) + setOpenPMD(Attribute(*aRead.resource).get()); else throw std::runtime_error("Unexpected Attribute datatype for 'openPMD'"); aRead.name = "openPMDextension"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == determineDatatype< uint32_t >() ) - setOpenPMDextension(Attribute(*aRead.resource).get< uint32_t >()); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == determineDatatype()) + setOpenPMDextension(Attribute(*aRead.resource).get()); else - throw std::runtime_error("Unexpected Attribute datatype for 'openPMDextension'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'openPMDextension'"); aRead.name = "basePath"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) - setAttribute("basePath", Attribute(*aRead.resource).get< std::string >()); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::STRING) + setAttribute("basePath", Attribute(*aRead.resource).get()); else - throw std::runtime_error("Unexpected Attribute datatype for 'basePath'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'basePath'"); - Parameter< Operation::LIST_ATTS > aList; + Parameter aList; IOHandler()->enqueue(IOTask(this, aList)); - IOHandler()->flush(); - if( std::count(aList.attributes->begin(), aList.attributes->end(), "meshesPath") == 1 ) + IOHandler()->flush(internal::defaultFlushParams); + if (std::count( + aList.attributes->begin(), aList.attributes->end(), "meshesPath") == + 1) { aRead.name = "meshesPath"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::STRING) { /* allow setting the meshes path after completed IO */ - for( auto& it : series.iterations ) + for (auto &it : series.iterations) it.second.meshes.written() = false; - 
setMeshesPath(Attribute(*aRead.resource).get< std::string >()); + setMeshesPath(Attribute(*aRead.resource).get()); - for( auto& it : series.iterations ) + for (auto &it : series.iterations) it.second.meshes.written() = true; } else - throw std::runtime_error("Unexpected Attribute datatype for 'meshesPath'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'meshesPath'"); } - if( std::count(aList.attributes->begin(), aList.attributes->end(), "particlesPath") == 1 ) + if (std::count( + aList.attributes->begin(), + aList.attributes->end(), + "particlesPath") == 1) { aRead.name = "particlesPath"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == DT::STRING) { /* allow setting the meshes path after completed IO */ - for( auto& it : series.iterations ) + for (auto &it : series.iterations) it.second.particles.written() = false; - setParticlesPath(Attribute(*aRead.resource).get< std::string >()); + setParticlesPath(Attribute(*aRead.resource).get()); - - for( auto& it : series.iterations ) + for (auto &it : series.iterations) it.second.particles.written() = true; } else - throw std::runtime_error("Unexpected Attribute datatype for 'particlesPath'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'particlesPath'"); } } -std::string -SeriesInterface::iterationFilename( uint64_t i ) +std::string SeriesInterface::iterationFilename(uint64_t i) { - auto & series = get(); - if( series.m_overrideFilebasedFilename.has_value() ) + /* + * The filename might have been overridden at the Series level or at the + * Iteration level. See the struct members' documentation for the reasons. + */ + auto &series = get(); + auto iteration = series.iterations.find(i); + if (series.m_overrideFilebasedFilename.has_value()) { return series.m_overrideFilebasedFilename.get(); } - std::stringstream iteration( "" ); - iteration << std::setw( series.m_filenamePadding ) - << std::setfill( '0' ) << i; - return series.m_filenamePrefix + iteration.str() - + series.m_filenamePostfix; + else if ( + iteration != series.iterations.end() && + iteration->second.m_overrideFilebasedFilename->has_value()) + { + return iteration->second.m_overrideFilebasedFilename->get(); + } + else + { + /* + * If no filename has been explicitly stored, we use the filename + * pattern to compute it. + */ + std::stringstream iterationNr(""); + iterationNr << std::setw(series.m_filenamePadding) << std::setfill('0') + << i; + return series.m_filenamePrefix + iterationNr.str() + + series.m_filenamePostfix; + } } SeriesInterface::iterations_iterator -SeriesInterface::indexOf( Iteration const & iteration ) +SeriesInterface::indexOf(Iteration const &iteration) { - auto & series = get(); - for( auto it = series.iterations.begin(); it != series.iterations.end(); - ++it ) + auto &series = get(); + for (auto it = series.iterations.begin(); it != series.iterations.end(); + ++it) { - if( &it->second.Attributable::get() == &iteration.Attributable::get() ) + if (&it->second.Attributable::get() == &iteration.Attributable::get()) { return it; } } throw std::runtime_error( - "[Iteration::close] Iteration not found in Series." 
); + "[Iteration::close] Iteration not found in Series."); } -AdvanceStatus -SeriesInterface::advance( +AdvanceStatus SeriesInterface::advance( AdvanceMode mode, - internal::AttributableData & file, + internal::AttributableData &file, iterations_iterator begin, - Iteration & iteration ) + Iteration &iteration) { - auto & series = get(); + constexpr internal::FlushParams flushParams = {FlushLevel::UserFlush}; + auto &series = get(); auto end = begin; ++end; /* @@ -1178,23 +1219,39 @@ SeriesInterface::advance( * flush_impl(), set CloseStatus to Open for now. */ Iteration::CloseStatus oldCloseStatus = *iteration.m_closed; - if( oldCloseStatus == Iteration::CloseStatus::ClosedInFrontend ) + if (oldCloseStatus == Iteration::CloseStatus::ClosedInFrontend) { *iteration.m_closed = Iteration::CloseStatus::Open; } - flush_impl( - begin, end, FlushLevel::UserFlush, /* flushIOHandler = */ false ); + switch (mode) + { + case AdvanceMode::ENDSTEP: + flush_impl(begin, end, flushParams, /* flushIOHandler = */ false); + break; + case AdvanceMode::BEGINSTEP: + /* + * When beginning a step, there is nothing to flush yet. + * Data is not written in between steps. + * So only make sure that files are accessed. + */ + flush_impl( + begin, + end, + {FlushLevel::CreateOrOpenFiles}, + /* flushIOHandler = */ false); + break; + } - if( oldCloseStatus == Iteration::CloseStatus::ClosedInFrontend ) + if (oldCloseStatus == Iteration::CloseStatus::ClosedInFrontend) { // Series::flush() would normally turn a `ClosedInFrontend` into // a `ClosedInBackend`. Do that manually. *iteration.m_closed = Iteration::CloseStatus::ClosedInBackend; } - else if( + else if ( oldCloseStatus == Iteration::CloseStatus::ClosedInBackend && - series.m_iterationEncoding == IterationEncoding::fileBased ) + series.m_iterationEncoding == IterationEncoding::fileBased) { /* * In file-based iteration encoding, we want to avoid accidentally @@ -1204,110 +1261,96 @@ SeriesInterface::advance( return AdvanceStatus::OK; } - Parameter< Operation::ADVANCE > param; - if( *iteration.m_closed == Iteration::CloseStatus::ClosedTemporarily && - series.m_iterationEncoding == IterationEncoding::fileBased ) + Parameter param; + if (*iteration.m_closed == Iteration::CloseStatus::ClosedTemporarily && + series.m_iterationEncoding == IterationEncoding::fileBased) { /* * If the Series has file-based iteration layout and the file has not * been opened by flushFileFileBased(), there's no use in nagging the * backend to do anything. 
*/ - param.status = std::make_shared< AdvanceStatus >( AdvanceStatus::OK ); + param.status = std::make_shared(AdvanceStatus::OK); } else { param.mode = mode; - IOTask task( &file.m_writable, param ); - IOHandler()->enqueue( task ); + IOTask task(&file.m_writable, param); + IOHandler()->enqueue(task); } - - if( oldCloseStatus == Iteration::CloseStatus::ClosedInFrontend && - mode == AdvanceMode::ENDSTEP ) + if (oldCloseStatus == Iteration::CloseStatus::ClosedInFrontend && + mode == AdvanceMode::ENDSTEP) { using IE = IterationEncoding; - switch( series.m_iterationEncoding ) + switch (series.m_iterationEncoding) { - case IE::fileBased: + case IE::fileBased: { + if (*iteration.m_closed != + Iteration::CloseStatus::ClosedTemporarily) { - if( *iteration.m_closed != - Iteration::CloseStatus::ClosedTemporarily ) - { - Parameter< Operation::CLOSE_FILE > fClose; - IOHandler()->enqueue( - IOTask( &iteration, std::move( fClose ) ) ); - } - *iteration.m_closed = Iteration::CloseStatus::ClosedInBackend; - break; + Parameter fClose; + IOHandler()->enqueue(IOTask(&iteration, std::move(fClose))); } - case IE::groupBased: - { - // We can now put some groups to rest - Parameter< Operation::CLOSE_PATH > fClose; - IOHandler()->enqueue( IOTask( &iteration, std::move( fClose ) ) ); - // In group-based iteration layout, files are - // not closed on a per-iteration basis - // We will treat it as such nonetheless - *iteration.m_closed = Iteration::CloseStatus::ClosedInBackend; - break; - } - case IE::variableBased: // no action necessary - break; + *iteration.m_closed = Iteration::CloseStatus::ClosedInBackend; + break; + } + case IE::groupBased: { + // We can now put some groups to rest + Parameter fClose; + IOHandler()->enqueue(IOTask(&iteration, std::move(fClose))); + // In group-based iteration layout, files are + // not closed on a per-iteration basis + // We will treat it as such nonetheless + *iteration.m_closed = Iteration::CloseStatus::ClosedInBackend; + break; + } + case IE::variableBased: // no action necessary + break; } } - // We cannot call SeriesInterface::flush now, since the IO handler is still filled - // from calling flush(Group|File)based, but has not been emptied yet + // We cannot call SeriesInterface::flush now, since the IO handler is still + // filled from calling flush(Group|File)based, but has not been emptied yet // Do that manually - IOHandler()->m_flushLevel = FlushLevel::UserFlush; - try - { - IOHandler()->flush(); - } - catch( ... ) - { - IOHandler()->m_flushLevel = FlushLevel::InternalFlush; - throw; - } - IOHandler()->m_flushLevel = FlushLevel::InternalFlush; + IOHandler()->flush(flushParams); return *param.status; } -auto SeriesInterface::openIterationIfDirty( uint64_t index, Iteration iteration ) +auto SeriesInterface::openIterationIfDirty(uint64_t index, Iteration iteration) -> IterationOpened { /* * Check side conditions on accessing iterations, and if they are fulfilled, * forward function params to openIteration(). 
*/ - if( *iteration.m_closed == Iteration::CloseStatus::ParseAccessDeferred ) + if (*iteration.m_closed == Iteration::CloseStatus::ParseAccessDeferred) { return IterationOpened::RemainsClosed; } bool const dirtyRecursive = iteration.dirtyRecursive(); - if( *iteration.m_closed == Iteration::CloseStatus::ClosedInBackend ) + if (*iteration.m_closed == Iteration::CloseStatus::ClosedInBackend) { // file corresponding with the iteration has previously been // closed and fully flushed // verify that there have been no further accesses - if( !iteration.written() ) + if (!iteration.written()) { throw std::runtime_error( "[Series] Closed iteration has not been written. This " - "is an internal error." ); + "is an internal error."); } - if( dirtyRecursive ) + if (dirtyRecursive) { throw std::runtime_error( "[Series] Detected illegal access to iteration that " - "has been closed previously." ); + "has been closed previously."); } return IterationOpened::RemainsClosed; } - switch( iterationEncoding() ) + switch (iterationEncoding()) { using IE = IterationEncoding; case IE::fileBased: @@ -1318,10 +1361,10 @@ auto SeriesInterface::openIterationIfDirty( uint64_t index, Iteration iteration * 2. Or the Series has been changed globally in a manner that * requires adapting all iterations. */ - if( dirtyRecursive || this->dirty() ) + if (dirtyRecursive || this->dirty()) { // openIteration() will update the close status - openIteration( index, iteration ); + openIteration(index, iteration); return IterationOpened::HasBeenOpened; } break; @@ -1331,22 +1374,22 @@ auto SeriesInterface::openIterationIfDirty( uint64_t index, Iteration iteration // this makes groupBased encoding safer for parallel usage // (variable-based encoding runs in lockstep anyway) // openIteration() will update the close status - openIteration( index, iteration ); + openIteration(index, iteration); return IterationOpened::HasBeenOpened; } return IterationOpened::RemainsClosed; } -void SeriesInterface::openIteration( uint64_t index, Iteration iteration ) +void SeriesInterface::openIteration(uint64_t index, Iteration iteration) { auto oldStatus = *iteration.m_closed; - switch( *iteration.m_closed ) + switch (*iteration.m_closed) { using CL = Iteration::CloseStatus; case CL::ClosedInBackend: throw std::runtime_error( "[Series] Detected illegal access to iteration that " - "has been closed previously." ); + "has been closed previously."); case CL::ParseAccessDeferred: case CL::Open: case CL::ClosedTemporarily: @@ -1363,7 +1406,7 @@ void SeriesInterface::openIteration( uint64_t index, Iteration iteration ) * Use two nested switches anyway to ensure compiler warnings upon adding * values to the enums. */ - switch( iterationEncoding() ) + switch (iterationEncoding()) { using IE = IterationEncoding; case IE::fileBased: { @@ -1376,29 +1419,29 @@ void SeriesInterface::openIteration( uint64_t index, Iteration iteration ) * Similarly, in Create mode, the iteration must first be created * before it is possible to open it. 
*/ - if( !iteration.written() && - ( IOHandler()->m_frontendAccess == Access::CREATE || - oldStatus != Iteration::CloseStatus::ParseAccessDeferred ) ) + if (!iteration.written() && + (IOHandler()->m_frontendAccess == Access::CREATE || + oldStatus != Iteration::CloseStatus::ParseAccessDeferred)) { // nothing to do, file will be opened by writing routines break; } - auto & series = get(); + auto &series = get(); // open the iteration's file again - Parameter< Operation::OPEN_FILE > fOpen; + Parameter fOpen; fOpen.encoding = iterationEncoding(); - fOpen.name = iterationFilename( index ); - IOHandler()->enqueue( IOTask( this, fOpen ) ); + fOpen.name = iterationFilename(index); + IOHandler()->enqueue(IOTask(this, fOpen)); /* open base path */ - Parameter< Operation::OPEN_PATH > pOpen; - pOpen.path = auxiliary::replace_first( basePath(), "%T/", "" ); - IOHandler()->enqueue( IOTask( &series.iterations, pOpen ) ); + Parameter pOpen; + pOpen.path = auxiliary::replace_first(basePath(), "%T/", ""); + IOHandler()->enqueue(IOTask(&series.iterations, pOpen)); /* open iteration path */ pOpen.path = iterationEncoding() == IterationEncoding::variableBased ? "" - : std::to_string( index ); - IOHandler()->enqueue( IOTask( &iteration, pOpen ) ); + : std::to_string(index); + IOHandler()->enqueue(IOTask(&iteration, pOpen)); break; } case IE::groupBased: @@ -1410,88 +1453,89 @@ void SeriesInterface::openIteration( uint64_t index, Iteration iteration ) namespace { -template< typename T > -void getJsonOption( - nlohmann::json const & config, std::string const & key, T & dest ) -{ - if( config.contains( key ) ) + template + void + getJsonOption(nlohmann::json const &config, std::string const &key, T &dest) { - dest = config.at( key ).get< T >(); + if (config.contains(key)) + { + dest = config.at(key).get(); + } } -} -void parseJsonOptions( - internal::SeriesData & series, nlohmann::json const & options ) -{ - getJsonOption( options, "defer_iteration_parsing", series.m_parseLazily ); -} -} + void parseJsonOptions( + internal::SeriesData &series, nlohmann::json const &options) + { + getJsonOption(options, "defer_iteration_parsing", series.m_parseLazily); + } +} // namespace namespace internal { #if openPMD_HAVE_MPI -SeriesInternal::SeriesInternal( - std::string const & filepath, - Access at, - MPI_Comm comm, - std::string const & options ) - : SeriesInterface{ - static_cast< internal::SeriesData * >( this ), - static_cast< internal::AttributableData * >( this ) } -{ - nlohmann::json optionsJson = auxiliary::parseOptions( options, comm ); - parseJsonOptions( *this, optionsJson ); - auto input = parseInput( filepath ); - auto handler = createIOHandler( - input->path, at, input->format, comm, std::move( optionsJson ) ); - init( handler, std::move( input ) ); -} + SeriesInternal::SeriesInternal( + std::string const &filepath, + Access at, + MPI_Comm comm, + std::string const &options) + : SeriesInterface{ + static_cast(this), + static_cast(this)} + { + nlohmann::json optionsJson = auxiliary::parseOptions(options, comm); + parseJsonOptions(*this, optionsJson); + auto input = parseInput(filepath); + auto handler = createIOHandler( + input->path, at, input->format, comm, std::move(optionsJson)); + init(handler, std::move(input)); + } #endif -SeriesInternal::SeriesInternal( - std::string const & filepath, Access at, std::string const & options ) - : SeriesInterface{ - static_cast< internal::SeriesData * >( this ), - static_cast< internal::AttributableData * >( this ) } -{ - nlohmann::json optionsJson = 
auxiliary::parseOptions( options ); - parseJsonOptions( *this, optionsJson ); - auto input = parseInput( filepath ); - auto handler = createIOHandler( - input->path, at, input->format, std::move( optionsJson ) ); - init( handler, std::move( input ) ); -} + SeriesInternal::SeriesInternal( + std::string const &filepath, Access at, std::string const &options) + : SeriesInterface{ + static_cast(this), + static_cast(this)} + { + nlohmann::json optionsJson = auxiliary::parseOptions(options); + parseJsonOptions(*this, optionsJson); + auto input = parseInput(filepath); + auto handler = createIOHandler( + input->path, at, input->format, std::move(optionsJson)); + init(handler, std::move(input)); + } -SeriesInternal::~SeriesInternal() -{ - // we must not throw in a destructor - try + SeriesInternal::~SeriesInternal() { - auto & series = get(); - // WriteIterations gets the first shot at flushing - series.m_writeIterations = auxiliary::Option< WriteIterations >(); - /* - * Scenario: A user calls `Series::flush()` but does not check for - * thrown exceptions. The exception will propagate further up, usually - * thereby popping the stack frame that holds the `Series` object. - * `Series::~Series()` will run. This check avoids that the `Series` is - * needlessly flushed a second time. Otherwise, error messages can get - * very confusing. - */ - if( get().m_lastFlushSuccessful ) + // we must not throw in a destructor + try { - flush(); + auto &series = get(); + // WriteIterations gets the first shot at flushing + series.m_writeIterations = auxiliary::Option(); + /* + * Scenario: A user calls `Series::flush()` but does not check for + * thrown exceptions. The exception will propagate further up, + * usually thereby popping the stack frame that holds the `Series` + * object. `Series::~Series()` will run. This check avoids that the + * `Series` is needlessly flushed a second time. Otherwise, error + * messages can get very confusing. + */ + if (get().m_lastFlushSuccessful) + { + flush(); + } + } + catch (std::exception const &ex) + { + std::cerr << "[~Series] An error occurred: " << ex.what() + << std::endl; + } + catch (...) + { + std::cerr << "[~Series] An error occurred." << std::endl; } } - catch( std::exception const & ex ) - { - std::cerr << "[~Series] An error occurred: " << ex.what() << std::endl; - } - catch( ... ) - { - std::cerr << "[~Series] An error occurred." 
<< std::endl; - } -} } // namespace internal Series::Series( std::shared_ptr< internal::SeriesInternal > series_in ) @@ -1500,41 +1544,37 @@ Series::Series( std::shared_ptr< internal::SeriesInternal > series_in ) static_cast< internal::AttributableData * >( series_in.get() ) } , m_series{ std::move( series_in ) } , iterations{ m_series->iterations } -{ -} +{} -Series::Series() : SeriesInterface{ nullptr, nullptr }, iterations{} -{ -} +Series::Series() : SeriesInterface{nullptr, nullptr}, iterations{} +{} #if openPMD_HAVE_MPI Series::Series( - std::string const & filepath, + std::string const &filepath, Access at, MPI_Comm comm, - std::string const & options ) - : SeriesInterface{ nullptr, nullptr } - , m_series{ std::make_shared< internal::SeriesInternal >( - filepath, at, comm, options ) } - , iterations{ m_series->iterations } + std::string const &options) + : SeriesInterface{nullptr, nullptr} + , m_series{std::make_shared( + filepath, at, comm, options)} + , iterations{m_series->iterations} { AttributableInterface::m_attri = - static_cast< internal::AttributableData * >( m_series.get() ); + static_cast(m_series.get()); SeriesInterface::m_series = m_series.get(); } #endif Series::Series( - std::string const & filepath, - Access at, - std::string const & options) - : SeriesInterface{ nullptr, nullptr } - , m_series{ std::make_shared< internal::SeriesInternal >( - filepath, at, options ) } - , iterations{ m_series->iterations } + std::string const &filepath, Access at, std::string const &options) + : SeriesInterface{nullptr, nullptr} + , m_series{std::make_shared( + filepath, at, options)} + , iterations{m_series->iterations} { AttributableInterface::m_attri = - static_cast< internal::AttributableData * >( m_series.get() ); + static_cast(m_series.get()); SeriesInterface::m_series = m_series.get(); } @@ -1547,51 +1587,52 @@ ReadIterations Series::readIterations() { // Use private constructor instead of copy constructor to avoid // object slicing - return { this->m_series }; + return {this->m_series}; } -WriteIterations -Series::writeIterations() +WriteIterations Series::writeIterations() { - auto & series = get(); - if( !series.m_writeIterations.has_value() ) + auto &series = get(); + if (!series.m_writeIterations.has_value()) { - series.m_writeIterations = WriteIterations( this->iterations ); + series.m_writeIterations = WriteIterations(this->iterations); } return series.m_writeIterations.get(); } namespace { - std::string - cleanFilename(std::string const &filename, Format f) { - switch (f) { - case Format::HDF5: - case Format::ADIOS1: - case Format::ADIOS2: - case Format::ADIOS2_SST: - case Format::ADIOS2_SSC: - case Format::JSON: - return auxiliary::replace_last(filename, suffix(f), ""); - default: - return filename; + std::string cleanFilename(std::string const &filename, Format f) + { + switch (f) + { + case Format::HDF5: + case Format::ADIOS1: + case Format::ADIOS2: + case Format::ADIOS2_SST: + case Format::ADIOS2_SSC: + case Format::JSON: + return auxiliary::replace_last(filename, suffix(f), ""); + default: + return filename; } } std::function - buildMatcher(std::string const ®exPattern, int padding) { + buildMatcher(std::string const ®exPattern, int padding) + { std::regex pattern(regexPattern); return [pattern, padding](std::string const &filename) -> Match { std::smatch regexMatches; bool match = std::regex_match(filename, regexMatches, pattern); - int processedPadding = padding != 0 - ? padding - : ( match ? 
regexMatches[ 1 ].length() : 0 ); + int processedPadding = + padding != 0 ? padding : (match ? regexMatches[1].length() : 0); return { match, processedPadding, - match ? std::stoull( regexMatches[ 1 ] ) : 0 }; }; + match ? std::stoull(regexMatches[1]) : 0}; + }; } std::function matcher( @@ -1600,8 +1641,8 @@ namespace std::string const &postfix, Format f) { - std::string filenameSuffix = suffix( f ); - if( filenameSuffix.empty() ) + std::string filenameSuffix = suffix(f); + if (filenameSuffix.empty()) { return [](std::string const &) -> Match { return {false, 0, 0}; }; } @@ -1627,5 +1668,5 @@ namespace nameReg += postfix + filenameSuffix + "$"; return buildMatcher(nameReg, padding); } -} // namespace [anonymous] +} // namespace } // namespace openPMD diff --git a/src/WriteIterations.cpp b/src/WriteIterations.cpp index 678767ae60..6fb9745a45 100644 --- a/src/WriteIterations.cpp +++ b/src/WriteIterations.cpp @@ -25,54 +25,51 @@ namespace openPMD { -WriteIterations::SharedResources::SharedResources( iterations_t _iterations ) - : iterations( std::move( _iterations ) ) -{ -} +WriteIterations::SharedResources::SharedResources(iterations_t _iterations) + : iterations(std::move(_iterations)) +{} WriteIterations::SharedResources::~SharedResources() { - if( currentlyOpen.has_value() && - iterations.retrieveSeries().get().m_lastFlushSuccessful ) + if (currentlyOpen.has_value() && + iterations.retrieveSeries().get().m_lastFlushSuccessful) { auto lastIterationIndex = currentlyOpen.get(); - auto & lastIteration = iterations.at( lastIterationIndex ); - if( !lastIteration.closed() ) + auto &lastIteration = iterations.at(lastIterationIndex); + if (!lastIteration.closed()) { lastIteration.close(); } } } -WriteIterations::WriteIterations( iterations_t iterations ) - : shared{ std::make_shared< SharedResources >( std::move( iterations ) ) } -{ -} +WriteIterations::WriteIterations(iterations_t iterations) + : shared{std::make_shared(std::move(iterations))} +{} -WriteIterations::mapped_type & -WriteIterations::operator[]( key_type const & key ) +WriteIterations::mapped_type &WriteIterations::operator[](key_type const &key) { // make a copy // explicit cast so MSVC can figure out how to do it correctly - return operator[]( static_cast< key_type && >( key_type{ key } ) ); + return operator[](static_cast(key_type{key})); } -WriteIterations::mapped_type & WriteIterations::operator[]( key_type && key ) +WriteIterations::mapped_type &WriteIterations::operator[](key_type &&key) { - if( shared->currentlyOpen.has_value() ) + if (shared->currentlyOpen.has_value()) { auto lastIterationIndex = shared->currentlyOpen.get(); - auto & lastIteration = shared->iterations.at( lastIterationIndex ); - if( lastIterationIndex != key && !lastIteration.closed() ) + auto &lastIteration = shared->iterations.at(lastIterationIndex); + if (lastIterationIndex != key && !lastIteration.closed()) { lastIteration.close(); } } shared->currentlyOpen = key; - auto & res = shared->iterations[ std::move( key ) ]; - if( res.getStepStatus() == StepStatus::NoStep ) + auto &res = shared->iterations[std::move(key)]; + if (res.getStepStatus() == StepStatus::NoStep) { - res.beginStep(); - res.setStepStatus( StepStatus::DuringStep ); + res.beginStep(/* reread = */ false); + res.setStepStatus(StepStatus::DuringStep); } return res; } diff --git a/src/auxiliary/Date.cpp b/src/auxiliary/Date.cpp index 0cee1ae6e8..21f9f89132 100644 --- a/src/auxiliary/Date.cpp +++ b/src/auxiliary/Date.cpp @@ -22,26 +22,26 @@ #include #include -#include #include - +#include 
namespace openPMD { namespace auxiliary { - std::string getDateString( std::string const & format ) + std::string getDateString(std::string const &format) { constexpr size_t maxLen = 30u; - std::array< char, maxLen > buffer; + std::array buffer; time_t rawtime; - time( &rawtime ); - struct tm* timeinfo; + time(&rawtime); + struct tm *timeinfo; // https://github.com/openPMD/openPMD-api/pull/657#issuecomment-574424885 - timeinfo = localtime( &rawtime ); // lgtm[cpp/potentially-dangerous-function] + timeinfo = + localtime(&rawtime); // lgtm[cpp/potentially-dangerous-function] - strftime( buffer.data(), maxLen, format.c_str(), timeinfo ); + strftime(buffer.data(), maxLen, format.c_str(), timeinfo); std::stringstream dateString; dateString << buffer.data(); diff --git a/src/auxiliary/Filesystem.cpp b/src/auxiliary/Filesystem.cpp index 4bd7765a4d..d3a16b2150 100644 --- a/src/auxiliary/Filesystem.cpp +++ b/src/auxiliary/Filesystem.cpp @@ -23,12 +23,12 @@ #include "openPMD/auxiliary/Unused.hpp" #ifdef _WIN32 -# include +#include #else -# include -# include -# include -# include +#include +#include +#include +#include #endif #include @@ -36,225 +36,233 @@ #include #include - namespace openPMD { namespace auxiliary { -bool -directory_exists(std::string const& path) -{ + bool directory_exists(std::string const &path) + { #ifdef _WIN32 - DWORD attributes = GetFileAttributes(path.c_str()); + DWORD attributes = GetFileAttributes(path.c_str()); - return (attributes != INVALID_FILE_ATTRIBUTES && + return ( + attributes != INVALID_FILE_ATTRIBUTES && (attributes & FILE_ATTRIBUTE_DIRECTORY)); #else - struct stat s; - return (0 == stat(path.c_str(), &s)) && S_ISDIR(s.st_mode); + struct stat s; + return (0 == stat(path.c_str(), &s)) && S_ISDIR(s.st_mode); #endif -} + } -bool -file_exists( std::string const& path ) -{ + bool file_exists(std::string const &path) + { #ifdef _WIN32 - DWORD attributes = GetFileAttributes(path.c_str()); + DWORD attributes = GetFileAttributes(path.c_str()); - return (attributes != INVALID_FILE_ATTRIBUTES && + return ( + attributes != INVALID_FILE_ATTRIBUTES && !(attributes & FILE_ATTRIBUTE_DIRECTORY)); #else - struct stat s; - return (0 == stat(path.c_str(), &s)) && S_ISREG(s.st_mode); + struct stat s; + return (0 == stat(path.c_str(), &s)) && S_ISREG(s.st_mode); #endif -} + } -std::vector< std::string > -list_directory(std::string const& path ) -{ - std::vector< std::string > ret; + std::vector list_directory(std::string const &path) + { + std::vector ret; #ifdef _WIN32 - std::string pattern(path); - pattern.append("\\*"); - WIN32_FIND_DATA data; - HANDLE hFind = FindFirstFile(pattern.c_str(), &data); - if( hFind == INVALID_HANDLE_VALUE ) - throw std::system_error(std::error_code(errno, std::system_category())); - do { - if( strcmp(data.cFileName, ".") != 0 && strcmp(data.cFileName, "..") != 0 ) - ret.emplace_back(data.cFileName); - } while (FindNextFile(hFind, &data) != 0); - FindClose(hFind); + std::string pattern(path); + pattern.append("\\*"); + WIN32_FIND_DATA data; + HANDLE hFind = FindFirstFile(pattern.c_str(), &data); + if (hFind == INVALID_HANDLE_VALUE) + throw std::system_error( + std::error_code(errno, std::system_category())); + do + { + if (strcmp(data.cFileName, ".") != 0 && + strcmp(data.cFileName, "..") != 0) + ret.emplace_back(data.cFileName); + } while (FindNextFile(hFind, &data) != 0); + FindClose(hFind); #else - auto directory = opendir(path.c_str()); - if( !directory ) - throw std::system_error(std::error_code(errno, std::system_category())); - dirent* entry; - 
while ((entry = readdir(directory)) != nullptr) - if( strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0 ) - ret.emplace_back(entry->d_name); - closedir(directory); + auto directory = opendir(path.c_str()); + if (!directory) + throw std::system_error( + std::error_code(errno, std::system_category())); + dirent *entry; + while ((entry = readdir(directory)) != nullptr) + if (strcmp(entry->d_name, ".") != 0 && + strcmp(entry->d_name, "..") != 0) + ret.emplace_back(entry->d_name); + closedir(directory); #endif - return ret; -} + return ret; + } -bool -create_directories( std::string const& path ) -{ - if( directory_exists(path) ) - return true; + bool create_directories(std::string const &path) + { + if (directory_exists(path)) + return true; #ifdef _WIN32 - auto mk = [](std::string const& p) -> bool { return CreateDirectory(p.c_str(), nullptr); }; + auto mk = [](std::string const &p) -> bool { + return CreateDirectory(p.c_str(), nullptr); + }; #else - mode_t mask = umask(0); - umask(mask); - auto mk = [mask](std::string const& p) -> bool { return (0 == mkdir(p.c_str(), 0777 & ~mask));}; + mode_t mask = umask(0); + umask(mask); + auto mk = [mask](std::string const &p) -> bool { + return (0 == mkdir(p.c_str(), 0777 & ~mask)); + }; #endif - std::istringstream ss(path); - std::string token; + std::istringstream ss(path); + std::string token; - std::string partialPath; - if( auxiliary::starts_with(path, directory_separator) ) - partialPath += directory_separator; - bool success = true; - while( std::getline( ss, token, directory_separator ) ) - { - if( !token.empty() ) - partialPath += token + directory_separator; - if( !directory_exists( partialPath ) ) + std::string partialPath; + if (auxiliary::starts_with(path, directory_separator)) + partialPath += directory_separator; + bool success = true; + while (std::getline(ss, token, directory_separator)) { - bool partial_success = mk(partialPath); - if( !partial_success ) - // did someone else just race us to create this dir? - if( !directory_exists( partialPath ) ) - success = success && partial_success; + if (!token.empty()) + partialPath += token + directory_separator; + if (!directory_exists(partialPath)) + { + bool partial_success = mk(partialPath); + if (!partial_success) + // did someone else just race us to create this dir? 
+ if (!directory_exists(partialPath)) + success = success && partial_success; + } } + return success; } - return success; -} -bool -remove_directory( std::string const& path ) -{ - if( !directory_exists(path) ) - return false; + bool remove_directory(std::string const &path) + { + if (!directory_exists(path)) + return false; - bool success = true; + bool success = true; #ifdef _WIN32 - auto del = [](std::string const& p) -> bool { return RemoveDirectory(p.c_str()); }; + auto del = [](std::string const &p) -> bool { + return RemoveDirectory(p.c_str()); + }; #else - auto del = [](std::string const& p) -> bool { return (0 == remove(p.c_str()));}; + auto del = [](std::string const &p) -> bool { + return (0 == remove(p.c_str())); + }; #endif - for( auto const& entry : list_directory(path) ) - { - std::string partialPath = path + directory_separator + entry; - if( directory_exists(partialPath) ) - success &= remove_directory(partialPath); - else if( file_exists(partialPath) ) - success &= remove_file(partialPath); + for (auto const &entry : list_directory(path)) + { + std::string partialPath = path + directory_separator + entry; + if (directory_exists(partialPath)) + success &= remove_directory(partialPath); + else if (file_exists(partialPath)) + success &= remove_file(partialPath); + } + success &= del(path); + return success; } - success &= del(path); - return success; -} -bool -remove_file( std::string const& path ) -{ - if( !file_exists(path) ) - return false; + bool remove_file(std::string const &path) + { + if (!file_exists(path)) + return false; #ifdef _WIN32 - return DeleteFile(path.c_str()); + return DeleteFile(path.c_str()); #else - return (0 == remove(path.c_str())); + return (0 == remove(path.c_str())); #endif -} + } #if openPMD_HAVE_MPI -namespace -{ - template< typename > - struct MPI_Types; - - template<> - struct MPI_Types< unsigned long > + namespace { - static MPI_Datatype const value; - }; + template + struct MPI_Types; - template<> - struct MPI_Types< unsigned long long > - { - static MPI_Datatype const value; - }; + template <> + struct MPI_Types + { + static MPI_Datatype const value; + }; - template<> - struct MPI_Types< unsigned > - { - static MPI_Datatype const value; - }; + template <> + struct MPI_Types + { + static MPI_Datatype const value; + }; - /* - * Only some of these are actually instanciated, - * so suppress warnings for the others. - */ - OPENPMDAPI_UNUSED - MPI_Datatype const MPI_Types< unsigned >::value = MPI_UNSIGNED; - OPENPMDAPI_UNUSED - MPI_Datatype const MPI_Types< unsigned long >::value = MPI_UNSIGNED_LONG; - OPENPMDAPI_UNUSED - MPI_Datatype const MPI_Types< unsigned long long >::value = MPI_UNSIGNED_LONG_LONG; -} // namespace + template <> + struct MPI_Types + { + static MPI_Datatype const value; + }; -std::string -collective_file_read( std::string const & path, MPI_Comm comm ) -{ - int rank, size; - MPI_Comm_rank( comm, &rank ); - MPI_Comm_size( comm, &size ); + /* + * Only some of these are actually instanciated, + * so suppress warnings for the others. 
+ */ + OPENPMDAPI_UNUSED + MPI_Datatype const MPI_Types::value = MPI_UNSIGNED; + OPENPMDAPI_UNUSED + MPI_Datatype const MPI_Types::value = MPI_UNSIGNED_LONG; + OPENPMDAPI_UNUSED + MPI_Datatype const MPI_Types::value = + MPI_UNSIGNED_LONG_LONG; + } // namespace - std::string res; - size_t stringLength = 0; - if( rank == 0 ) + std::string collective_file_read(std::string const &path, MPI_Comm comm) { - std::fstream handle; - handle.open( path, std::ios_base::in ); - std::stringstream stream; - stream << handle.rdbuf(); - res = stream.str(); - if( !handle.good() ) + int rank, size; + MPI_Comm_rank(comm, &rank); + MPI_Comm_size(comm, &size); + + std::string res; + size_t stringLength = 0; + if (rank == 0) + { + std::fstream handle; + handle.open(path, std::ios_base::in); + std::stringstream stream; + stream << handle.rdbuf(); + res = stream.str(); + if (!handle.good()) + { + throw std::runtime_error( + "Failed reading JSON config from file " + path + "."); + } + stringLength = res.size() + 1; + } + MPI_Datatype datatype = MPI_Types::value; + int err = MPI_Bcast(&stringLength, 1, datatype, 0, comm); + if (err) { throw std::runtime_error( - "Failed reading JSON config from file " + path + "." ); + "[collective_file_read] MPI_Bcast stringLength failure."); } - stringLength = res.size() + 1; - } - MPI_Datatype datatype = MPI_Types< size_t >::value; - int err = MPI_Bcast( &stringLength, 1, datatype, 0, comm ); - if( err ) - { - throw std::runtime_error( - "[collective_file_read] MPI_Bcast stringLength failure." ); - } - std::vector< char > recvbuf( stringLength, 0 ); - if(rank == 0) - { - std::copy_n(res.c_str(), stringLength, recvbuf.data()); - } - err = MPI_Bcast( recvbuf.data(), stringLength, MPI_CHAR, 0, comm ); - if( err ) - { - throw std::runtime_error( - "[collective_file_read] MPI_Bcast file content failure." 
); - } - if( rank != 0 ) - { - res = recvbuf.data(); + std::vector recvbuf(stringLength, 0); + if (rank == 0) + { + std::copy_n(res.c_str(), stringLength, recvbuf.data()); + } + err = MPI_Bcast(recvbuf.data(), stringLength, MPI_CHAR, 0, comm); + if (err) + { + throw std::runtime_error( + "[collective_file_read] MPI_Bcast file content failure."); + } + if (rank != 0) + { + res = recvbuf.data(); + } + return res; } - return res; -} #endif diff --git a/src/auxiliary/JSON.cpp b/src/auxiliary/JSON.cpp index d0e844c525..d0e28779a0 100644 --- a/src/auxiliary/JSON.cpp +++ b/src/auxiliary/JSON.cpp @@ -34,69 +34,62 @@ namespace openPMD { namespace auxiliary { - TracingJSON::TracingJSON() : TracingJSON( nlohmann::json() ) - { - } + TracingJSON::TracingJSON() : TracingJSON(nlohmann::json()) + {} - TracingJSON::TracingJSON( nlohmann::json originalJSON ) + TracingJSON::TracingJSON(nlohmann::json originalJSON) : m_originalJSON( - std::make_shared< nlohmann::json >( std::move( originalJSON ) ) ), - m_shadow( std::make_shared< nlohmann::json >() ), - m_positionInOriginal( &*m_originalJSON ), - m_positionInShadow( &*m_shadow ) - { - } + std::make_shared(std::move(originalJSON))) + , m_shadow(std::make_shared()) + , m_positionInOriginal(&*m_originalJSON) + , m_positionInShadow(&*m_shadow) + {} - nlohmann::json const & - TracingJSON::getShadow() + nlohmann::json const &TracingJSON::getShadow() { return *m_positionInShadow; } - nlohmann::json - TracingJSON::invertShadow() + nlohmann::json TracingJSON::invertShadow() { nlohmann::json inverted = *m_positionInOriginal; - invertShadow( inverted, *m_positionInShadow ); + invertShadow(inverted, *m_positionInShadow); return inverted; } - void - TracingJSON::invertShadow( - nlohmann::json & result, - nlohmann::json const & shadow ) + void TracingJSON::invertShadow( + nlohmann::json &result, nlohmann::json const &shadow) { - if( !shadow.is_object() ) + if (!shadow.is_object()) { return; } - std::vector< std::string > toRemove; - for( auto it = shadow.begin(); it != shadow.end(); ++it ) + std::vector toRemove; + for (auto it = shadow.begin(); it != shadow.end(); ++it) { - nlohmann::json & partialResult = result[ it.key() ]; - if( partialResult.is_object() ) + nlohmann::json &partialResult = result[it.key()]; + if (partialResult.is_object()) { - invertShadow( partialResult, it.value() ); - if( partialResult.size() == 0 ) + invertShadow(partialResult, it.value()); + if (partialResult.size() == 0) { - toRemove.emplace_back( it.key() ); + toRemove.emplace_back(it.key()); } } else { - toRemove.emplace_back( it.key() ); + toRemove.emplace_back(it.key()); } } - for( auto const & key : toRemove ) + for (auto const &key : toRemove) { - result.erase( key ); + result.erase(key); } } - void - TracingJSON::declareFullyRead() + void TracingJSON::declareFullyRead() { - if( m_trace ) + if (m_trace) { // copy over *m_positionInShadow = *m_positionInOriginal; @@ -104,76 +97,74 @@ namespace auxiliary } TracingJSON::TracingJSON( - std::shared_ptr< nlohmann::json > originalJSON, - std::shared_ptr< nlohmann::json > shadow, - nlohmann::json * positionInOriginal, - nlohmann::json * positionInShadow, - bool trace ) - : m_originalJSON( std::move( originalJSON ) ), - m_shadow( std::move( shadow ) ), - m_positionInOriginal( positionInOriginal ), - m_positionInShadow( positionInShadow ), - m_trace( trace ) - { - } + std::shared_ptr originalJSON, + std::shared_ptr shadow, + nlohmann::json *positionInOriginal, + nlohmann::json *positionInShadow, + bool trace) + : 
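collective_file_read() in the Filesystem.cpp hunk above follows a common MPI idiom for sharing a string read on one rank: broadcast the length first, then the bytes. A reduced sketch of just that broadcast step, with an illustrative helper name and without the error handling of the original:

#include <mpi.h>

#include <algorithm>
#include <string>
#include <vector>

// broadcast `text` (only meaningful on rank 0) to all ranks of `comm`
std::string bcast_string(std::string text, MPI_Comm comm)
{
    int rank;
    MPI_Comm_rank(comm, &rank);
    unsigned long long length = rank == 0 ? text.size() + 1 : 0;
    MPI_Bcast(&length, 1, MPI_UNSIGNED_LONG_LONG, 0, comm);
    std::vector<char> buffer(length, 0);
    if (rank == 0)
        std::copy_n(text.c_str(), length, buffer.data());
    MPI_Bcast(buffer.data(), static_cast<int>(length), MPI_CHAR, 0, comm);
    return std::string(buffer.data());
}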
m_originalJSON(std::move(originalJSON)) + , m_shadow(std::move(shadow)) + , m_positionInOriginal(positionInOriginal) + , m_positionInShadow(positionInShadow) + , m_trace(trace) + {} - namespace { - auxiliary::Option< std::string > - extractFilename( std::string const & unparsed ) + namespace { - std::string trimmed = auxiliary::trim( - unparsed, []( char c ) { return std::isspace( c ); } ); - if( trimmed.at( 0 ) == '@' ) - { - trimmed = trimmed.substr( 1 ); - trimmed = auxiliary::trim( - trimmed, []( char c ) { return std::isspace( c ); } ); - return auxiliary::makeOption( trimmed ); - } - else + auxiliary::Option + extractFilename(std::string const &unparsed) { - return auxiliary::Option< std::string >{}; + std::string trimmed = auxiliary::trim( + unparsed, [](char c) { return std::isspace(c); }); + if (trimmed.at(0) == '@') + { + trimmed = trimmed.substr(1); + trimmed = auxiliary::trim( + trimmed, [](char c) { return std::isspace(c); }); + return auxiliary::makeOption(trimmed); + } + else + { + return auxiliary::Option{}; + } } - } - } + } // namespace - nlohmann::json - parseOptions( std::string const & options ) + nlohmann::json parseOptions(std::string const &options) { - auto filename = extractFilename( options ); - if( filename.has_value() ) + auto filename = extractFilename(options); + if (filename.has_value()) { std::fstream handle; - handle.open( filename.get(), std::ios_base::in ); + handle.open(filename.get(), std::ios_base::in); nlohmann::json res; handle >> res; - if( !handle.good() ) + if (!handle.good()) { throw std::runtime_error( "Failed reading JSON config from file " + filename.get() + - "." ); + "."); } return res; } else { - return nlohmann::json::parse( options ); + return nlohmann::json::parse(options); } } #if openPMD_HAVE_MPI - nlohmann::json - parseOptions( std::string const & options, MPI_Comm comm ) + nlohmann::json parseOptions(std::string const &options, MPI_Comm comm) { - auto filename = extractFilename( options ); - if( filename.has_value() ) + auto filename = extractFilename(options); + if (filename.has_value()) { return nlohmann::json::parse( - auxiliary::collective_file_read( filename.get(), comm ) ); + auxiliary::collective_file_read(filename.get(), comm)); } else { - return nlohmann::json::parse( options ); + return nlohmann::json::parse(options); } } #endif diff --git a/src/backend/Attributable.cpp b/src/backend/Attributable.cpp index 59b1d43e04..9c3f737f9e 100644 --- a/src/backend/Attributable.cpp +++ b/src/backend/Attributable.cpp @@ -33,155 +33,147 @@ namespace openPMD { namespace internal { -AttributableData::AttributableData() : m_writable{ this } -{ -} -} + AttributableData::AttributableData() : m_writable{this} + {} +} // namespace internal -AttributableInterface::AttributableInterface( internal::AttributableData * attri ) - : m_attri{ attri } -{ -} +AttributableInterface::AttributableInterface(internal::AttributableData *attri) + : m_attri{attri} +{} -Attribute -AttributableInterface::getAttribute(std::string const& key) const +Attribute AttributableInterface::getAttribute(std::string const &key) const { - auto & attri = get(); + auto &attri = get(); auto it = attri.m_attributes.find(key); - if( it != attri.m_attributes.cend() ) + if (it != attri.m_attributes.cend()) return it->second; throw no_such_attribute_error(key); } -bool -AttributableInterface::deleteAttribute(std::string const& key) +bool AttributableInterface::deleteAttribute(std::string const &key) { - auto & attri = get(); - if(Access::READ_ONLY == IOHandler()->m_frontendAccess ) - 
throw std::runtime_error("Can not delete an Attribute in a read-only Series."); + auto &attri = get(); + if (Access::READ_ONLY == IOHandler()->m_frontendAccess) + throw std::runtime_error( + "Can not delete an Attribute in a read-only Series."); auto it = attri.m_attributes.find(key); - if( it != attri.m_attributes.end() ) + if (it != attri.m_attributes.end()) { - Parameter< Operation::DELETE_ATT > aDelete; + Parameter aDelete; aDelete.name = key; IOHandler()->enqueue(IOTask(this, aDelete)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); attri.m_attributes.erase(it); return true; } return false; } -std::vector< std::string > -AttributableInterface::attributes() const +std::vector AttributableInterface::attributes() const { - auto & attri = get(); - std::vector< std::string > ret; + auto &attri = get(); + std::vector ret; ret.reserve(attri.m_attributes.size()); - for( auto const& entry : attri.m_attributes ) + for (auto const &entry : attri.m_attributes) ret.emplace_back(entry.first); return ret; } -size_t -AttributableInterface::numAttributes() const +size_t AttributableInterface::numAttributes() const { return get().m_attributes.size(); } -bool -AttributableInterface::containsAttribute(std::string const &key) const +bool AttributableInterface::containsAttribute(std::string const &key) const { - auto & attri = get(); + auto &attri = get(); return attri.m_attributes.find(key) != attri.m_attributes.end(); } -std::string -AttributableInterface::comment() const +std::string AttributableInterface::comment() const { - return getAttribute("comment").get< std::string >(); + return getAttribute("comment").get(); } -AttributableInterface& -AttributableInterface::setComment(std::string const& c) +AttributableInterface &AttributableInterface::setComment(std::string const &c) { setAttribute("comment", c); return *this; } -void -AttributableInterface::seriesFlush() +void AttributableInterface::seriesFlush() { writable().seriesFlush(); } -internal::SeriesInternal const & AttributableInterface::retrieveSeries() const +internal::SeriesInternal const &AttributableInterface::retrieveSeries() const { - Writable const * findSeries = &writable(); - while( findSeries->parent ) + Writable const *findSeries = &writable(); + while (findSeries->parent) { findSeries = findSeries->parent; } - return auxiliary::deref_dynamic_cast< internal::SeriesInternal >( - findSeries->attributable ); + return auxiliary::deref_dynamic_cast( + findSeries->attributable); } -internal::SeriesInternal & AttributableInterface::retrieveSeries() +internal::SeriesInternal &AttributableInterface::retrieveSeries() { - return const_cast< internal::SeriesInternal & >( - static_cast< AttributableInterface const * >( this )->retrieveSeries() ); + return const_cast( + static_cast(this)->retrieveSeries()); } -Iteration const & AttributableInterface::containingIteration() const +Iteration const &AttributableInterface::containingIteration() const { - std::vector< Writable const * > searchQueue; - searchQueue.reserve( 7 ); - Writable const * findSeries = &writable(); - while( findSeries ) + std::vector searchQueue; + searchQueue.reserve(7); + Writable const *findSeries = &writable(); + while (findSeries) { - searchQueue.push_back( findSeries ); + searchQueue.push_back(findSeries); // we don't need to push the last Writable since it's the Series anyway findSeries = findSeries->parent; } // End of the queue: // Iteration -> Series.iterations -> Series - if( searchQueue.size() < 3 ) + if (searchQueue.size() < 3) { throw 
std::runtime_error( "containingIteration(): Must be called for an object contained in " - "an iteration." ); + "an iteration."); } auto end = searchQueue.rbegin(); - internal::AttributableData const * attr = ( *( end + 2 ) )->attributable; - if( attr == nullptr ) - throw std::runtime_error( "containingIteration(): attributable must not be a nullptr." ); + internal::AttributableData const *attr = (*(end + 2))->attributable; + if (attr == nullptr) + throw std::runtime_error( + "containingIteration(): attributable must not be a nullptr."); /* * We now know the unique instance of Attributable that corresponds with * the iteration. * Since the class Iteration itself still follows the old class design, * we will have to take a detour via Series. */ - auto & series = auxiliary::deref_dynamic_cast< internal::SeriesInternal >( - ( *searchQueue.rbegin() )->attributable ); - for( auto const & pair : series.iterations ) + auto &series = auxiliary::deref_dynamic_cast( + (*searchQueue.rbegin())->attributable); + for (auto const &pair : series.iterations) { - if( &pair.second.get() == attr ) + if (&pair.second.get() == attr) { return pair.second; } } throw std::runtime_error( - "Containing iteration not found in containing Series." ); + "Containing iteration not found in containing Series."); } -Iteration & AttributableInterface::containingIteration() +Iteration &AttributableInterface::containingIteration() { - return const_cast< Iteration & >( - static_cast< AttributableInterface const * >( this ) - ->containingIteration() ); + return const_cast( + static_cast(this) + ->containingIteration()); } std::string Attributable::MyPath::filePath() const @@ -192,49 +184,53 @@ std::string Attributable::MyPath::filePath() const auto AttributableInterface::myPath() const -> MyPath { MyPath res; - Writable const * findSeries = &writable(); - while( findSeries->parent ) + Writable const *findSeries = &writable(); + while (findSeries->parent) { // we don't need to push_back the ownKeyWithinParent of the Series class // so it's alright that this loop doesn't ask the key of the last found // Writable // push these in reverse because we're building the list from the back - for( auto it = findSeries->ownKeyWithinParent.rbegin(); + for (auto it = findSeries->ownKeyWithinParent.rbegin(); it != findSeries->ownKeyWithinParent.rend(); - ++it ) + ++it) { - res.group.push_back(*it ); + res.group.push_back(*it); } findSeries = findSeries->parent; } - std::reverse(res.group.begin(), res.group.end() ); - auto const & series = - auxiliary::deref_dynamic_cast< internal::SeriesInternal >( - findSeries->attributable ); + std::reverse(res.group.begin(), res.group.end()); + auto const &series = + auxiliary::deref_dynamic_cast( + findSeries->attributable); res.seriesName = series.name(); - res.seriesExtension = suffix( series.m_format ); + res.seriesExtension = suffix(series.m_format); res.directory = IOHandler()->directory; return res; } -void -AttributableInterface::seriesFlush( FlushLevel level ) +void Attributable::seriesFlush(internal::FlushParams flushParams) { - writable().seriesFlush( level ); + writable().seriesFlush(flushParams); } -void -AttributableInterface::flushAttributes() +void Attributable::flushAttributes(internal::FlushParams const &flushParams) { - if( IOHandler()->m_flushLevel == FlushLevel::SkeletonOnly ) + switch (flushParams.flushLevel) { + case FlushLevel::SkeletonOnly: + case FlushLevel::CreateOrOpenFiles: return; + case FlushLevel::InternalFlush: + case FlushLevel::UserFlush: + // pass + break; } - if( dirty() 
) + if (dirty()) { - Parameter< Operation::WRITE_ATT > aWrite; - for( std::string const & att_name : attributes() ) + Parameter aWrite; + for (std::string const &att_name : attributes()) { aWrite.name = att_name; aWrite.resource = getAttribute(att_name).getResource(); @@ -246,209 +242,309 @@ AttributableInterface::flushAttributes() } } -void -AttributableInterface::readAttributes( ReadMode mode ) +void AttributableInterface::readAttributes(ReadMode mode) { - auto & attri = get(); - Parameter< Operation::LIST_ATTS > aList; + auto &attri = get(); + Parameter aList; IOHandler()->enqueue(IOTask(this, aList)); - IOHandler()->flush(); - std::vector< std::string > written_attributes = attributes(); + IOHandler()->flush(internal::defaultFlushParams); + std::vector written_attributes = attributes(); /* std::set_difference requires sorted ranges */ std::sort(aList.attributes->begin(), aList.attributes->end()); std::sort(written_attributes.begin(), written_attributes.end()); - std::set< std::string > tmpAttributes; - switch( mode ) + std::set tmpAttributes; + switch (mode) { case ReadMode::IgnoreExisting: // reread: aList - written_attributes std::set_difference( - aList.attributes->begin(), aList.attributes->end(), - written_attributes.begin(), written_attributes.end(), + aList.attributes->begin(), + aList.attributes->end(), + written_attributes.begin(), + written_attributes.end(), std::inserter(tmpAttributes, tmpAttributes.begin())); break; case ReadMode::OverrideExisting: - tmpAttributes = std::set< std::string >( - aList.attributes->begin(), - aList.attributes->end() ); + tmpAttributes = std::set( + aList.attributes->begin(), aList.attributes->end()); break; case ReadMode::FullyReread: attri.m_attributes.clear(); - tmpAttributes = std::set< std::string >( - aList.attributes->begin(), - aList.attributes->end() ); + tmpAttributes = std::set( + aList.attributes->begin(), aList.attributes->end()); break; } using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; - for( auto const& att_name : tmpAttributes ) + for (auto const &att_name : tmpAttributes) { aRead.name = att_name; std::string att = auxiliary::strip(att_name, {'\0'}); IOHandler()->enqueue(IOTask(this, aRead)); try { - IOHandler()->flush(); - } catch( unsupported_data_error const& e ) + IOHandler()->flush(internal::defaultFlushParams); + } + catch (unsupported_data_error const &e) { - std::cerr << "Skipping non-standard attribute " - << att << " (" - << e.what() - << ")\n"; + std::cerr << "Skipping non-standard attribute " << att << " (" + << e.what() << ")\n"; continue; } Attribute a(*aRead.resource); - auto guardUnitDimension = - [ this ]( std::string const & key, auto vector ) - { - if( key == "unitDimension" ) + auto guardUnitDimension = [this](std::string const &key, auto vector) { + if (key == "unitDimension") { // Some backends may report the wrong type when reading - if( vector.size() != 7 ) + if (vector.size() != 7) { throw std::runtime_error( "[Attributable] " - "Unexpected datatype for unitDimension." 
); + "Unexpected datatype for unitDimension."); } - std::array< double, 7 > arr; - std::copy_n( vector.begin(), 7, arr.begin() ); - setAttribute( key, std::move( arr ) ); + std::array arr; + std::copy_n(vector.begin(), 7, arr.begin()); + setAttributeImpl( + key, + std::move(arr), + internal::SetAttributeMode::WhileReadingAttributes); } else { - setAttribute( key, std::move( vector ) ); + setAttributeImpl( + key, + std::move(vector), + internal::SetAttributeMode::WhileReadingAttributes); } }; - switch( *aRead.dtype ) + switch (*aRead.dtype) { - case DT::CHAR: - setAttribute(att, a.get< char >()); - break; - case DT::UCHAR: - setAttribute(att, a.get< unsigned char >()); - break; - case DT::SHORT: - setAttribute(att, a.get< short >()); - break; - case DT::INT: - setAttribute(att, a.get< int >()); - break; - case DT::LONG: - setAttribute(att, a.get< long >()); - break; - case DT::LONGLONG: - setAttribute(att, a.get< long long >()); - break; - case DT::USHORT: - setAttribute(att, a.get< unsigned short >()); - break; - case DT::UINT: - setAttribute(att, a.get< unsigned int >()); - break; - case DT::ULONG: - setAttribute(att, a.get< unsigned long >()); - break; - case DT::ULONGLONG: - setAttribute(att, a.get< unsigned long long >()); - break; - case DT::FLOAT: - setAttribute(att, a.get< float >()); - break; - case DT::DOUBLE: - setAttribute(att, a.get< double >()); - break; - case DT::LONG_DOUBLE: - setAttribute(att, a.get< long double >()); - break; - case DT::CFLOAT: - setAttribute(att, a.get< std::complex< float > >()); - break; - case DT::CDOUBLE: - setAttribute(att, a.get< std::complex< double > >()); - break; - case DT::CLONG_DOUBLE: - setAttribute(att, a.get< std::complex< long double > >()); - break; - case DT::STRING: - setAttribute(att, a.get< std::string >()); - break; - case DT::VEC_CHAR: - setAttribute(att, a.get< std::vector< char > >()); - break; - case DT::VEC_SHORT: - setAttribute(att, a.get< std::vector< short > >()); - break; - case DT::VEC_INT: - setAttribute(att, a.get< std::vector< int > >()); - break; - case DT::VEC_LONG: - setAttribute(att, a.get< std::vector< long > >()); - break; - case DT::VEC_LONGLONG: - setAttribute(att, a.get< std::vector< long long > >()); - break; - case DT::VEC_UCHAR: - setAttribute(att, a.get< std::vector< unsigned char > >()); - break; - case DT::VEC_USHORT: - setAttribute(att, a.get< std::vector< unsigned short > >()); - break; - case DT::VEC_UINT: - setAttribute(att, a.get< std::vector< unsigned int > >()); - break; - case DT::VEC_ULONG: - setAttribute(att, a.get< std::vector< unsigned long > >()); - break; - case DT::VEC_ULONGLONG: - setAttribute(att, a.get< std::vector< unsigned long long > >()); - break; - case DT::VEC_FLOAT: - guardUnitDimension( att, a.get< std::vector< float > >() ); - break; - case DT::VEC_DOUBLE: - guardUnitDimension( att, a.get< std::vector< double > >() ); - break; - case DT::VEC_LONG_DOUBLE: - guardUnitDimension( att, a.get< std::vector< long double > >() ); - break; - case DT::VEC_CFLOAT: - setAttribute(att, a.get< std::vector< std::complex< float > > >()); - break; - case DT::VEC_CDOUBLE: - setAttribute(att, a.get< std::vector< std::complex< double > > >()); - break; - case DT::VEC_CLONG_DOUBLE: - setAttribute(att, a.get< std::vector< std::complex< long double > > >()); - break; - case DT::VEC_STRING: - setAttribute(att, a.get< std::vector< std::string > >()); - break; - case DT::ARR_DBL_7: - setAttribute(att, a.get< std::array< double, 7 > >()); - break; - case DT::BOOL: - setAttribute(att, a.get< bool >()); - 
break; - case DT::DATATYPE: - case DT::UNDEFINED: - throw std::runtime_error("Invalid Attribute datatype during read"); + case DT::CHAR: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::UCHAR: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::SHORT: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::INT: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::LONG: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::LONGLONG: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::USHORT: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::UINT: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::ULONG: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::ULONGLONG: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::FLOAT: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::DOUBLE: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::LONG_DOUBLE: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::CFLOAT: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::CDOUBLE: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::CLONG_DOUBLE: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::STRING: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_CHAR: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_SHORT: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_INT: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_LONG: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_LONGLONG: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_UCHAR: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_USHORT: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_UINT: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_ULONG: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_ULONGLONG: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_FLOAT: + guardUnitDimension(att, a.get >()); + break; + case DT::VEC_DOUBLE: + 
guardUnitDimension(att, a.get >()); + break; + case DT::VEC_LONG_DOUBLE: + guardUnitDimension(att, a.get >()); + break; + case DT::VEC_CFLOAT: + setAttributeImpl( + att, + a.get > >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_CDOUBLE: + setAttributeImpl( + att, + a.get > >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_CLONG_DOUBLE: + setAttributeImpl( + att, + a.get > >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::VEC_STRING: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::ARR_DBL_7: + setAttributeImpl( + att, + a.get >(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::BOOL: + setAttributeImpl( + att, + a.get(), + internal::SetAttributeMode::WhileReadingAttributes); + break; + case DT::DATATYPE: + case DT::UNDEFINED: + throw std::runtime_error("Invalid Attribute datatype during read"); } } dirty() = false; } -void -AttributableInterface::linkHierarchy(Writable& w) +void AttributableInterface::linkHierarchy(Writable &w) { auto handler = w.IOHandler; writable().IOHandler = handler; writable().parent = &w; } -} // openPMD +} // namespace openPMD diff --git a/src/backend/BaseRecordComponent.cpp b/src/backend/BaseRecordComponent.cpp index e099e21b06..0ec5acdc8e 100644 --- a/src/backend/BaseRecordComponent.cpp +++ b/src/backend/BaseRecordComponent.cpp @@ -23,52 +23,49 @@ namespace openPMD { -double -BaseRecordComponent::unitSI() const +double BaseRecordComponent::unitSI() const { - return getAttribute("unitSI").get< double >(); + return getAttribute("unitSI").get(); } -BaseRecordComponent& -BaseRecordComponent::resetDatatype(Datatype d) +BaseRecordComponent &BaseRecordComponent::resetDatatype(Datatype d) { - if( written() ) - throw std::runtime_error("A Records Datatype can not (yet) be changed after it has been written."); + if (written()) + throw std::runtime_error( + "A Records Datatype can not (yet) be changed after it has been " + "written."); m_dataset->dtype = d; return *this; } BaseRecordComponent::BaseRecordComponent() - : m_dataset{std::make_shared< Dataset >(Dataset(Datatype::UNDEFINED, {}))}, - m_isConstant{std::make_shared< bool >(false)} -{ } + : m_dataset{std::make_shared(Dataset(Datatype::UNDEFINED, {}))} + , m_isConstant{std::make_shared(false)} +{} -Datatype -BaseRecordComponent::getDatatype() const +Datatype BaseRecordComponent::getDatatype() const { return m_dataset->dtype; } -bool -BaseRecordComponent::constant() const +bool BaseRecordComponent::constant() const { return *m_isConstant; } -ChunkTable -BaseRecordComponent::availableChunks() +ChunkTable BaseRecordComponent::availableChunks() { - if( m_isConstant && *m_isConstant ) + if (m_isConstant && *m_isConstant) { - Offset offset( m_dataset->extent.size(), 0 ); - return ChunkTable{ { std::move( offset ), m_dataset->extent } }; + Offset offset(m_dataset->extent.size(), 0); + return ChunkTable{{std::move(offset), m_dataset->extent}}; } containingIteration().open(); - Parameter< Operation::AVAILABLE_CHUNKS > param; - IOTask task( this, param ); - IOHandler()->enqueue( task ); - IOHandler()->flush(); - return std::move( *param.chunks ); + Parameter param; + IOTask task(this, param); + IOHandler()->enqueue(task); + IOHandler()->flush(internal::defaultFlushParams); + return std::move(*param.chunks); } } // namespace openPMD diff --git a/src/backend/Container.cpp b/src/backend/Container.cpp index c47dbbe178..a482ee8d77 100644 --- 
a/src/backend/Container.cpp +++ b/src/backend/Container.cpp @@ -26,36 +26,36 @@ namespace openPMD { namespace detail { -template<> -std::vector< std::string > keyAsString< std::string const & >( - std::string const & key, std::vector< std::string > const & parentKey ) -{ - if( key == RecordComponent::SCALAR ) - { - auto ret = parentKey; - ret.emplace_back( RecordComponent::SCALAR ); - return ret; - } - else + template <> + std::vector keyAsString( + std::string const &key, std::vector const &parentKey) { - return { key }; + if (key == RecordComponent::SCALAR) + { + auto ret = parentKey; + ret.emplace_back(RecordComponent::SCALAR); + return ret; + } + else + { + return {key}; + } } -} -template<> -std::vector< std::string > keyAsString< std::string >( - std::string && key, std::vector< std::string > const & parentKey ) -{ - if( key == RecordComponent::SCALAR ) - { - auto ret = parentKey; - ret.emplace_back( RecordComponent::SCALAR ); - return ret; - } - else + template <> + std::vector keyAsString( + std::string &&key, std::vector const &parentKey) { - return { std::move( key ) }; + if (key == RecordComponent::SCALAR) + { + auto ret = parentKey; + ret.emplace_back(RecordComponent::SCALAR); + return ret; + } + else + { + return {std::move(key)}; + } } -} -} -} +} // namespace detail +} // namespace openPMD diff --git a/src/backend/MeshRecordComponent.cpp b/src/backend/MeshRecordComponent.cpp index 8c9634a32c..9602868a07 100644 --- a/src/backend/MeshRecordComponent.cpp +++ b/src/backend/MeshRecordComponent.cpp @@ -20,55 +20,51 @@ */ #include "openPMD/backend/MeshRecordComponent.hpp" - namespace openPMD { -MeshRecordComponent::MeshRecordComponent() - : RecordComponent() +MeshRecordComponent::MeshRecordComponent() : RecordComponent() { - setPosition(std::vector< double >{0}); + setPosition(std::vector{0}); } -void -MeshRecordComponent::read() +void MeshRecordComponent::read() { using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "position"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); Attribute a = Attribute(*aRead.resource); - if( *aRead.dtype == DT::VEC_FLOAT || *aRead.dtype == DT::FLOAT ) - setPosition(a.get< std::vector< float > >()); - else if( *aRead.dtype == DT::VEC_DOUBLE || *aRead.dtype == DT::DOUBLE ) - setPosition(a.get< std::vector< double > >()); - else if( *aRead.dtype == DT::VEC_LONG_DOUBLE || *aRead.dtype == DT::LONG_DOUBLE ) - setPosition(a.get< std::vector< long double > >()); + if (*aRead.dtype == DT::VEC_FLOAT || *aRead.dtype == DT::FLOAT) + setPosition(a.get >()); + else if (*aRead.dtype == DT::VEC_DOUBLE || *aRead.dtype == DT::DOUBLE) + setPosition(a.get >()); + else if ( + *aRead.dtype == DT::VEC_LONG_DOUBLE || *aRead.dtype == DT::LONG_DOUBLE) + setPosition(a.get >()); else - throw std::runtime_error( "Unexpected Attribute datatype for 'position'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'position'"); readBase(); } -template< typename T > -MeshRecordComponent& -MeshRecordComponent::setPosition(std::vector< T > pos) +template +MeshRecordComponent &MeshRecordComponent::setPosition(std::vector pos) { - static_assert(std::is_floating_point< T >::value, - "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); setAttribute("position", pos); return *this; } -template -MeshRecordComponent& -MeshRecordComponent::setPosition(std::vector< float > pos); 
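The MeshRecordComponent.cpp hunk here keeps setPosition() as a template that is restricted to floating-point element types via static_assert and then explicitly instantiated for float, double and long double. A compact, self-contained illustration of that pattern, with made-up names that are not openPMD API:

#include <type_traits>
#include <vector>

template <typename T>
void set_position_like(std::vector<T> pos)
{
    static_assert(
        std::is_floating_point<T>::value,
        "Type of attribute must be floating point");
    (void)pos; // a real implementation would store the value as an attribute
}

// explicit instantiations, mirroring the float/double/long double set
template void set_position_like(std::vector<float> pos);
template void set_position_like(std::vector<double> pos);
template void set_position_like(std::vector<long double> pos);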
-template -MeshRecordComponent& -MeshRecordComponent::setPosition(std::vector< double > pos); -template -MeshRecordComponent& -MeshRecordComponent::setPosition(std::vector< long double > pos); -} // openPMD +template MeshRecordComponent & +MeshRecordComponent::setPosition(std::vector pos); +template MeshRecordComponent & +MeshRecordComponent::setPosition(std::vector pos); +template MeshRecordComponent & +MeshRecordComponent::setPosition(std::vector pos); +} // namespace openPMD diff --git a/src/backend/PatchRecord.cpp b/src/backend/PatchRecord.cpp index 3926677a30..f31b135b62 100644 --- a/src/backend/PatchRecord.cpp +++ b/src/backend/PatchRecord.cpp @@ -18,66 +18,71 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . */ -#include "openPMD/auxiliary/Memory.hpp" #include "openPMD/backend/PatchRecord.hpp" - +#include "openPMD/auxiliary/Memory.hpp" namespace openPMD { -PatchRecord& -PatchRecord::setUnitDimension(std::map< UnitDimension, double > const& udim) +PatchRecord & +PatchRecord::setUnitDimension(std::map const &udim) { - if( !udim.empty() ) + if (!udim.empty()) { - std::array< double, 7 > tmpUnitDimension = this->unitDimension(); - for( auto const& entry : udim ) + std::array tmpUnitDimension = this->unitDimension(); + for (auto const &entry : udim) tmpUnitDimension[static_cast(entry.first)] = entry.second; setAttribute("unitDimension", tmpUnitDimension); } return *this; } -void -PatchRecord::flush_impl(std::string const& path) +void PatchRecord::flush_impl( + std::string const &path, internal::FlushParams const &flushParams) { - if( this->find(RecordComponent::SCALAR) == this->end() ) + if (this->find(RecordComponent::SCALAR) == this->end()) { - if(IOHandler()->m_frontendAccess != Access::READ_ONLY ) - Container< PatchRecordComponent >::flush(path); // warning (clang-tidy-10): bugprone-parent-virtual-call - for( auto& comp : *this ) - comp.second.flush(comp.first); - } else - this->operator[](RecordComponent::SCALAR).flush(path); - if( IOHandler()->m_flushLevel == FlushLevel::UserFlush ) + if (IOHandler()->m_frontendAccess != Access::READ_ONLY) + Container::flush( + path, flushParams); // warning (clang-tidy-10): + // bugprone-parent-virtual-call + for (auto &comp : *this) + comp.second.flush(comp.first, flushParams); + } + else + this->operator[](RecordComponent::SCALAR).flush(path, flushParams); + if (flushParams.flushLevel == FlushLevel::UserFlush) { this->dirty() = false; } } -void -PatchRecord::read() +void PatchRecord::read() { - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "unitDimension"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - if( *aRead.dtype == Datatype::ARR_DBL_7 || *aRead.dtype == Datatype::VEC_DOUBLE ) - this->setAttribute("unitDimension", Attribute(*aRead.resource).template get< std::array< double, 7 > >()); + if (*aRead.dtype == Datatype::ARR_DBL_7 || + *aRead.dtype == Datatype::VEC_DOUBLE) + this->setAttribute( + "unitDimension", + Attribute(*aRead.resource).template get >()); else - throw std::runtime_error("Unexpected Attribute datatype for 'unitDimension'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'unitDimension'"); - Parameter< Operation::LIST_DATASETS > dList; + Parameter dList; IOHandler()->enqueue(IOTask(this, dList)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); - Parameter< Operation::OPEN_DATASET > dOpen; - for( auto const& component_name : *dList.datasets ) + 
Parameter dOpen; + for (auto const &component_name : *dList.datasets) { - PatchRecordComponent& prc = (*this)[component_name]; + PatchRecordComponent &prc = (*this)[component_name]; dOpen.name = component_name; IOHandler()->enqueue(IOTask(&prc, dOpen)); - IOHandler()->flush(); + IOHandler()->flush(internal::defaultFlushParams); /* allow all attributes to be set */ prc.written() = false; prc.resetDataset(Dataset(*dOpen.dtype, *dOpen.extent)); @@ -86,4 +91,4 @@ PatchRecord::read() } dirty() = false; } -} // openPMD +} // namespace openPMD diff --git a/src/backend/PatchRecordComponent.cpp b/src/backend/PatchRecordComponent.cpp index aa97f6327d..3ed031f52b 100644 --- a/src/backend/PatchRecordComponent.cpp +++ b/src/backend/PatchRecordComponent.cpp @@ -18,70 +18,71 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . */ -#include "openPMD/auxiliary/Memory.hpp" #include "openPMD/backend/PatchRecordComponent.hpp" +#include "openPMD/auxiliary/Memory.hpp" #include - namespace openPMD { -PatchRecordComponent& -PatchRecordComponent::setUnitSI(double usi) +PatchRecordComponent &PatchRecordComponent::setUnitSI(double usi) { setAttribute("unitSI", usi); return *this; } -PatchRecordComponent& -PatchRecordComponent::resetDataset(Dataset d) +PatchRecordComponent &PatchRecordComponent::resetDataset(Dataset d) { - if( written() ) - throw std::runtime_error("A Records Dataset can not (yet) be changed after it has been written."); - if( d.extent.empty() ) - throw std::runtime_error("Dataset extent must be at least 1D."); - if( std::any_of(d.extent.begin(), d.extent.end(), - [](Extent::value_type const& i) { return i == 0u; }) ) - throw std::runtime_error("Dataset extent must not be zero in any dimension."); + if (written()) + throw std::runtime_error( + "A Records Dataset can not (yet) be changed after it has been " + "written."); + if (d.extent.empty()) + throw std::runtime_error("Dataset extent must be at least 1D."); + if (std::any_of( + d.extent.begin(), d.extent.end(), [](Extent::value_type const &i) { + return i == 0u; + })) + throw std::runtime_error( + "Dataset extent must not be zero in any dimension."); *m_dataset = d; dirty() = true; return *this; } -uint8_t -PatchRecordComponent::getDimensionality() const +uint8_t PatchRecordComponent::getDimensionality() const { return 1; } -Extent -PatchRecordComponent::getExtent() const +Extent PatchRecordComponent::getExtent() const { return m_dataset->extent; } PatchRecordComponent::PatchRecordComponent() - : m_chunks{std::make_shared< std::queue< IOTask > >()} + : m_chunks{std::make_shared >()} { setUnitSI(1); } -void -PatchRecordComponent::flush(std::string const& name) +void PatchRecordComponent::flush( + std::string const &name, internal::FlushParams const &flushParams) { - if(IOHandler()->m_frontendAccess == Access::READ_ONLY ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) { - while( !m_chunks->empty() ) + while (!m_chunks->empty()) { IOHandler()->enqueue(m_chunks->front()); m_chunks->pop(); } - } else + } + else { - if( !written() ) + if (!written()) { - Parameter< Operation::CREATE_DATASET > dCreate; + Parameter dCreate; dCreate.name = name; dCreate.extent = getExtent(); dCreate.dtype = getDatatype(); @@ -92,39 +93,37 @@ PatchRecordComponent::flush(std::string const& name) IOHandler()->enqueue(IOTask(this, dCreate)); } - while( !m_chunks->empty() ) + while (!m_chunks->empty()) { IOHandler()->enqueue(m_chunks->front()); m_chunks->pop(); } - flushAttributes(); + flushAttributes(flushParams); } } -void 
-PatchRecordComponent::read() +void PatchRecordComponent::read() { - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "unitSI"; IOHandler()->enqueue(IOTask(this, aRead)); - IOHandler()->flush(); - if( *aRead.dtype == Datatype::DOUBLE ) - setUnitSI(Attribute(*aRead.resource).get< double >()); + IOHandler()->flush(internal::defaultFlushParams); + if (*aRead.dtype == Datatype::DOUBLE) + setUnitSI(Attribute(*aRead.resource).get()); else throw std::runtime_error("Unexpected Attribute datatype for 'unitSI'"); - readAttributes( ReadMode::FullyReread ); // this will set dirty() = false + readAttributes(ReadMode::FullyReread); // this will set dirty() = false } -bool -PatchRecordComponent::dirtyRecursive() const +bool PatchRecordComponent::dirtyRecursive() const { - if( this->dirty() ) + if (this->dirty()) { return true; } return !m_chunks->empty(); } -} // openPMD +} // namespace openPMD diff --git a/src/backend/Writable.cpp b/src/backend/Writable.cpp index a22a18bf52..0ce92ed003 100644 --- a/src/backend/Writable.cpp +++ b/src/backend/Writable.cpp @@ -22,30 +22,21 @@ #include "openPMD/Series.hpp" #include "openPMD/auxiliary/DerefDynamicCast.hpp" - namespace openPMD { - Writable::Writable(internal::AttributableData* a) - : abstractFilePosition{nullptr}, - IOHandler{nullptr}, - attributable{a}, - parent{nullptr}, - dirty{true}, - written{false} - { } +Writable::Writable(internal::AttributableData *a) : attributable{a} +{} - void - Writable::seriesFlush() - { - seriesFlush( FlushLevel::UserFlush ); - } +void Writable::seriesFlush() +{ + seriesFlush({FlushLevel::UserFlush}); +} - void - Writable::seriesFlush( FlushLevel level ) - { - auto & series = AttributableInterface( attributable ).retrieveSeries(); - series.flush_impl( - series.iterations.begin(), series.iterations.end(), level ); - } +void Writable::seriesFlush(internal::FlushParams flushParams) +{ + auto &series = AttributableInterface(attributable).retrieveSeries(); + series.flush_impl( + series.iterations.begin(), series.iterations.end(), flushParams); +} -} // openPMD +} // namespace openPMD diff --git a/src/benchmark/mpi/OneDimensionalBlockSlicer.cpp b/src/benchmark/mpi/OneDimensionalBlockSlicer.cpp index 97cba7769b..e494b175de 100644 --- a/src/benchmark/mpi/OneDimensionalBlockSlicer.cpp +++ b/src/benchmark/mpi/OneDimensionalBlockSlicer.cpp @@ -23,74 +23,53 @@ #include - namespace openPMD { - OneDimensionalBlockSlicer::OneDimensionalBlockSlicer( Extent::value_type dim ) : - m_dim { dim } - {} +OneDimensionalBlockSlicer::OneDimensionalBlockSlicer(Extent::value_type dim) + : m_dim{dim} +{} +std::pair +OneDimensionalBlockSlicer::sliceBlock(Extent &totalExtent, int size, int rank) +{ + Offset offs(totalExtent.size(), 0); - std::pair< - Offset, - Extent - > OneDimensionalBlockSlicer::sliceBlock( - Extent & totalExtent, - int size, - int rank - ) + if (rank >= size) { - Offset offs( - totalExtent.size( ), - 0 - ); - - if( rank >= size ) - { - Extent extent( - totalExtent.size( ), - 0 - ); - return std::make_pair( - std::move( offs ), - std::move( extent ) - ); - } - - auto dim = this->m_dim; + Extent extent(totalExtent.size(), 0); + return std::make_pair(std::move(offs), std::move(extent)); + } - // for more equal balancing, we want the start index - // at the upper gaussian bracket of (N/n*rank) - // where N the size of the dataset in dimension dim - // and n the MPI size - // for avoiding integer overflow, this is the same as: - // (N div n)*rank + round((N%n)/n*rank) - auto f = [&totalExtent, size, dim]( int threadRank 
) - { - auto N = totalExtent[dim]; - auto res = ( N / size ) * threadRank; - auto padDivident = ( N % size ) * threadRank; - auto pad = padDivident / size; - if( pad * size < padDivident ) - { - pad += 1; - } - return res + pad; - }; + auto dim = this->m_dim; - offs[dim] = f( rank ); - Extent localExtent { totalExtent }; - if( rank >= size - 1 ) - { - localExtent[dim] -= offs[dim]; - } - else + // for more equal balancing, we want the start index + // at the upper gaussian bracket of (N/n*rank) + // where N the size of the dataset in dimension dim + // and n the MPI size + // for avoiding integer overflow, this is the same as: + // (N div n)*rank + round((N%n)/n*rank) + auto f = [&totalExtent, size, dim](int threadRank) { + auto N = totalExtent[dim]; + auto res = (N / size) * threadRank; + auto padDivident = (N % size) * threadRank; + auto pad = padDivident / size; + if (pad * size < padDivident) { - localExtent[dim] = f( rank + 1 ) - offs[dim]; + pad += 1; } - return std::make_pair( - std::move( offs ), - std::move( localExtent ) - ); + return res + pad; + }; + + offs[dim] = f(rank); + Extent localExtent{totalExtent}; + if (rank >= size - 1) + { + localExtent[dim] -= offs[dim]; + } + else + { + localExtent[dim] = f(rank + 1) - offs[dim]; } + return std::make_pair(std::move(offs), std::move(localExtent)); } +} // namespace openPMD diff --git a/src/binding/python/Access.cpp b/src/binding/python/Access.cpp index 0b1c274c7c..338f42db25 100644 --- a/src/binding/python/Access.cpp +++ b/src/binding/python/Access.cpp @@ -26,11 +26,10 @@ namespace py = pybind11; using namespace openPMD; - -void init_Access(py::module &m) { +void init_Access(py::module &m) +{ py::enum_(m, "Access") .value("read_only", Access::READ_ONLY) .value("read_write", Access::READ_WRITE) - .value("create", Access::CREATE) - ; + .value("create", Access::CREATE); } diff --git a/src/binding/python/Attributable.cpp b/src/binding/python/Attributable.cpp index c5aaafb61c..179efe69e0 100644 --- a/src/binding/python/Attributable.cpp +++ b/src/binding/python/Attributable.cpp @@ -19,15 +19,15 @@ * If not, see . 
*/ #include "openPMD/backend/Attributable.hpp" -#include "openPMD/backend/Attribute.hpp" +#include "openPMD/DatatypeHelpers.hpp" #include "openPMD/auxiliary/Variant.hpp" +#include "openPMD/backend/Attribute.hpp" #include "openPMD/binding/python/Numpy.hpp" #include "openPMD/binding/python/Variant.hpp" -#include "openPMD/DatatypeHelpers.hpp" #include -#include #include +#include #include #include @@ -36,18 +36,15 @@ #include #include - namespace py = pybind11; using namespace openPMD; -using PyAttributeKeys = std::vector< std::string >; -//PYBIND11_MAKE_OPAQUE(PyAttributeKeys) +using PyAttributeKeys = std::vector; +// PYBIND11_MAKE_OPAQUE(PyAttributeKeys) bool setAttributeFromBufferInfo( - Attributable & attr, - std::string const& key, - py::buffer& a -) { + Attributable &attr, std::string const &key, py::buffer &a) +{ using DT = Datatype; py::buffer_info buf = a.request(); @@ -59,68 +56,78 @@ bool setAttributeFromBufferInfo( // https://github.com/pybind/pybind11/issues/1224#issuecomment-354357392 // scalars, see PEP 3118 // requires Numpy 1.15+ - if( buf.ndim == 0 ) { + if (buf.ndim == 0) + { // refs: // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html // https://docs.python.org/3/library/struct.html#format-characters // std::cout << " scalar type '" << buf.format << "'" << std::endl; // typestring: encoding + type + number of bytes - switch( dtype_from_bufferformat( buf.format ) ) + switch (dtype_from_bufferformat(buf.format)) { - case DT::BOOL: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::SHORT: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::INT: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::LONG: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::LONGLONG: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::USHORT: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::UINT: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::ULONG: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::ULONGLONG: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::FLOAT: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::DOUBLE: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::LONG_DOUBLE: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::CFLOAT: - return attr.setAttribute( key, *static_cast*>(buf.ptr) ); - break; - case DT::CDOUBLE: - return attr.setAttribute( key, *static_cast*>(buf.ptr) ); - break; - case DT::CLONG_DOUBLE: - return attr.setAttribute( key, *static_cast*>(buf.ptr) ); - break; - default: - throw std::runtime_error("set_attribute: Unknown " - "Python type '" + buf.format + - "' for attribute '" + key + "'"); + case DT::BOOL: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::SHORT: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::INT: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::LONG: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::LONGLONG: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::USHORT: + return attr.setAttribute( + key, *static_cast(buf.ptr)); + break; + case DT::UINT: + return attr.setAttribute( + key, *static_cast(buf.ptr)); + break; + case DT::ULONG: + return attr.setAttribute( + key, 
*static_cast(buf.ptr)); + break; + case DT::ULONGLONG: + return attr.setAttribute( + key, *static_cast(buf.ptr)); + break; + case DT::FLOAT: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::DOUBLE: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::LONG_DOUBLE: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::CFLOAT: + return attr.setAttribute( + key, *static_cast *>(buf.ptr)); + break; + case DT::CDOUBLE: + return attr.setAttribute( + key, *static_cast *>(buf.ptr)); + break; + case DT::CLONG_DOUBLE: + return attr.setAttribute( + key, *static_cast *>(buf.ptr)); + break; + default: + throw std::runtime_error( + "set_attribute: Unknown " + "Python type '" + + buf.format + "' for attribute '" + key + "'"); } return false; } // lists & ndarrays: all will be flattended to 1D lists - else { + else + { // std::cout << " array type '" << buf.format << "'" << std::endl; /* required are contiguous buffers @@ -128,18 +135,18 @@ bool setAttributeFromBufferInfo( * - not strided with paddings * - not a view in another buffer that results in striding */ - auto* view = new Py_buffer(); + auto *view = new Py_buffer(); int flags = PyBUF_STRIDES | PyBUF_FORMAT; - if( PyObject_GetBuffer( a.ptr(), view, flags ) != 0 ) + if (PyObject_GetBuffer(a.ptr(), view, flags) != 0) { delete view; throw py::error_already_set(); } - bool isContiguous = ( PyBuffer_IsContiguous( view, 'A' ) != 0 ); - PyBuffer_Release( view ); + bool isContiguous = (PyBuffer_IsContiguous(view, 'A') != 0); + PyBuffer_Release(view); delete view; - if( !isContiguous ) + if (!isContiguous) throw py::index_error( "non-contiguous buffer provided, handling not implemented!"); // @todo in order to implement stride handling, one needs to @@ -155,106 +162,108 @@ bool setAttributeFromBufferInfo( ) ); else */ // std::cout << "+++++++++++ BUFFER: " << buf.format << std::endl; - if( buf.format.find("b") != std::string::npos ) - return attr.setAttribute( key, + if (buf.format.find("b") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("h") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("h") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("i") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("i") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("l") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("l") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("q") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("q") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("B") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + 
static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("B") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("H") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("H") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("I") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("I") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("L") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("L") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("Q") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("Q") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("Zf") != std::string::npos ) - return attr.setAttribute( key, - std::vector>( - static_cast*>(buf.ptr), - static_cast*>(buf.ptr) + buf.size - ) ); - else if( buf.format.find("Zd") != std::string::npos ) - return attr.setAttribute( key, - std::vector>( - static_cast*>(buf.ptr), - static_cast*>(buf.ptr) + buf.size - ) ); - else if( buf.format.find("Zg") != std::string::npos ) - return attr.setAttribute( key, - std::vector>( - static_cast*>(buf.ptr), - static_cast*>(buf.ptr) + buf.size - ) ); - else if( buf.format.find("f") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("Zf") != std::string::npos) + return attr.setAttribute( + key, + std::vector>( + static_cast *>(buf.ptr), + static_cast *>(buf.ptr) + buf.size)); + else if (buf.format.find("Zd") != std::string::npos) + return attr.setAttribute( + key, + std::vector>( + static_cast *>(buf.ptr), + static_cast *>(buf.ptr) + buf.size)); + else if (buf.format.find("Zg") != std::string::npos) + return attr.setAttribute( + key, + std::vector>( + static_cast *>(buf.ptr), + static_cast *>(buf.ptr) + + buf.size)); + else if (buf.format.find("f") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("d") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("d") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("g") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("g") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); else - throw 
std::runtime_error("set_attribute: Unknown " - "Python type '" + buf.format + - "' for attribute '" + key + "'"); + throw std::runtime_error( + "set_attribute: Unknown " + "Python type '" + + buf.format + "' for attribute '" + key + "'"); return false; } @@ -264,146 +273,137 @@ struct SetAttributeFromObject { std::string errorMsg = "Attributable.set_attribute()"; - template< typename RequestedType > - bool operator()( - Attributable & attr, - std::string const& key, - py::object& obj ) + template + bool operator()(Attributable &attr, std::string const &key, py::object &obj) { - if( std::string( py::str( obj.get_type() ) ) == "" ) + if (std::string(py::str(obj.get_type())) == "") { - using ListType = std::vector< RequestedType >; - return attr.setAttribute< ListType >( key, obj.cast< ListType >() ); + using ListType = std::vector; + return attr.setAttribute(key, obj.cast()); } else { - return attr.setAttribute< RequestedType >( - key, obj.cast< RequestedType >() ); + return attr.setAttribute( + key, obj.cast()); } } }; -template<> -bool SetAttributeFromObject::operator()< double >( - Attributable & attr, std::string const & key, py::object & obj ) +template <> +bool SetAttributeFromObject::operator()( + Attributable &attr, std::string const &key, py::object &obj) { - if( std::string( py::str( obj.get_type() ) ) == "" ) + if (std::string(py::str(obj.get_type())) == "") { - using ListType = std::vector< double >; - using ArrayType = std::array< double, 7 >; - ListType const & asVector = obj.cast< ListType >(); - if( asVector.size() == 7 && key == "unitDimension" ) + using ListType = std::vector; + using ArrayType = std::array; + ListType const &asVector = obj.cast(); + if (asVector.size() == 7 && key == "unitDimension") { ArrayType asArray; - std::copy_n( asVector.begin(), 7, asArray.begin() ); - return attr.setAttribute< ArrayType >( key, asArray ); + std::copy_n(asVector.begin(), 7, asArray.begin()); + return attr.setAttribute(key, asArray); } else { - return attr.setAttribute< ListType >( key, asVector ); + return attr.setAttribute(key, asVector); } } else { - return attr.setAttribute< double >( key, obj.cast< double >() ); + return attr.setAttribute(key, obj.cast()); } } -template<> -bool SetAttributeFromObject::operator()< bool >( - Attributable & attr, std::string const & key, py::object & obj ) +template <> +bool SetAttributeFromObject::operator()( + Attributable &attr, std::string const &key, py::object &obj) { - return attr.setAttribute< bool >( key, obj.cast< bool >() ); + return attr.setAttribute(key, obj.cast()); } -template<> -bool SetAttributeFromObject::operator()< char >( - Attributable & attr, std::string const & key, py::object & obj ) +template <> +bool SetAttributeFromObject::operator()( + Attributable &attr, std::string const &key, py::object &obj) { - if( std::string( py::str( obj.get_type() ) ) == "" ) + if (std::string(py::str(obj.get_type())) == "") { - using ListChar = std::vector< char >; - using ListString = std::vector< std::string >; + using ListChar = std::vector; + using ListString = std::vector; try { - return attr.setAttribute< ListString >( - key, obj.cast< ListString >() ); + return attr.setAttribute(key, obj.cast()); } - catch( const py::cast_error & ) + catch (const py::cast_error &) { - return attr.setAttribute< ListChar >( key, obj.cast< ListChar >() ); + return attr.setAttribute(key, obj.cast()); } } - else if( std::string( py::str( obj.get_type() ) ) == "" ) + else if (std::string(py::str(obj.get_type())) == "") { - return attr.setAttribute< 
std::string >( - key, obj.cast< std::string >() ); + return attr.setAttribute(key, obj.cast()); } else { - return attr.setAttribute< char >( key, obj.cast< char >() ); + return attr.setAttribute(key, obj.cast()); } } bool setAttributeFromObject( - Attributable & attr, - std::string const & key, - py::object & obj, - pybind11::dtype datatype ) + Attributable &attr, + std::string const &key, + py::object &obj, + pybind11::dtype datatype) { - Datatype requestedDatatype = dtype_from_numpy( datatype ); + Datatype requestedDatatype = dtype_from_numpy(datatype); static SetAttributeFromObject safo; - return switchNonVectorType( requestedDatatype, safo, attr, key, obj ); + return switchNonVectorType(requestedDatatype, safo, attr, key, obj); } -void init_Attributable(py::module &m) { +void init_Attributable(py::module &m) +{ py::class_(m, "Attributable") .def(py::init()) - .def("__repr__", - [](Attributable const & attr) { - return ""; - } - ) - .def("series_flush", py::overload_cast< >(&Attributable::seriesFlush)) + .def( + "__repr__", + [](Attributable const &attr) { + return ""; + }) + .def("series_flush", py::overload_cast<>(&Attributable::seriesFlush)) .def_property_readonly( "attributes", - []( Attributable & attr ) - { - return attr.attributes(); - }, + [](Attributable &attr) { return attr.attributes(); }, // ref + keepalive - py::return_value_policy::reference_internal - ) + py::return_value_policy::reference_internal) // C++ pass-through API: Setter // note that the order of overloads is important! - // all buffer protocol compatible objects, including numpy arrays if not specialized specifically... - .def("set_attribute", []( Attributable & attr, std::string const& key, py::buffer& a ) { - // std::cout << "set attr via py::buffer: " << key << std::endl; - return setAttributeFromBufferInfo( - attr, - key, - a - ); - }) - .def("set_attribute", []( - Attributable & attr, - std::string const& key, - py::object& obj, - pybind11::dtype datatype ) - { - return setAttributeFromObject( attr, key, obj, datatype ); - }, + // all buffer protocol compatible objects, including numpy arrays if not + // specialized specifically... 
+ .def( + "set_attribute", + [](Attributable &attr, std::string const &key, py::buffer &a) { + // std::cout << "set attr via py::buffer: " << key << std::endl; + return setAttributeFromBufferInfo(attr, key, a); + }) + .def( + "set_attribute", + [](Attributable &attr, + std::string const &key, + py::object &obj, + pybind11::dtype datatype) { + return setAttributeFromObject(attr, key, obj, datatype); + }, py::arg("key"), py::arg("value"), - py::arg("datatype") - ) + py::arg("datatype")) // fundamental Python types - .def("set_attribute", &Attributable::setAttribute< bool >) - .def("set_attribute", &Attributable::setAttribute< unsigned char >) + .def("set_attribute", &Attributable::setAttribute) + .def("set_attribute", &Attributable::setAttribute) // -> handle all native python integers as long // .def("set_attribute", &Attributable::setAttribute< short >) // .def("set_attribute", &Attributable::setAttribute< int >) @@ -412,63 +412,83 @@ void init_Attributable(py::module &m) { // .def("set_attribute", &Attributable::setAttribute< unsigned short >) // .def("set_attribute", &Attributable::setAttribute< unsigned int >) // .def("set_attribute", &Attributable::setAttribute< unsigned long >) - // .def("set_attribute", &Attributable::setAttribute< unsigned long long >) - .def("set_attribute", &Attributable::setAttribute< long >) + // .def("set_attribute", &Attributable::setAttribute< unsigned long long + // >) + .def("set_attribute", &Attributable::setAttribute) // work-around for https://github.com/pybind/pybind11/issues/1512 // -> handle all native python floats as double // .def("set_attribute", &Attributable::setAttribute< float >) // .def("set_attribute", &Attributable::setAttribute< long double >) - .def("set_attribute", &Attributable::setAttribute< double >) + .def("set_attribute", &Attributable::setAttribute) // work-around for https://github.com/pybind/pybind11/issues/1509 // -> since there is only str in Python, chars are strings // .def("set_attribute", &Attributable::setAttribute< char >) - .def("set_attribute", []( Attributable & attr, std::string const& key, std::string const& value ) { - return attr.setAttribute( key, value ); - }) + .def( + "set_attribute", + [](Attributable &attr, + std::string const &key, + std::string const &value) { + return attr.setAttribute(key, value); + }) - // Plain Python arrays and plain python lists of homogeneous, fundamental Python types - // not specialized in C++ API - // .def("set_attribute", &Attributable::setAttribute< std::vector< bool > >) - // there is only str in Python, chars are strings - // .def("set_attribute", &Attributable::setAttribute< std::vector< char > >) - .def("set_attribute", &Attributable::setAttribute< std::vector< unsigned char > >) - .def("set_attribute", &Attributable::setAttribute< std::vector< long > >) - .def("set_attribute", &Attributable::setAttribute< std::vector< double > >) // TODO: this implicitly casts list of complex - // probably affected by bug https://github.com/pybind/pybind11/issues/1258 - .def("set_attribute", []( Attributable & attr, std::string const& key, std::vector< std::string > const& value ) { - return attr.setAttribute( key, value ); - }) - // .def("set_attribute", &Attributable::setAttribute< std::array< double, 7 > >) + // Plain Python arrays and plain python lists of homogeneous, + // fundamental Python types not specialized in C++ API + // .def("set_attribute", &Attributable::setAttribute< std::vector< bool + // > >) there is only str in Python, chars are strings + // .def("set_attribute", 
&Attributable::setAttribute< std::vector< char + // > >) + .def( + "set_attribute", + &Attributable::setAttribute>) + .def("set_attribute", &Attributable::setAttribute>) + .def( + "set_attribute", + &Attributable::setAttribute>) // TODO: this implicitly casts list of complex + // probably affected by bug + // https://github.com/pybind/pybind11/issues/1258 + .def( + "set_attribute", + [](Attributable &attr, + std::string const &key, + std::vector const &value) { + return attr.setAttribute(key, value); + }) + // .def("set_attribute", &Attributable::setAttribute< std::array< + // double, 7 > >) // C++ pass-through API: Getter - .def("get_attribute", []( Attributable & attr, std::string const& key ) { - auto v = attr.getAttribute(key); - return v.getResource(); - // TODO instead of returning lists, return all arrays (ndim > 0) as numpy arrays? - }) - .def_property_readonly("attribute_dtypes", []( Attributable const & attributable ) { - std::map< std::string, pybind11::dtype > dtypes; - for( auto const & attr : attributable.attributes() ) - { - dtypes[ attr ] = - dtype_to_numpy( attributable.getAttribute( attr ).dtype ); - } - return dtypes; - }) + .def( + "get_attribute", + [](Attributable &attr, std::string const &key) { + auto v = attr.getAttribute(key); + return v.getResource(); + // TODO instead of returning lists, return all arrays (ndim > 0) + // as numpy arrays? + }) + .def_property_readonly( + "attribute_dtypes", + [](Attributable const &attributable) { + std::map dtypes; + for (auto const &attr : attributable.attributes()) + { + dtypes[attr] = + dtype_to_numpy(attributable.getAttribute(attr).dtype); + } + return dtypes; + }) .def("delete_attribute", &Attributable::deleteAttribute) .def("contains_attribute", &Attributable::containsAttribute) .def("__len__", &Attributable::numAttributes) - // @todo _ipython_key_completions_ if we find a way to add a [] interface + // @todo _ipython_key_completions_ if we find a way to add a [] + // interface - .def_property("comment", &Attributable::comment, &Attributable::setComment) + .def_property( + "comment", &Attributable::comment, &Attributable::setComment) // TODO remove in future versions (deprecated) - .def("set_comment", &Attributable::setComment) - ; + .def("set_comment", &Attributable::setComment); - py::bind_vector< PyAttributeKeys >( - m, - "Attribute_Keys" - ); + py::bind_vector(m, "Attribute_Keys"); } diff --git a/src/binding/python/BaseRecord.cpp b/src/binding/python/BaseRecord.cpp index e3a1e6f57c..e893b54174 100644 --- a/src/binding/python/BaseRecord.cpp +++ b/src/binding/python/BaseRecord.cpp @@ -22,29 +22,44 @@ #include #include "openPMD/backend/BaseRecord.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/backend/Container.hpp" #include "openPMD/backend/MeshRecordComponent.hpp" -#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/backend/PatchRecordComponent.hpp" #include "openPMD/binding/python/UnitDimension.hpp" namespace py = pybind11; using namespace openPMD; - -void init_BaseRecord(py::module &m) { +void init_BaseRecord(py::module &m) +{ constexpr auto doc_scalar = R"docstr( Returns true if this record only contains a single component. 
)docstr"; - py::class_, Container< BaseRecordComponent > >(m, "Base_Record_Base_Record_Component") - .def_property_readonly("unit_dimension", &BaseRecord< BaseRecordComponent >::unitDimension, python::doc_unit_dimension) - .def_property_readonly("scalar", &BaseRecord< BaseRecordComponent >::scalar, doc_scalar); + py::class_< + BaseRecord, + Container >(m, "Base_Record_Base_Record_Component") + .def_property_readonly( + "unit_dimension", + &BaseRecord::unitDimension, + python::doc_unit_dimension) + .def_property_readonly( + "scalar", &BaseRecord::scalar, doc_scalar); - py::class_, Container< RecordComponent > >(m, "Base_Record_Record_Component") - .def_property_readonly("scalar", &BaseRecord< RecordComponent >::scalar, doc_scalar); - py::class_, Container< MeshRecordComponent > >(m, "Base_Record_Mesh_Record_Component") - .def_property_readonly("scalar", &BaseRecord< MeshRecordComponent >::scalar, doc_scalar); - py::class_, Container< PatchRecordComponent > >(m, "Base_Record_Patch_Record_Component") - .def_property_readonly("scalar", &BaseRecord< PatchRecordComponent >::scalar, doc_scalar); + py::class_, Container >( + m, "Base_Record_Record_Component") + .def_property_readonly( + "scalar", &BaseRecord::scalar, doc_scalar); + py::class_< + BaseRecord, + Container >(m, "Base_Record_Mesh_Record_Component") + .def_property_readonly( + "scalar", &BaseRecord::scalar, doc_scalar); + py::class_< + BaseRecord, + Container >( + m, "Base_Record_Patch_Record_Component") + .def_property_readonly( + "scalar", &BaseRecord::scalar, doc_scalar); } diff --git a/src/binding/python/BaseRecordComponent.cpp b/src/binding/python/BaseRecordComponent.cpp index 8b2747477f..0a3a306e76 100644 --- a/src/binding/python/BaseRecordComponent.cpp +++ b/src/binding/python/BaseRecordComponent.cpp @@ -18,12 +18,12 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . 
*/ -#include #include +#include #include -#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/Datatype.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/binding/python/Numpy.hpp" #include @@ -31,25 +31,24 @@ namespace py = pybind11; using namespace openPMD; - -void init_BaseRecordComponent(py::module &m) { +void init_BaseRecordComponent(py::module &m) +{ py::class_(m, "Base_Record_Component") - .def("__repr__", - [](BaseRecordComponent const & brc) { + .def( + "__repr__", + [](BaseRecordComponent const &brc) { std::stringstream ss; ss << ""; return ss.str(); - } - ) + }) .def("reset_datatype", &BaseRecordComponent::resetDatatype) .def("available_chunks", &BaseRecordComponent::availableChunks) .def_property_readonly("unit_SI", &BaseRecordComponent::unitSI) .def_property_readonly("constant", &BaseRecordComponent::constant) - .def_property_readonly("dtype", [](BaseRecordComponent & brc) { - return dtype_to_numpy( brc.getDatatype() ); - }) - ; + .def_property_readonly("dtype", [](BaseRecordComponent &brc) { + return dtype_to_numpy(brc.getDatatype()); + }); } diff --git a/src/binding/python/ChunkInfo.cpp b/src/binding/python/ChunkInfo.cpp index 6a8aff6286..5edc29353b 100644 --- a/src/binding/python/ChunkInfo.cpp +++ b/src/binding/python/ChunkInfo.cpp @@ -29,34 +29,34 @@ namespace py = pybind11; using namespace openPMD; - -void init_Chunk(py::module &m) { +void init_Chunk(py::module &m) +{ py::class_(m, "ChunkInfo") - .def(py::init(), - py::arg("offset"), py::arg("extent")) - .def("__repr__", - [](const ChunkInfo & c) { - return ""; - } - ) + .def(py::init(), py::arg("offset"), py::arg("extent")) + .def( + "__repr__", + [](const ChunkInfo &c) { + return ""; + }) .def_readwrite("offset", &ChunkInfo::offset) - .def_readwrite("extent", &ChunkInfo::extent) - ; + .def_readwrite("extent", &ChunkInfo::extent); py::class_(m, "WrittenChunkInfo") - .def(py::init(), - py::arg("offset"), py::arg("extent")) - .def(py::init(), - py::arg("offset"), py::arg("extent"), py::arg("rank")) - .def("__repr__", - [](const WrittenChunkInfo & c) { - return ""; - } - ) - .def_readwrite("offset", &WrittenChunkInfo::offset ) - .def_readwrite("extent", &WrittenChunkInfo::extent ) - .def_readwrite("source_id", &WrittenChunkInfo::sourceID ) + .def(py::init(), py::arg("offset"), py::arg("extent")) + .def( + py::init(), + py::arg("offset"), + py::arg("extent"), + py::arg("rank")) + .def( + "__repr__", + [](const WrittenChunkInfo &c) { + return ""; + }) + .def_readwrite("offset", &WrittenChunkInfo::offset) + .def_readwrite("extent", &WrittenChunkInfo::extent) + .def_readwrite("source_id", &WrittenChunkInfo::sourceID) .def(py::pickle( // __getstate__ @@ -70,13 +70,11 @@ void init_Chunk(py::module &m) { if (t.size() != 3) throw std::runtime_error("Invalid state!"); - auto const offset = t[0].cast< Offset >(); - auto const extent = t[1].cast< Extent >(); - auto const sourceID = t[2].cast< decltype(WrittenChunkInfo::sourceID) >(); + auto const offset = t[0].cast(); + auto const extent = t[1].cast(); + auto const sourceID = + t[2].cast(); return WrittenChunkInfo(offset, extent, sourceID); - } - )) - ; + })); } - diff --git a/src/binding/python/Container.cpp b/src/binding/python/Container.cpp index 7bfeddceae..357cb0bba2 100644 --- a/src/binding/python/Container.cpp +++ b/src/binding/python/Container.cpp @@ -25,195 +25,126 @@ */ #include -#include #include +#include -#include "openPMD/backend/Container.hpp" -#include "openPMD/backend/BaseRecord.hpp" -#include 
"openPMD/backend/MeshRecordComponent.hpp" -#include "openPMD/backend/PatchRecordComponent.hpp" -#include "openPMD/backend/BaseRecordComponent.hpp" -#include "openPMD/backend/PatchRecord.hpp" #include "openPMD/Iteration.hpp" #include "openPMD/Mesh.hpp" -#include "openPMD/ParticleSpecies.hpp" #include "openPMD/ParticlePatches.hpp" +#include "openPMD/ParticleSpecies.hpp" #include "openPMD/Record.hpp" +#include "openPMD/backend/BaseRecord.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" +#include "openPMD/backend/Container.hpp" +#include "openPMD/backend/MeshRecordComponent.hpp" +#include "openPMD/backend/PatchRecord.hpp" +#include "openPMD/backend/PatchRecordComponent.hpp" -#include #include +#include #include namespace py = pybind11; using namespace openPMD; - namespace detail { - /* based on std_bind.h in pybind11 - * - * Copyright (c) 2016 Sergey Lyskov and Wenzel Jakob - * - * BSD-style license, see pybind11 LICENSE file. - */ - template< - typename Map, - typename holder_type = std::unique_ptr< Map >, - typename... Args - > - py::class_< - Map, - holder_type, - Attributable - > bind_container( - py::handle scope, - std::string const & name, - Args && ... args - ) +/* based on std_bind.h in pybind11 + * + * Copyright (c) 2016 Sergey Lyskov and Wenzel Jakob + * + * BSD-style license, see pybind11 LICENSE file. + */ +template < + typename Map, + typename holder_type = std::unique_ptr, + typename... Args> +py::class_ +bind_container(py::handle scope, std::string const &name, Args &&...args) +{ + using KeyType = typename Map::key_type; + using MappedType = typename Map::mapped_type; + using Class_ = py::class_; + + // If either type is a non-module-local bound type then make the map + // binding non-local as well; otherwise (e.g. both types are either + // module-local or converting) the map will be module-local. + auto tinfo = py::detail::get_type_info(typeid(MappedType)); + bool local = !tinfo || tinfo->module_local; + if (local) { - using KeyType = typename Map::key_type; - using MappedType = typename Map::mapped_type; - using Class_ = py::class_< - Map, - holder_type, - Attributable - >; - - // If either type is a non-module-local bound type then make the map - // binding non-local as well; otherwise (e.g. both types are either - // module-local or converting) the map will be module-local. - auto tinfo = py::detail::get_type_info( typeid( MappedType ) ); - bool local = !tinfo || tinfo->module_local; - if( local ) { - tinfo = py::detail::get_type_info( typeid( KeyType ) ); - local = !tinfo || tinfo->module_local; - } - - Class_ cl( - scope, - name.c_str(), - py::module_local( local ), - std::forward< Args >( args ) ... 
- ); - - cl.def( py::init() ); - - // Register stream insertion operator (if possible) - py::detail::map_if_insertion_operator< - Map, - Class_ - >( - cl, - name - ); - - cl.def( - "__bool__", - []( const Map & m ) - -> bool - { - return !m.empty(); - }, - "Check whether the container is nonempty" - ); - - cl.def( - "__iter__", - []( Map & m ) - { - return py::make_key_iterator( - m.begin(), - m.end() - ); - }, - // keep container alive while iterator exists - py::keep_alive< - 0, - 1 - >() - ); - - cl.def( - "items", - []( Map & m ) - { - return py::make_iterator( - m.begin(), - m.end() - ); - }, - // keep container alive while iterator exists - py::keep_alive< - 0, - 1 - >() - ); - - // keep same policy as Container class: missing keys are created - cl.def( - "__getitem__", - []( - Map & m, - KeyType const & k - ) -> MappedType & { - return m[ k ]; - }, - // ref + keepalive - py::return_value_policy::reference_internal - ); - - // Assignment provided only if the type is copyable - py::detail::map_assignment< - Map, - Class_ - >( cl ); - - cl.def( - "__delitem__", - []( - Map &m, - KeyType const & k - ) { - auto it = m.find( k ); - if( it == m.end() ) - throw py::key_error(); - m.erase( it ); - } - ); - - cl.def( - "__len__", - &Map::size - ); - - cl.def( - "_ipython_key_completions_", - []( Map & m ) { - auto l = py::list(); - for( const auto &myPair : m ) - l.append( myPair.first ); - return l; - } - ); - - return cl; + tinfo = py::detail::get_type_info(typeid(KeyType)); + local = !tinfo || tinfo->module_local; } -} // namespace detail + Class_ cl( + scope, + name.c_str(), + py::module_local(local), + std::forward(args)...); + + cl.def(py::init()); + + // Register stream insertion operator (if possible) + py::detail::map_if_insertion_operator(cl, name); + + cl.def( + "__bool__", + [](const Map &m) -> bool { return !m.empty(); }, + "Check whether the container is nonempty"); + + cl.def( + "__iter__", + [](Map &m) { return py::make_key_iterator(m.begin(), m.end()); }, + // keep container alive while iterator exists + py::keep_alive<0, 1>()); + + cl.def( + "items", + [](Map &m) { return py::make_iterator(m.begin(), m.end()); }, + // keep container alive while iterator exists + py::keep_alive<0, 1>()); + + // keep same policy as Container class: missing keys are created + cl.def( + "__getitem__", + [](Map &m, KeyType const &k) -> MappedType & { return m[k]; }, + // ref + keepalive + py::return_value_policy::reference_internal); + + // Assignment provided only if the type is copyable + py::detail::map_assignment(cl); + + cl.def("__delitem__", [](Map &m, KeyType const &k) { + auto it = m.find(k); + if (it == m.end()) + throw py::key_error(); + m.erase(it); + }); + + cl.def("__len__", &Map::size); + + cl.def("_ipython_key_completions_", [](Map &m) { + auto l = py::list(); + for (const auto &myPair : m) + l.append(myPair.first); + return l; + }); + + return cl; +} +} // namespace detail -using PyIterationContainer = Container< - Iteration, - uint64_t ->; -using PyMeshContainer = Container< Mesh >; -using PyPartContainer = Container< ParticleSpecies >; -using PyPatchContainer = Container< ParticlePatches >; -using PyRecordContainer = Container< Record >; -using PyPatchRecordContainer = Container< PatchRecord >; -using PyRecordComponentContainer = Container< RecordComponent >; -using PyMeshRecordComponentContainer = Container< MeshRecordComponent >; -using PyPatchRecordComponentContainer = Container< PatchRecordComponent >; -using PyBaseRecordComponentContainer = Container< BaseRecordComponent >; 
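For comparison, stock pybind11 ships py::bind_map (pybind11/stl_bind.h), whose __getitem__ raises KeyError for unknown keys; the bind_container helper above instead derives the binding from Attributable and forwards __getitem__ to Container::operator[], so missing entries are created on access, matching the C++ Container policy noted in the comment. A minimal module using the stock binder, for contrast (the module and class names here are illustrative only):

    #include <pybind11/pybind11.h>
    #include <pybind11/stl_bind.h>

    #include <map>
    #include <string>

    namespace py = pybind11;

    // opaque, so the map is exposed by reference instead of being copied to a dict
    PYBIND11_MAKE_OPAQUE(std::map<std::string, int>)

    PYBIND11_MODULE(bind_map_demo, m)
    {
        // stock binder: d["missing"] raises KeyError, unlike the Container binding
        py::bind_map<std::map<std::string, int>>(m, "Map_String_Int");
    }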
+using PyIterationContainer = Container; +using PyMeshContainer = Container; +using PyPartContainer = Container; +using PyPatchContainer = Container; +using PyRecordContainer = Container; +using PyPatchRecordContainer = Container; +using PyRecordComponentContainer = Container; +using PyMeshRecordComponentContainer = Container; +using PyPatchRecordComponentContainer = Container; +using PyBaseRecordComponentContainer = Container; PYBIND11_MAKE_OPAQUE(PyIterationContainer) PYBIND11_MAKE_OPAQUE(PyMeshContainer) PYBIND11_MAKE_OPAQUE(PyPartContainer) @@ -225,45 +156,21 @@ PYBIND11_MAKE_OPAQUE(PyMeshRecordComponentContainer) PYBIND11_MAKE_OPAQUE(PyPatchRecordComponentContainer) PYBIND11_MAKE_OPAQUE(PyBaseRecordComponentContainer) -void init_Container( py::module & m ) { - ::detail::bind_container< PyIterationContainer >( - m, - "Iteration_Container" - ); - ::detail::bind_container< PyMeshContainer >( - m, - "Mesh_Container" - ); - ::detail::bind_container< PyPartContainer >( - m, - "Particle_Container" - ); - ::detail::bind_container< PyPatchContainer >( - m, - "Particle_Patches_Container" - ); - ::detail::bind_container< PyRecordContainer >( - m, - "Record_Container" - ); - ::detail::bind_container< PyPatchRecordContainer >( - m, - "Patch_Record_Container" - ); - ::detail::bind_container< PyRecordComponentContainer >( - m, - "Record_Component_Container" - ); - ::detail::bind_container< PyMeshRecordComponentContainer >( - m, - "Mesh_Record_Component_Container" - ); - ::detail::bind_container< PyPatchRecordComponentContainer >( - m, - "Patch_Record_Component_Container" - ); - ::detail::bind_container< PyBaseRecordComponentContainer >( - m, - "Base_Record_Component_Container" - ); +void init_Container(py::module &m) +{ + ::detail::bind_container(m, "Iteration_Container"); + ::detail::bind_container(m, "Mesh_Container"); + ::detail::bind_container(m, "Particle_Container"); + ::detail::bind_container(m, "Particle_Patches_Container"); + ::detail::bind_container(m, "Record_Container"); + ::detail::bind_container( + m, "Patch_Record_Container"); + ::detail::bind_container( + m, "Record_Component_Container"); + ::detail::bind_container( + m, "Mesh_Record_Component_Container"); + ::detail::bind_container( + m, "Patch_Record_Component_Container"); + ::detail::bind_container( + m, "Base_Record_Component_Container"); } diff --git a/src/binding/python/Dataset.cpp b/src/binding/python/Dataset.cpp index d18ceb8556..e4f6a1e5f7 100644 --- a/src/binding/python/Dataset.cpp +++ b/src/binding/python/Dataset.cpp @@ -29,35 +29,39 @@ namespace py = pybind11; using namespace openPMD; - -void init_Dataset(py::module &m) { +void init_Dataset(py::module &m) +{ py::class_(m, "Dataset") - .def(py::init(), - py::arg("dtype"), py::arg("extent") - ) + .def(py::init(), py::arg("dtype"), py::arg("extent")) .def(py::init(), py::arg("extent")) - .def(py::init( [](py::dtype dt, Extent e) { - auto const d = dtype_from_numpy( dt ); - return new Dataset{d, e}; - }), - py::arg("dtype"), py::arg("extent") - ) - .def(py::init(), - py::arg("dtype"), py::arg("extent"), py::arg("options") - ) - .def(py::init( [](py::dtype dt, Extent e, std::string options) { - auto const d = dtype_from_numpy( dt ); - return new Dataset{d, e, std::move(options)}; - }), - py::arg("dtype"), py::arg("extent"), py::arg("options") - ) + .def( + py::init([](py::dtype dt, Extent e) { + auto const d = dtype_from_numpy(dt); + return new Dataset{d, e}; + }), + py::arg("dtype"), + py::arg("extent")) + .def( + py::init(), + py::arg("dtype"), + py::arg("extent"), + 
py::arg("options")) + .def( + py::init([](py::dtype dt, Extent e, std::string options) { + auto const d = dtype_from_numpy(dt); + return new Dataset{d, e, std::move(options)}; + }), + py::arg("dtype"), + py::arg("extent"), + py::arg("options")) - .def("__repr__", + .def( + "__repr__", [](const Dataset &d) { - return ""; - } - ) + return ""; + }) .def_readonly("extent", &Dataset::extent) .def("extend", &Dataset::extend) @@ -68,10 +72,7 @@ void init_Dataset(py::module &m) { .def_readonly("transform", &Dataset::transform) .def("set_custom_transform", &Dataset::setCustomTransform) .def_readonly("rank", &Dataset::rank) - .def_property_readonly("dtype", [](const Dataset &d) { - return dtype_to_numpy( d.dtype ); - }) - .def_readwrite("options", &Dataset::options) - ; + .def_property_readonly( + "dtype", [](const Dataset &d) { return dtype_to_numpy(d.dtype); }) + .def_readwrite("options", &Dataset::options); } - diff --git a/src/binding/python/Datatype.cpp b/src/binding/python/Datatype.cpp index bb81444409..1af2977443 100644 --- a/src/binding/python/Datatype.cpp +++ b/src/binding/python/Datatype.cpp @@ -27,8 +27,8 @@ namespace py = pybind11; using namespace openPMD; - -void init_Datatype(py::module &m) { +void init_Datatype(py::module &m) +{ py::enum_(m, "Datatype", py::arithmetic()) .value("CHAR", Datatype::CHAR) .value("UCHAR", Datatype::UCHAR) @@ -61,13 +61,12 @@ void init_Datatype(py::module &m) { .value("ARR_DBL_7", Datatype::ARR_DBL_7) .value("BOOL", Datatype::BOOL) .value("DATATYPE", Datatype::DATATYPE) - .value("UNDEFINED", Datatype::UNDEFINED) - ; + .value("UNDEFINED", Datatype::UNDEFINED); m.def("determine_datatype", [](py::dtype const dt) { - return dtype_from_numpy( dt ); + return dtype_from_numpy(dt); }); - m.def("determine_datatype", [](py::array const & a) { - return dtype_from_numpy( a.dtype() ); + m.def("determine_datatype", [](py::array const &a) { + return dtype_from_numpy(a.dtype()); }); } diff --git a/src/binding/python/Helper.cpp b/src/binding/python/Helper.cpp index 64da843ac2..9b07a37f70 100644 --- a/src/binding/python/Helper.cpp +++ b/src/binding/python/Helper.cpp @@ -21,33 +21,31 @@ #include #include +#include "openPMD/Series.hpp" #include "openPMD/cli/ls.hpp" #include "openPMD/helper/list_series.hpp" -#include "openPMD/Series.hpp" -#include #include +#include #include - namespace py = pybind11; using namespace openPMD; -void init_Helper(py::module &m) { - m.def("list_series", - [](Series & series, bool const longer) { - std::stringstream s; - helper::listSeries( series, longer, s ); - py::print(s.str()); - }, - py::arg("series"), - py::arg_v("longer", false, "Print more verbose output."), - "List information about an openPMD data series" - ) - // CLI entry point - .def("_ls_run", // &cli::ls::run - [](std::vector< std::string > & argv) { - return cli::ls::run( argv ); - } - ); +void init_Helper(py::module &m) +{ + m.def( + "list_series", + [](Series &series, bool const longer) { + std::stringstream s; + helper::listSeries(series, longer, s); + py::print(s.str()); + }, + py::arg("series"), + py::arg_v("longer", false, "Print more verbose output."), + "List information about an openPMD data series") + // CLI entry point + .def( + "_ls_run", // &cli::ls::run + [](std::vector &argv) { return cli::ls::run(argv); }); } diff --git a/src/binding/python/Iteration.cpp b/src/binding/python/Iteration.cpp index ec85f4fd9a..480de17e7e 100644 --- a/src/binding/python/Iteration.cpp +++ b/src/binding/python/Iteration.cpp @@ -23,29 +23,41 @@ #include "openPMD/Iteration.hpp" +#include 
+#include #include namespace py = pybind11; using namespace openPMD; - -void init_Iteration(py::module &m) { +void init_Iteration(py::module &m) +{ py::class_(m, "Iteration") .def(py::init()) - .def("__repr__", - [](Iteration const & it) { - return ""; - } - ) + .def( + "__repr__", + [](Iteration const &it) { + std::stringstream ss; + ss << ""; + return ss.str(); + }) - .def_property("time", &Iteration::time, &Iteration::setTime) - .def_property("time", &Iteration::time, &Iteration::setTime) - .def_property("time", &Iteration::time, &Iteration::setTime) + .def_property( + "time", &Iteration::time, &Iteration::setTime) + .def_property( + "time", &Iteration::time, &Iteration::setTime) + .def_property( + "time", + &Iteration::time, + &Iteration::setTime) .def_property("dt", &Iteration::dt, &Iteration::setDt) .def_property("dt", &Iteration::dt, &Iteration::setDt) - .def_property("dt", &Iteration::dt, &Iteration::setDt) - .def_property("time_unit_SI", &Iteration::timeUnitSI, &Iteration::setTimeUnitSI) + .def_property( + "dt", &Iteration::dt, &Iteration::setDt) + .def_property( + "time_unit_SI", &Iteration::timeUnitSI, &Iteration::setTimeUnitSI) .def("open", &Iteration::open) .def("close", &Iteration::close, py::arg("flush") = true) @@ -58,13 +70,16 @@ void init_Iteration(py::module &m) { .def("set_dt", &Iteration::setDt) .def("set_time_unit_SI", &Iteration::setTimeUnitSI) - .def_readwrite("meshes", &Iteration::meshes, + .def_readwrite( + "meshes", + &Iteration::meshes, py::return_value_policy::reference, // garbage collection: return value must be freed before Iteration py::keep_alive<1, 0>()) - .def_readwrite("particles", &Iteration::particles, + .def_readwrite( + "particles", + &Iteration::particles, py::return_value_policy::reference, // garbage collection: return value must be freed before Iteration - py::keep_alive<1, 0>()) - ; + py::keep_alive<1, 0>()); } diff --git a/src/binding/python/IterationEncoding.cpp b/src/binding/python/IterationEncoding.cpp index a4edc7c03c..479ef65555 100644 --- a/src/binding/python/IterationEncoding.cpp +++ b/src/binding/python/IterationEncoding.cpp @@ -26,11 +26,10 @@ namespace py = pybind11; using namespace openPMD; - -void init_IterationEncoding(py::module &m) { +void init_IterationEncoding(py::module &m) +{ py::enum_(m, "Iteration_Encoding") .value("file_based", IterationEncoding::fileBased) .value("group_based", IterationEncoding::groupBased) - .value("variable_based", IterationEncoding::variableBased) - ; + .value("variable_based", IterationEncoding::variableBased); } diff --git a/src/binding/python/Mesh.cpp b/src/binding/python/Mesh.cpp index 3ff67e9771..43b078b9d8 100644 --- a/src/binding/python/Mesh.cpp +++ b/src/binding/python/Mesh.cpp @@ -24,8 +24,8 @@ #include "openPMD/Mesh.hpp" #include "openPMD/backend/BaseRecord.hpp" #include "openPMD/backend/MeshRecordComponent.hpp" -#include "openPMD/binding/python/UnitDimension.hpp" #include "openPMD/binding/python/Pickle.hpp" +#include "openPMD/binding/python/UnitDimension.hpp" #include #include @@ -33,45 +33,79 @@ namespace py = pybind11; using namespace openPMD; - -void init_Mesh(py::module &m) { +void init_Mesh(py::module &m) +{ py::class_ > cl(m, "Mesh"); - cl - .def(py::init()) + cl.def(py::init()) - .def("__repr__", - [](Mesh const & mesh) { - return ""; - } - ) + .def( + "__repr__", + [](Mesh const &mesh) { + return ""; + }) - .def_property("unit_dimension", + .def_property( + "unit_dimension", &Mesh::unitDimension, &Mesh::setUnitDimension, python::doc_unit_dimension) - .def_property("geometry", 
&Mesh::geometry, py::overload_cast(&Mesh::setGeometry)) .def_property( - "geometry_string", &Mesh::geometryString, py::overload_cast(&Mesh::setGeometry)) - .def_property("geometry_parameters", &Mesh::geometryParameters, &Mesh::setGeometryParameters) - .def_property("data_order", - [](Mesh const & mesh){ return static_cast< char >(mesh.dataOrder()); }, - [](Mesh & mesh, char d){ mesh.setDataOrder(Mesh::DataOrder(d)); }, - "Data Order of the Mesh (deprecated and set to C in openPMD 2)" - ) + "geometry", + &Mesh::geometry, + py::overload_cast(&Mesh::setGeometry)) + .def_property( + "geometry_string", + &Mesh::geometryString, + py::overload_cast(&Mesh::setGeometry)) + .def_property( + "geometry_parameters", + &Mesh::geometryParameters, + &Mesh::setGeometryParameters) + .def_property( + "data_order", + [](Mesh const &mesh) { + return static_cast(mesh.dataOrder()); + }, + [](Mesh &mesh, char d) { mesh.setDataOrder(Mesh::DataOrder(d)); }, + "Data Order of the Mesh (deprecated and set to C in openPMD 2)") .def_property("axis_labels", &Mesh::axisLabels, &Mesh::setAxisLabels) - .def_property("grid_spacing", &Mesh::gridSpacing, &Mesh::setGridSpacing) - .def_property("grid_spacing", &Mesh::gridSpacing, &Mesh::setGridSpacing) - .def_property("grid_spacing", &Mesh::gridSpacing, &Mesh::setGridSpacing) - .def_property("grid_global_offset", &Mesh::gridGlobalOffset, &Mesh::setGridGlobalOffset) + .def_property( + "grid_spacing", + &Mesh::gridSpacing, + &Mesh::setGridSpacing) + .def_property( + "grid_spacing", + &Mesh::gridSpacing, + &Mesh::setGridSpacing) + .def_property( + "grid_spacing", + &Mesh::gridSpacing, + &Mesh::setGridSpacing) + .def_property( + "grid_global_offset", + &Mesh::gridGlobalOffset, + &Mesh::setGridGlobalOffset) .def_property("grid_unit_SI", &Mesh::gridUnitSI, &Mesh::setGridUnitSI) - .def_property("time_offset", &Mesh::timeOffset, &Mesh::setTimeOffset) - .def_property("time_offset", &Mesh::timeOffset, &Mesh::setTimeOffset) - .def_property("time_offset", &Mesh::timeOffset, &Mesh::setTimeOffset) + .def_property( + "time_offset", + &Mesh::timeOffset, + &Mesh::setTimeOffset) + .def_property( + "time_offset", + &Mesh::timeOffset, + &Mesh::setTimeOffset) + .def_property( + "time_offset", + &Mesh::timeOffset, + &Mesh::setTimeOffset) // TODO remove in future versions (deprecated) .def("set_unit_dimension", &Mesh::setUnitDimension) - .def("set_geometry", py::overload_cast(&Mesh::setGeometry)) + .def( + "set_geometry", + py::overload_cast(&Mesh::setGeometry)) .def("set_geometry", py::overload_cast(&Mesh::setGeometry)) .def("set_geometry_parameters", &Mesh::setGeometryParameters) .def("set_axis_labels", &Mesh::setAxisLabels) @@ -79,21 +113,17 @@ void init_Mesh(py::module &m) { .def("set_grid_spacing", &Mesh::setGridSpacing) .def("set_grid_spacing", &Mesh::setGridSpacing) .def("set_grid_global_offset", &Mesh::setGridGlobalOffset) - .def("set_grid_unit_SI", &Mesh::setGridUnitSI) - ; + .def("set_grid_unit_SI", &Mesh::setGridUnitSI); add_pickle( - cl, - [](openPMD::Series & series, std::vector< std::string > const & group ) { + cl, [](openPMD::Series &series, std::vector const &group) { uint64_t const n_it = std::stoull(group.at(1)); return series.iterations[n_it].meshes[group.at(3)]; - } - ); + }); py::enum_(m, "Geometry") .value("cartesian", Mesh::Geometry::cartesian) .value("thetaMode", Mesh::Geometry::thetaMode) .value("cylindrical", Mesh::Geometry::cylindrical) .value("spherical", Mesh::Geometry::spherical) - .value("other", Mesh::Geometry::other) - ; + .value("other", Mesh::Geometry::other); } 
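The add_pickle resolvers used here and in the neighbouring bindings all follow the same contract: the pickled state appears to carry the object's group path split into components (for a mesh record component roughly "data", "<iteration>", "meshes", "<record>", "<component>"), and the lambda re-navigates that path on unpickle, which is why group.at(1) is parsed as the iteration number and group.at(3)/group.at(4) select the record and component. A standalone restatement of the mesh-component resolver (function and variable names are illustrative; the indices mirror the lambdas in this patch):

    #include <openPMD/openPMD.hpp>

    #include <cstdint>
    #include <string>
    #include <vector>

    // walk from a reopened Series back to the pickled object
    // group.at(1): iteration number, group.at(3): mesh name, group.at(4): component
    openPMD::MeshRecordComponent resolveMeshComponent(
        openPMD::Series &series, std::vector<std::string> const &group)
    {
        std::uint64_t const n_it = std::stoull(group.at(1));
        return series.iterations[n_it].meshes[group.at(3)][group.at(4)];
    }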
diff --git a/src/binding/python/MeshRecordComponent.cpp b/src/binding/python/MeshRecordComponent.cpp index dea58425e0..ff702b53d5 100644 --- a/src/binding/python/MeshRecordComponent.cpp +++ b/src/binding/python/MeshRecordComponent.cpp @@ -21,9 +21,9 @@ #include #include -#include "openPMD/backend/MeshRecordComponent.hpp" #include "openPMD/RecordComponent.hpp" #include "openPMD/Series.hpp" +#include "openPMD/backend/MeshRecordComponent.hpp" #include "openPMD/binding/python/Pickle.hpp" #include @@ -32,35 +32,38 @@ namespace py = pybind11; using namespace openPMD; +void init_MeshRecordComponent(py::module &m) +{ + py::class_ cl( + m, "Mesh_Record_Component"); + cl.def( + "__repr__", + [](MeshRecordComponent const &rc) { + return ""; + }) -void init_MeshRecordComponent(py::module &m) { - py::class_ cl(m, "Mesh_Record_Component"); - cl - .def("__repr__", - [](MeshRecordComponent const & rc) { - return ""; - } - ) - - .def_property("position", + .def_property( + "position", &MeshRecordComponent::position, &MeshRecordComponent::setPosition, - "Relative position of the component on an element (node/cell/voxel) of the mesh") - .def_property("position", + "Relative position of the component on an element " + "(node/cell/voxel) of the mesh") + .def_property( + "position", &MeshRecordComponent::position, &MeshRecordComponent::setPosition, - "Relative position of the component on an element (node/cell/voxel) of the mesh") - .def_property("position", + "Relative position of the component on an element " + "(node/cell/voxel) of the mesh") + .def_property( + "position", &MeshRecordComponent::position, &MeshRecordComponent::setPosition, - "Relative position of the component on an element (node/cell/voxel) of the mesh") - ; + "Relative position of the component on an element " + "(node/cell/voxel) of the mesh"); add_pickle( - cl, - [](openPMD::Series & series, std::vector< std::string > const & group ) { + cl, [](openPMD::Series &series, std::vector const &group) { uint64_t const n_it = std::stoull(group.at(1)); return series.iterations[n_it].meshes[group.at(3)][group.at(4)]; - } - ); + }); } diff --git a/src/binding/python/ParticlePatches.cpp b/src/binding/python/ParticlePatches.cpp index 3950080861..7c52652717 100644 --- a/src/binding/python/ParticlePatches.cpp +++ b/src/binding/python/ParticlePatches.cpp @@ -22,23 +22,23 @@ #include #include "openPMD/ParticlePatches.hpp" -#include "openPMD/backend/PatchRecord.hpp" #include "openPMD/backend/Container.hpp" +#include "openPMD/backend/PatchRecord.hpp" #include namespace py = pybind11; using namespace openPMD; +void init_ParticlePatches(py::module &m) +{ + py::class_ >(m, "Particle_Patches") + .def( + "__repr__", + [](ParticlePatches const &pp) { + return ""; + }) -void init_ParticlePatches(py::module &m) { - py::class_ >(m, "Particle_Patches") - .def("__repr__", - [](ParticlePatches const & pp) { - return ""; - } - ) - - .def_property_readonly("num_patches", &ParticlePatches::numPatches) - ; + .def_property_readonly("num_patches", &ParticlePatches::numPatches); } diff --git a/src/binding/python/ParticleSpecies.cpp b/src/binding/python/ParticleSpecies.cpp index deaf0c6a8f..b6929ad66f 100644 --- a/src/binding/python/ParticleSpecies.cpp +++ b/src/binding/python/ParticleSpecies.cpp @@ -33,23 +33,17 @@ namespace py = pybind11; using namespace openPMD; +void init_ParticleSpecies(py::module &m) +{ + py::class_ > cl(m, "ParticleSpecies"); + cl.def( + "__repr__", + [](ParticleSpecies const &) { return ""; }) -void init_ParticleSpecies(py::module &m) { - py::class_ > 
cl(m, "ParticleSpecies"); - cl - .def("__repr__", - [](ParticleSpecies const &) { - return ""; - } - ) - - .def_readwrite("particle_patches", &ParticleSpecies::particlePatches) - ; + .def_readwrite("particle_patches", &ParticleSpecies::particlePatches); add_pickle( - cl, - [](openPMD::Series & series, std::vector< std::string > const & group ) { + cl, [](openPMD::Series &series, std::vector const &group) { uint64_t const n_it = std::stoull(group.at(1)); return series.iterations[n_it].particles[group.at(3)]; - } - ); + }); } diff --git a/src/binding/python/PatchRecord.cpp b/src/binding/python/PatchRecord.cpp index 191ff9fc6f..7d01add8ea 100644 --- a/src/binding/python/PatchRecord.cpp +++ b/src/binding/python/PatchRecord.cpp @@ -21,23 +21,24 @@ #include #include +#include "openPMD/backend/BaseRecord.hpp" #include "openPMD/backend/PatchRecord.hpp" #include "openPMD/backend/PatchRecordComponent.hpp" -#include "openPMD/backend/BaseRecord.hpp" #include "openPMD/binding/python/UnitDimension.hpp" namespace py = pybind11; using namespace openPMD; - -void init_PatchRecord(py::module &m) { - py::class_ >(m, "Patch_Record") - .def_property("unit_dimension", - &PatchRecord::unitDimension, - &PatchRecord::setUnitDimension, - python::doc_unit_dimension) +void init_PatchRecord(py::module &m) +{ + py::class_ >( + m, "Patch_Record") + .def_property( + "unit_dimension", + &PatchRecord::unitDimension, + &PatchRecord::setUnitDimension, + python::doc_unit_dimension) // TODO remove in future versions (deprecated) - .def("set_unit_dimension", &PatchRecord::setUnitDimension) - ; + .def("set_unit_dimension", &PatchRecord::setUnitDimension); } diff --git a/src/binding/python/PatchRecordComponent.cpp b/src/binding/python/PatchRecordComponent.cpp index 4d9cc5f151..cecd57b252 100644 --- a/src/binding/python/PatchRecordComponent.cpp +++ b/src/binding/python/PatchRecordComponent.cpp @@ -21,148 +21,181 @@ #include #include -#include "openPMD/backend/PatchRecordComponent.hpp" -#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/auxiliary/ShareRaw.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" +#include "openPMD/backend/PatchRecordComponent.hpp" #include "openPMD/binding/python/Numpy.hpp" namespace py = pybind11; using namespace openPMD; - -void init_PatchRecordComponent(py::module &m) { - py::class_(m, "Patch_Record_Component") - .def_property("unit_SI", &BaseRecordComponent::unitSI, &PatchRecordComponent::setUnitSI) +void init_PatchRecordComponent(py::module &m) +{ + py::class_( + m, "Patch_Record_Component") + .def_property( + "unit_SI", + &BaseRecordComponent::unitSI, + &PatchRecordComponent::setUnitSI) .def("reset_dataset", &PatchRecordComponent::resetDataset) - .def_property_readonly("ndims", &PatchRecordComponent::getDimensionality) + .def_property_readonly( + "ndims", &PatchRecordComponent::getDimensionality) .def_property_readonly("shape", &PatchRecordComponent::getExtent) - .def("load", [](PatchRecordComponent & prc) { + .def( + "load", + [](PatchRecordComponent &prc) { + auto const dtype = dtype_to_numpy(prc.getDatatype()); + auto a = py::array(dtype, prc.getExtent()[0]); - auto const dtype = dtype_to_numpy( prc.getDatatype() ); - auto a = py::array( dtype, prc.getExtent()[0] ); + if (prc.getDatatype() == Datatype::CHAR) + prc.load(shareRaw((char *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::UCHAR) + prc.load( + shareRaw((unsigned char *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::SHORT) + prc.load(shareRaw((short *)a.mutable_data())); + else if 
(prc.getDatatype() == Datatype::INT) + prc.load(shareRaw((int *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::LONG) + prc.load(shareRaw((long *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::LONGLONG) + prc.load( + shareRaw((long long *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::USHORT) + prc.load( + shareRaw((unsigned short *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::UINT) + prc.load( + shareRaw((unsigned int *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::ULONG) + prc.load( + shareRaw((unsigned long *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::ULONGLONG) + prc.load( + shareRaw((unsigned long long *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::LONG_DOUBLE) + prc.load( + shareRaw((long double *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::DOUBLE) + prc.load(shareRaw((double *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::FLOAT) + prc.load(shareRaw((float *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::BOOL) + prc.load(shareRaw((bool *)a.mutable_data())); + else + throw std::runtime_error( + std::string("Datatype not known in 'load'!")); - if( prc.getDatatype() == Datatype::CHAR ) - prc.load(shareRaw((char*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::UCHAR ) - prc.load(shareRaw((unsigned char*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::SHORT ) - prc.load(shareRaw((short*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::INT ) - prc.load(shareRaw((int*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::LONG ) - prc.load(shareRaw((long*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::LONGLONG ) - prc.load(shareRaw((long long*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::USHORT ) - prc.load(shareRaw((unsigned short*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::UINT ) - prc.load(shareRaw((unsigned int*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::ULONG ) - prc.load(shareRaw((unsigned long*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::ULONGLONG ) - prc.load(shareRaw((unsigned long long*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::LONG_DOUBLE ) - prc.load(shareRaw((long double*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::DOUBLE ) - prc.load(shareRaw((double*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::FLOAT ) - prc.load(shareRaw((float*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::BOOL ) - prc.load(shareRaw((bool*) a.mutable_data())); - else - throw std::runtime_error(std::string("Datatype not known in 'load'!")); - - return a; - }) + return a; + }) // all buffer types - .def("store", [](PatchRecordComponent & prc, uint64_t idx, py::buffer a) { - py::buffer_info buf = a.request(); - auto const dtype = dtype_from_bufferformat( buf.format ); + .def( + "store", + [](PatchRecordComponent &prc, uint64_t idx, py::buffer a) { + py::buffer_info buf = a.request(); + auto const dtype = dtype_from_bufferformat(buf.format); - using DT = Datatype; + using DT = Datatype; - // allow one-element n-dimensional buffers as well - py::ssize_t numElements = 1; - if( buf.ndim > 0 ) { - for( auto d = 0; d < buf.ndim; ++d ) - numElements *= buf.shape.at(d); - } + // allow one-element n-dimensional buffers as well + py::ssize_t numElements = 1; + if (buf.ndim > 0) + { + for (auto d = 0; d < buf.ndim; ++d) + numElements *= 
buf.shape.at(d); + } - // Numpy: Handling of arrays and scalars - // work-around for https://github.com/pybind/pybind11/issues/1224 - // -> passing numpy scalars as buffers needs numpy 1.15+ - // https://github.com/numpy/numpy/issues/10265 - // https://github.com/pybind/pybind11/issues/1224#issuecomment-354357392 - // scalars, see PEP 3118 - // requires Numpy 1.15+ - if( numElements == 1 ) { - // refs: - // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html - // https://docs.python.org/3/library/struct.html#format-characters - // std::cout << " scalar type '" << buf.format << "'" << std::endl; - // typestring: encoding + type + number of bytes - switch( dtype ) + // Numpy: Handling of arrays and scalars + // work-around for + // https://github.com/pybind/pybind11/issues/1224 + // -> passing numpy scalars as buffers needs numpy 1.15+ + // https://github.com/numpy/numpy/issues/10265 + // https://github.com/pybind/pybind11/issues/1224#issuecomment-354357392 + // scalars, see PEP 3118 + // requires Numpy 1.15+ + if (numElements == 1) { + // refs: + // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html + // https://docs.python.org/3/library/struct.html#format-characters + // std::cout << " scalar type '" << buf.format << "'" << + // std::endl; typestring: encoding + type + number of bytes + switch (dtype) + { case DT::BOOL: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store(idx, *static_cast(buf.ptr)); break; case DT::SHORT: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store(idx, *static_cast(buf.ptr)); break; case DT::INT: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store(idx, *static_cast(buf.ptr)); break; case DT::LONG: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store(idx, *static_cast(buf.ptr)); break; case DT::LONGLONG: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store( + idx, *static_cast(buf.ptr)); break; case DT::USHORT: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store( + idx, *static_cast(buf.ptr)); break; case DT::UINT: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store( + idx, *static_cast(buf.ptr)); break; case DT::ULONG: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store( + idx, *static_cast(buf.ptr)); break; case DT::ULONGLONG: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store( + idx, *static_cast(buf.ptr)); break; case DT::FLOAT: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store(idx, *static_cast(buf.ptr)); break; case DT::DOUBLE: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store(idx, *static_cast(buf.ptr)); break; case DT::LONG_DOUBLE: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store( + idx, *static_cast(buf.ptr)); break; default: - throw std::runtime_error("store: " + throw std::runtime_error( + "store: " "Unknown Datatype!"); + } + } + else + { + throw std::runtime_error( + "store: " + "Only scalar values supported!"); } - } - else - { - throw std::runtime_error("store: " - "Only scalar values supported!"); - } - }, py::arg("idx"), py::arg("data") - ) + }, + py::arg("idx"), + py::arg("data")) // allowed python intrinsics, after (!) 
buffer matching - .def("store", &PatchRecordComponent::store, - py::arg("idx"), py::arg("data")) - .def("store", &PatchRecordComponent::store, - py::arg("idx"), py::arg("data")) + .def( + "store", + &PatchRecordComponent::store, + py::arg("idx"), + py::arg("data")) + .def( + "store", + &PatchRecordComponent::store, + py::arg("idx"), + py::arg("data")) // TODO implement convenient, patch-object level store/load // TODO remove in future versions (deprecated) - .def("set_unit_SI", &PatchRecordComponent::setUnitSI) - ; + .def("set_unit_SI", &PatchRecordComponent::setUnitSI); } diff --git a/src/binding/python/Record.cpp b/src/binding/python/Record.cpp index e442b05e36..39ad120c6e 100644 --- a/src/binding/python/Record.cpp +++ b/src/binding/python/Record.cpp @@ -22,10 +22,10 @@ #include #include "openPMD/Record.hpp" -#include "openPMD/backend/BaseRecord.hpp" #include "openPMD/RecordComponent.hpp" -#include "openPMD/binding/python/UnitDimension.hpp" +#include "openPMD/backend/BaseRecord.hpp" #include "openPMD/binding/python/Pickle.hpp" +#include "openPMD/binding/python/UnitDimension.hpp" #include #include @@ -33,38 +33,40 @@ namespace py = pybind11; using namespace openPMD; +void init_Record(py::module &m) +{ + py::class_ > cl(m, "Record"); + cl.def(py::init()) -void init_Record(py::module &m) { - py::class_ > cl(m, "Record"); - cl - .def(py::init()) - - .def("__repr__", - [](Record const &) { - return ""; - } - ) + .def("__repr__", [](Record const &) { return ""; }) - .def_property("unit_dimension", - &Record::unitDimension, - &Record::setUnitDimension, - python::doc_unit_dimension) + .def_property( + "unit_dimension", + &Record::unitDimension, + &Record::setUnitDimension, + python::doc_unit_dimension) - .def_property("time_offset", &Record::timeOffset, &Record::setTimeOffset) - .def_property("time_offset", &Record::timeOffset, &Record::setTimeOffset) - .def_property("time_offset", &Record::timeOffset, &Record::setTimeOffset) + .def_property( + "time_offset", + &Record::timeOffset, + &Record::setTimeOffset) + .def_property( + "time_offset", + &Record::timeOffset, + &Record::setTimeOffset) + .def_property( + "time_offset", + &Record::timeOffset, + &Record::setTimeOffset) // TODO remove in future versions (deprecated) .def("set_unit_dimension", &Record::setUnitDimension) .def("set_time_offset", &Record::setTimeOffset) .def("set_time_offset", &Record::setTimeOffset) - .def("set_time_offset", &Record::setTimeOffset) - ; + .def("set_time_offset", &Record::setTimeOffset); add_pickle( - cl, - [](openPMD::Series & series, std::vector< std::string > const & group ) { + cl, [](openPMD::Series &series, std::vector const &group) { uint64_t const n_it = std::stoull(group.at(1)); return series.iterations[n_it].particles[group.at(3)][group.at(4)]; - } - ); + }); } diff --git a/src/binding/python/RecordComponent.cpp b/src/binding/python/RecordComponent.cpp index 4b735f5fff..3975a25c99 100644 --- a/src/binding/python/RecordComponent.cpp +++ b/src/binding/python/RecordComponent.cpp @@ -18,16 +18,16 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . 
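
Editor's note: the `PatchRecordComponent.store` binding in the hunk above accepts any object implementing the Python buffer protocol and, for one-element buffers, dereferences the pointer with a `static_cast` chosen from the Datatype derived from the PEP 3118 format string. The stand-alone sketch below illustrates that pattern in isolation; `read_scalar` and its double/int-only dispatch are hypothetical simplifications, not part of the openPMD-api API.

```cpp
// Minimal sketch (not openPMD-api code): read a one-element NumPy scalar
// passed through the buffer protocol, dispatching on the PEP 3118 format.
#include <pybind11/pybind11.h>

#include <stdexcept>

namespace py = pybind11;

double read_scalar(py::buffer b)
{
    py::buffer_info info = b.request();

    // a 0-d array has ndim == 0; otherwise every dimension must be 1
    py::ssize_t numElements = 1;
    for (py::ssize_t d = 0; d < info.ndim; ++d)
        numElements *= info.shape.at(d);
    if (numElements != 1)
        throw std::runtime_error("only scalar values supported");

    // format characters follow https://docs.python.org/3/library/struct.html
    if (info.format == py::format_descriptor<double>::format())
        return *static_cast<double *>(info.ptr);
    if (info.format == py::format_descriptor<int>::format())
        return static_cast<double>(*static_cast<int *>(info.ptr));
    throw std::runtime_error("unsupported scalar format: " + info.format);
}
```

In the bindings above, the same idea is simply repeated once per supported `Datatype`, with the typed `store` overloads registered afterwards as a fallback for plain Python numbers.
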
*/ -#include #include +#include #include +#include "openPMD/DatatypeHelpers.hpp" #include "openPMD/RecordComponent.hpp" -#include "openPMD/backend/BaseRecordComponent.hpp" +#include "openPMD/Series.hpp" #include "openPMD/auxiliary/ShareRaw.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/binding/python/Numpy.hpp" -#include "openPMD/DatatypeHelpers.hpp" -#include "openPMD/Series.hpp" #include "openPMD/binding/python/Pickle.hpp" #include @@ -44,14 +44,14 @@ namespace py = pybind11; using namespace openPMD; - /** Convert a py::tuple of py::slices to Offset & Extent * * https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.indexing.html * https://github.com/numpy/numpy/blob/v1.16.1/numpy/core/src/multiarray/mapping.c#L348-L375 */ -inline std::tuple< Offset, Extent, std::vector > -parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const & slices) { +inline std::tuple> parseTupleSlices( + uint8_t const ndim, Extent const &full_extent, py::tuple const &slices) +{ uint8_t const numSlices = py::len(slices); Offset offset(ndim, 0u); @@ -60,27 +60,24 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const int16_t curAxis = -1; int16_t posEllipsis = -1; - for( uint8_t i = 0u; i < numSlices; ++i ) + for (uint8_t i = 0u; i < numSlices; ++i) { ++curAxis; - if( - i >= ndim && - posEllipsis == -1 && - slices[i].ptr() != Py_Ellipsis - ) + if (i >= ndim && posEllipsis == -1 && slices[i].ptr() != Py_Ellipsis) throw py::index_error( "too many indices for dimension of record component!"); - if( slices[i].ptr() == Py_Ellipsis ) + if (slices[i].ptr() == Py_Ellipsis) { // only allowed once - if( posEllipsis != -1 ) - throw py::index_error("an index can only have a single ellipsis ('...')"); + if (posEllipsis != -1) + throw py::index_error( + "an index can only have a single ellipsis ('...')"); posEllipsis = curAxis; // might be omitted if all other indices are given as well - if( numSlices == ndim + 1 ) + if (numSlices == ndim + 1) { --curAxis; continue; @@ -88,19 +85,17 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const // how many slices were given after the ellipsis uint8_t const numSlicesAfterEllipsis = - numSlices - - uint8_t(posEllipsis) - - 1u; + numSlices - uint8_t(posEllipsis) - 1u; // how many slices does the ellipsis represent - uint8_t const numSlicesEllipsis = - numSlices - - uint8_t(posEllipsis) // slices before - - numSlicesAfterEllipsis; // slices after + uint8_t const numSlicesEllipsis = numSlices - + uint8_t(posEllipsis) // slices before + - numSlicesAfterEllipsis; // slices after // fill ellipsis indices // note: if enough further indices are given, the ellipsis // might stand for no axis: valid and ignored - for( ; curAxis < posEllipsis + int16_t(numSlicesEllipsis); ++curAxis ) + for (; curAxis < posEllipsis + int16_t(numSlicesEllipsis); + ++curAxis) { offset.at(curAxis) = 0; extent.at(curAxis) = full_extent.at(curAxis); @@ -110,21 +105,29 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const continue; } - if( PySlice_Check( slices[i].ptr() ) ) + if (PySlice_Check(slices[i].ptr())) { - py::slice slice = py::cast< py::slice >( slices[i] ); + py::slice slice = py::cast(slices[i]); size_t start, stop, step, slicelength; - if( !slice.compute( full_extent.at(curAxis), &start, &stop, &step, &slicelength ) ) + if (!slice.compute( + full_extent.at(curAxis), + &start, + &stop, + &step, + &slicelength)) throw py::error_already_set(); // TODO 
PySlice_AdjustIndices: Python 3.6.1+ - // Adjust start/end slice indices assuming a sequence of the specified length. - // Out of bounds indices are clipped in a manner consistent with the handling of normal slices. - // slicelength = PySlice_AdjustIndices(full_extent[curAxis], (ssize_t*)&start, (ssize_t*)&stop, step); + // Adjust start/end slice indices assuming a sequence of the + // specified length. Out of bounds indices are clipped in a + // manner consistent with the handling of normal slices. + // slicelength = PySlice_AdjustIndices(full_extent[curAxis], + // (ssize_t*)&start, (ssize_t*)&stop, step); - if( step != 1u ) - throw py::index_error("strides in selection are inefficient, not implemented!"); + if (step != 1u) + throw py::index_error( + "strides in selection are inefficient, not implemented!"); // verified for size later in C++ API offset.at(curAxis) = start; @@ -135,9 +138,9 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const try { - auto const index = py::cast< std::int64_t >( slices[i] ); + auto const index = py::cast(slices[i]); - if( index < 0 ) + if (index < 0) offset.at(curAxis) = full_extent.at(curAxis) + index; else offset.at(curAxis) = index; @@ -145,23 +148,21 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const extent.at(curAxis) = 1; flatten.at(curAxis) = true; // indices flatten the dimension - if( offset.at(curAxis) >= full_extent.at(curAxis) ) + if (offset.at(curAxis) >= full_extent.at(curAxis)) throw py::index_error( - std::string("index ") + - std::to_string( offset.at(curAxis) ) + + std::string("index ") + std::to_string(offset.at(curAxis)) + std::string(" is out of bounds for axis ") + - std::to_string(i) + - std::string(" with size ") + - std::to_string(full_extent.at(curAxis)) - ); + std::to_string(i) + std::string(" with size ") + + std::to_string(full_extent.at(curAxis))); continue; } - catch (const py::cast_error& e) { + catch (const py::cast_error &e) + { // not an index } - if( slices[i].ptr() == Py_None ) + if (slices[i].ptr() == Py_None) { // py::none newaxis = py::cast< py::none >( slices[i] );; throw py::index_error("None (newaxis) not implemented!"); @@ -177,7 +178,7 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const } // fill omitted higher indices with "select all" - for( ++curAxis; curAxis < int16_t(ndim); ++curAxis ) + for (++curAxis; curAxis < int16_t(ndim); ++curAxis) { extent.at(curAxis) = full_extent.at(curAxis); } @@ -192,23 +193,23 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const * - not strided with paddings * - not a view in another buffer that results in striding */ -inline void -check_buffer_is_contiguous( py::array & a ) { +inline void check_buffer_is_contiguous(py::array &a) +{ - auto* view = new Py_buffer(); + auto *view = new Py_buffer(); int flags = PyBUF_STRIDES | PyBUF_FORMAT; - if( PyObject_GetBuffer( a.ptr(), view, flags ) != 0 ) + if (PyObject_GetBuffer(a.ptr(), view, flags) != 0) { delete view; throw py::error_already_set(); } - bool isContiguous = ( PyBuffer_IsContiguous( view, 'A' ) != 0 ); - PyBuffer_Release( view ); + bool isContiguous = (PyBuffer_IsContiguous(view, 'A') != 0); + PyBuffer_Release(view); delete view; - if( !isContiguous ) + if (!isContiguous) throw py::index_error( - "strides in chunk are inefficient, not implemented!"); + "strides in chunk are inefficient, not implemented!"); // @todo in order to implement stride handling, one needs to // loop over the input data 
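
Editor's note: `parseTupleSlices` above maps every entry of an index tuple onto an `Offset`/`Extent` pair plus a `flatten` mask, and the heart of its slice branch is `py::slice::compute`. The sketch below shows that core in isolation for a fixed 2-D extent; the helper name, the two-entry tuple and the omission of Ellipsis/step handling are simplifying assumptions, not library behaviour.

```cpp
// Sketch only (hypothetical helper): turn an "a[2, 1:5]" style index tuple
// into an offset/extent selection for a 2-D dataset of extent {10, 10}.
#include <pybind11/pybind11.h>

#include <cstdint>
#include <utility>
#include <vector>

namespace py = pybind11;
using Extent64 = std::vector<std::uint64_t>;

std::pair<Extent64, Extent64> tuple_to_selection(py::tuple const &slices)
{
    Extent64 const full_extent{10, 10};
    Extent64 offset(2, 0), extent(2, 0);

    for (std::size_t i = 0; i < 2; ++i)
    {
        if (PySlice_Check(slices[i].ptr()))
        {
            size_t start, stop, step, slicelength;
            py::slice s = py::cast<py::slice>(slices[i]);
            // clips out-of-range bounds exactly like normal Python slicing
            if (!s.compute(full_extent[i], &start, &stop, &step, &slicelength))
                throw py::error_already_set();
            offset[i] = start;
            extent[i] = slicelength;
        }
        else // a plain integer picks one element (the real code also flattens the axis)
        {
            auto const index = py::cast<std::int64_t>(slices[i]);
            offset[i] = index < 0 ? full_extent[i] + index : index;
            extent[i] = 1;
        }
    }
    return {offset, extent};
}
```
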
strides in store/load calls } @@ -221,121 +222,126 @@ check_buffer_is_contiguous( py::array & a ) { * Size checks of the requested chunk (spanned data is in valid bounds) * will be performed at C++ API part in RecordComponent::storeChunk . */ -inline void -store_chunk(RecordComponent & r, py::array & a, Offset const & offset, Extent const & extent, std::vector const & flatten) { +inline void store_chunk( + RecordComponent &r, + py::array &a, + Offset const &offset, + Extent const &extent, + std::vector const &flatten) +{ // @todo keep locked until flush() is performed // a.flags.writable = false; // a.flags.owndata = false; // verify offset + extend fit in dataset extent - // some one-size dimensions might be flattended in our r due to selections by index - size_t const numFlattenDims = std::count(flatten.begin(), flatten.end(), true); + // some one-size dimensions might be flattended in our r due to selections + // by index + size_t const numFlattenDims = + std::count(flatten.begin(), flatten.end(), true); auto const r_extent = r.getExtent(); - auto const s_extent(extent); // selected extent in r - std::vector< std::uint64_t > r_shape(r_extent.size() - numFlattenDims); - std::vector< std::uint64_t > s_shape(s_extent.size() - numFlattenDims); + auto const s_extent(extent); // selected extent in r + std::vector r_shape(r_extent.size() - numFlattenDims); + std::vector s_shape(s_extent.size() - numFlattenDims); auto maskIt = flatten.begin(); std::copy_if( std::begin(r_extent), std::end(r_extent), std::begin(r_shape), - [&maskIt](std::uint64_t){ - return !*(maskIt++); - } - ); + [&maskIt](std::uint64_t) { return !*(maskIt++); }); maskIt = flatten.begin(); std::copy_if( std::begin(s_extent), std::end(s_extent), std::begin(s_shape), - [&maskIt](std::uint64_t){ - return !*(maskIt++); - } - ); + [&maskIt](std::uint64_t) { return !*(maskIt++); }); // verify shape and extent - if( size_t(a.ndim()) != r_shape.size() ) + if (size_t(a.ndim()) != r_shape.size()) throw py::index_error( - std::string("dimension of chunk (") + - std::to_string(a.ndim()) + + std::string("dimension of chunk (") + std::to_string(a.ndim()) + std::string("D) does not fit dimension of selection " "in record component (") + - std::to_string(r_shape.size()) + - std::string("D)") - ); + std::to_string(r_shape.size()) + std::string("D)")); - for( auto d = 0; d < a.ndim(); ++d ) + for (auto d = 0; d < a.ndim(); ++d) { // selection causes overflow of r - if( offset.at(d) + extent.at(d) > r_shape.at(d) ) + if (offset.at(d) + extent.at(d) > r_shape.at(d)) throw py::index_error( - std::string("slice ") + - std::to_string( offset.at(d) ) + - std::string(":") + - std::to_string( extent.at(d) ) + - std::string(" is out of bounds for axis ") + - std::to_string(d) + - std::string(" with size ") + - std::to_string(r_shape.at(d)) - ); + std::string("slice ") + std::to_string(offset.at(d)) + + std::string(":") + std::to_string(extent.at(d)) + + std::string(" is out of bounds for axis ") + std::to_string(d) + + std::string(" with size ") + std::to_string(r_shape.at(d))); // underflow of selection in r for given a - if( s_shape.at(d) != std::uint64_t(a.shape()[d]) ) + if (s_shape.at(d) != std::uint64_t(a.shape()[d])) throw py::index_error( - std::string("size of chunk (") + - std::to_string( a.shape()[d] ) + - std::string(") for axis ") + - std::to_string(d) + + std::string("size of chunk (") + std::to_string(a.shape()[d]) + + std::string(") for axis ") + std::to_string(d) + std::string(" does not match selection ") + std::string("size in record 
component (") + - std::to_string( s_extent.at(d) ) + - std::string(")") - ); + std::to_string(s_extent.at(d)) + std::string(")")); } - check_buffer_is_contiguous( a ); + check_buffer_is_contiguous(a); // here, we increase a reference on the user-passed data so that // temporary and lost-scope variables stay alive until we flush // note: this does not yet prevent the user, as in C++, to build // a race condition by manipulating the data they passed - auto store_data = [ &r, &a, &offset, &extent ]( auto cxxtype ) { + auto store_data = [&r, &a, &offset, &extent](auto cxxtype) { using CXXType = decltype(cxxtype); a.inc_ref(); - void* data = a.mutable_data(); - std::shared_ptr< CXXType > shared( ( CXXType * )data, - [ a ]( CXXType * ) { a.dec_ref(); } ); - r.storeChunk( std::move( shared ), offset, extent ); + void *data = a.mutable_data(); + std::shared_ptr shared( + (CXXType *)data, [a](CXXType *) { a.dec_ref(); }); + r.storeChunk(std::move(shared), offset, extent); }; // store - auto const dtype = dtype_from_numpy( a.dtype() ); - if( dtype == Datatype::CHAR ) store_data( char() ); - else if( dtype == Datatype::UCHAR ) store_data( (unsigned char)0 ); - else if( dtype == Datatype::SHORT ) store_data( short() ); - else if( dtype == Datatype::INT ) store_data( int() ); - else if( dtype == Datatype::LONG ) store_data( long() ); - else if( dtype == Datatype::LONGLONG ) store_data( (long long)0 ); - else if( dtype == Datatype::USHORT ) store_data( (unsigned short)0 ); - else if( dtype == Datatype::UINT ) store_data( (unsigned int)0 ); - else if( dtype == Datatype::ULONG ) store_data( (unsigned long)0 ); - else if( dtype == Datatype::ULONGLONG ) store_data( (unsigned long long)0 ); - else if( dtype == Datatype::LONG_DOUBLE ) store_data( (long double)0 ); - else if( dtype == Datatype::DOUBLE ) store_data( double() ); - else if( dtype == Datatype::FLOAT ) store_data( float() ); - else if( dtype == Datatype::CLONG_DOUBLE ) store_data( std::complex() ); - else if( dtype == Datatype::CDOUBLE ) store_data( std::complex() ); - else if( dtype == Datatype::CFLOAT ) store_data( std::complex() ); -/* @todo -.value("STRING", Datatype::STRING) -.value("VEC_STRING", Datatype::VEC_STRING) -.value("ARR_DBL_7", Datatype::ARR_DBL_7) -*/ - else if( dtype == Datatype::BOOL ) store_data( bool() ); + auto const dtype = dtype_from_numpy(a.dtype()); + if (dtype == Datatype::CHAR) + store_data(char()); + else if (dtype == Datatype::UCHAR) + store_data((unsigned char)0); + else if (dtype == Datatype::SHORT) + store_data(short()); + else if (dtype == Datatype::INT) + store_data(int()); + else if (dtype == Datatype::LONG) + store_data(long()); + else if (dtype == Datatype::LONGLONG) + store_data((long long)0); + else if (dtype == Datatype::USHORT) + store_data((unsigned short)0); + else if (dtype == Datatype::UINT) + store_data((unsigned int)0); + else if (dtype == Datatype::ULONG) + store_data((unsigned long)0); + else if (dtype == Datatype::ULONGLONG) + store_data((unsigned long long)0); + else if (dtype == Datatype::LONG_DOUBLE) + store_data((long double)0); + else if (dtype == Datatype::DOUBLE) + store_data(double()); + else if (dtype == Datatype::FLOAT) + store_data(float()); + else if (dtype == Datatype::CLONG_DOUBLE) + store_data(std::complex()); + else if (dtype == Datatype::CDOUBLE) + store_data(std::complex()); + else if (dtype == Datatype::CFLOAT) + store_data(std::complex()); + /* @todo + .value("STRING", Datatype::STRING) + .value("VEC_STRING", Datatype::VEC_STRING) + .value("ARR_DBL_7", Datatype::ARR_DBL_7) + */ 
+ else if (dtype == Datatype::BOOL) + store_data(bool()); else throw std::runtime_error( - std::string("Datatype '") + - std::string(py::str(a.dtype())) + + std::string("Datatype '") + std::string(py::str(a.dtype())) + std::string("' not known in 'storeChunk'!")); } @@ -344,7 +350,7 @@ store_chunk(RecordComponent & r, py::array & a, Offset const & offset, Extent co * Called with a py::tuple of slices and a py::array */ inline void -store_chunk(RecordComponent & r, py::array & a, py::tuple const & slices) +store_chunk(RecordComponent &r, py::array &a, py::tuple const &slices) { uint8_t ndim = r.getDimensionality(); auto const full_extent = r.getExtent(); @@ -352,7 +358,8 @@ store_chunk(RecordComponent & r, py::array & a, py::tuple const & slices) Offset offset; Extent extent; std::vector flatten; - std::tie(offset, extent, flatten) = parseTupleSlices(ndim, full_extent, slices); + std::tie(offset, extent, flatten) = + parseTupleSlices(ndim, full_extent, slices); store_chunk(r, a, offset, extent, flatten); } @@ -361,22 +368,21 @@ struct PythonDynamicMemoryView { using ShapeContainer = pybind11::array::ShapeContainer; - template< typename T > + template PythonDynamicMemoryView( - DynamicMemoryView< T > dynamicView, + DynamicMemoryView dynamicView, ShapeContainer arrayShape, - ShapeContainer strides ) - : m_dynamicView( std::shared_ptr< void >( - new DynamicMemoryView< T >( std::move( dynamicView ) ) ) ) - , m_arrayShape( std::move( arrayShape ) ) - , m_strides( std::move( strides ) ) - , m_datatype( determineDatatype< T >() ) - { - } + ShapeContainer strides) + : m_dynamicView(std::shared_ptr( + new DynamicMemoryView(std::move(dynamicView)))) + , m_arrayShape(std::move(arrayShape)) + , m_strides(std::move(strides)) + , m_datatype(determineDatatype()) + {} pybind11::memoryview currentView() const; - std::shared_ptr< void > m_dynamicView; + std::shared_ptr m_dynamicView; ShapeContainer m_arrayShape; ShapeContainer m_strides; Datatype m_datatype; @@ -386,112 +392,111 @@ namespace { struct GetCurrentView { - template< typename T > - pybind11::memoryview - operator()( PythonDynamicMemoryView const & dynamicView ) + template + pybind11::memoryview operator()(PythonDynamicMemoryView const &dynamicView) { - auto span = static_cast< DynamicMemoryView< T > * >( - dynamicView.m_dynamicView.get() )->currentBuffer(); + auto span = + static_cast *>(dynamicView.m_dynamicView.get()) + ->currentBuffer(); return py::memoryview::from_buffer( span.data(), dynamicView.m_arrayShape, dynamicView.m_strides, - /* readonly = */ false ); + /* readonly = */ false); } std::string errorMsg = "DynamicMemoryView"; }; -template<> +template <> pybind11::memoryview -GetCurrentView::operator()< std::string >( PythonDynamicMemoryView const & ) +GetCurrentView::operator()(PythonDynamicMemoryView const &) { - throw std::runtime_error( "[DynamicMemoryView] Only PODs allowed." 
); + throw std::runtime_error("[DynamicMemoryView] Only PODs allowed."); } } // namespace pybind11::memoryview PythonDynamicMemoryView::currentView() const { static GetCurrentView const cv; - return switchNonVectorType( m_datatype, cv, *this ); + return switchNonVectorType(m_datatype, cv, *this); } namespace { struct StoreChunkSpan { - template< typename T > - PythonDynamicMemoryView operator()( - RecordComponent & r, Offset const & offset, Extent const & extent ) + template + PythonDynamicMemoryView + operator()(RecordComponent &r, Offset const &offset, Extent const &extent) { - DynamicMemoryView< T > dynamicView = - r.storeChunk< T >( offset, extent ); + DynamicMemoryView dynamicView = r.storeChunk(offset, extent); pybind11::array::ShapeContainer arrayShape( - extent.begin(), extent.end() ); - std::vector< py::ssize_t > strides( extent.size() ); + extent.begin(), extent.end()); + std::vector strides(extent.size()); { - py::ssize_t accumulator = sizeof( T ); + py::ssize_t accumulator = sizeof(T); size_t dim = extent.size(); - while( dim > 0 ) + while (dim > 0) { --dim; - strides[ dim ] = accumulator; - accumulator *= extent[ dim ]; + strides[dim] = accumulator; + accumulator *= extent[dim]; } } return PythonDynamicMemoryView( - std::move( dynamicView ), - std::move( arrayShape ), - py::array::ShapeContainer( std::move( strides ) ) ); + std::move(dynamicView), + std::move(arrayShape), + py::array::ShapeContainer(std::move(strides))); } std::string errorMsg = "RecordComponent.store_chunk()"; }; -template<> -PythonDynamicMemoryView StoreChunkSpan::operator()< std::string >( - RecordComponent &, Offset const &, Extent const & ) +template <> +PythonDynamicMemoryView StoreChunkSpan::operator()( + RecordComponent &, Offset const &, Extent const &) { throw std::runtime_error( - "[RecordComponent.store_chunk()] Only PODs allowed." 
); + "[RecordComponent.store_chunk()] Only PODs allowed."); } } // namespace inline PythonDynamicMemoryView store_chunk_span( - RecordComponent & r, - Offset const & offset, - Extent const & extent, - std::vector< bool > const & flatten ) + RecordComponent &r, + Offset const &offset, + Extent const &extent, + std::vector const &flatten) { // some one-size dimensions might be flattended in our output due to // selections by index size_t const numFlattenDims = - std::count( flatten.begin(), flatten.end(), true ); - std::vector< ptrdiff_t > shape( extent.size() - numFlattenDims ); + std::count(flatten.begin(), flatten.end(), true); + std::vector shape(extent.size() - numFlattenDims); auto maskIt = flatten.begin(); std::copy_if( - std::begin( extent ), - std::end( extent ), - std::begin( shape ), - [ &maskIt ]( std::uint64_t ) { return !*( maskIt++ ); } ); + std::begin(extent), + std::end(extent), + std::begin(shape), + [&maskIt](std::uint64_t) { return !*(maskIt++); }); static StoreChunkSpan scs; - return switchNonVectorType( r.getDatatype(), scs, r, offset, extent ); + return switchNonVectorType(r.getDatatype(), scs, r, offset, extent); } inline PythonDynamicMemoryView -store_chunk_span( RecordComponent & r, py::tuple const & slices ) +store_chunk_span(RecordComponent &r, py::tuple const &slices) { uint8_t ndim = r.getDimensionality(); auto const full_extent = r.getExtent(); Offset offset; Extent extent; - std::vector< bool > flatten; - std::tie( offset, extent, flatten ) = - parseTupleSlices( ndim, full_extent, slices ); + std::vector flatten; + std::tie(offset, extent, flatten) = + parseTupleSlices(ndim, full_extent, slices); - return store_chunk_span( r, offset, extent, flatten ); + return store_chunk_span(r, offset, extent, flatten); } /** Load Chunk @@ -502,72 +507,91 @@ store_chunk_span( RecordComponent & r, py::tuple const & slices ) * Size checks of the requested chunk (spanned data is in valid bounds) * will be performed at C++ API part in RecordComponent::loadChunk . */ -void -load_chunk(RecordComponent & r, py::buffer & buffer, Offset const & offset, Extent const & extent) +void load_chunk( + RecordComponent &r, + py::buffer &buffer, + Offset const &offset, + Extent const &extent) { - auto const dtype = dtype_to_numpy( r.getDatatype() ); - py::buffer_info buffer_info = buffer.request( /* writable = */ true ); + auto const dtype = dtype_to_numpy(r.getDatatype()); + py::buffer_info buffer_info = buffer.request(/* writable = */ true); - auto const & strides = buffer_info.strides; + auto const &strides = buffer_info.strides; // this function requires a contiguous slab of memory, so check the strides // whether we have that - if( strides.size() == 0 ) + if (strides.size() == 0) { throw std::runtime_error( - "[Record_Component::load_chunk()] Empty buffer passed." ); + "[Record_Component::load_chunk()] Empty buffer passed."); } { - py::ssize_t accumulator = toBytes( r.getDatatype() ); + py::ssize_t accumulator = toBytes(r.getDatatype()); size_t dim = strides.size(); - while( dim > 0 ) + while (dim > 0) { --dim; - if( strides[ dim ] != accumulator ) + if (strides[dim] != accumulator) { throw std::runtime_error( "[Record_Component::load_chunk()] Requires contiguous slab" - " of memory." 
); + " of memory."); } - accumulator *= extent[ dim ]; + accumulator *= extent[dim]; } } - if( r.getDatatype() == Datatype::CHAR ) - r.loadChunk(shareRaw((char*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::UCHAR ) - r.loadChunk(shareRaw((unsigned char*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::SHORT ) - r.loadChunk(shareRaw((short*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::INT ) - r.loadChunk(shareRaw((int*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::LONG ) - r.loadChunk(shareRaw((long*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::LONGLONG ) - r.loadChunk(shareRaw((long long*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::USHORT ) - r.loadChunk(shareRaw((unsigned short*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::UINT ) - r.loadChunk(shareRaw((unsigned int*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::ULONG ) - r.loadChunk(shareRaw((unsigned long*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::ULONGLONG ) - r.loadChunk(shareRaw((unsigned long long*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::LONG_DOUBLE ) - r.loadChunk(shareRaw((long double*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::DOUBLE ) - r.loadChunk(shareRaw((double*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::FLOAT ) - r.loadChunk(shareRaw((float*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::CLONG_DOUBLE ) - r.loadChunk>(shareRaw((std::complex*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::CDOUBLE ) - r.loadChunk>(shareRaw((std::complex*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::CFLOAT ) - r.loadChunk>(shareRaw((std::complex*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::BOOL ) - r.loadChunk(shareRaw((bool*) buffer_info.ptr), offset, extent); + // here, we increase a reference on the user-passed data so that + // temporary and lost-scope variables stay alive until we flush + // note: this does not yet prevent the user, as in C++, to build + // a race condition by manipulating the data they passed + auto load_data = + [&r, &buffer, &buffer_info, &offset, &extent](auto cxxtype) { + using CXXType = decltype(cxxtype); + buffer.inc_ref(); + // buffer_info.inc_ref(); + void *data = buffer_info.ptr; + std::shared_ptr shared( + (CXXType *)data, [buffer](CXXType *) { buffer.dec_ref(); }); + r.loadChunk(std::move(shared), offset, extent); + }; + + if (r.getDatatype() == Datatype::CHAR) + load_data((char)0); + else if (r.getDatatype() == Datatype::UCHAR) + load_data((unsigned char)0); + else if (r.getDatatype() == Datatype::SHORT) + load_data((short)0); + else if (r.getDatatype() == Datatype::INT) + load_data((int)0); + else if (r.getDatatype() == Datatype::LONG) + load_data((long)0); + else if (r.getDatatype() == Datatype::LONGLONG) + load_data((long long)0); + else if (r.getDatatype() == Datatype::USHORT) + load_data((unsigned short)0); + else if (r.getDatatype() == Datatype::UINT) + load_data((unsigned int)0); + else if (r.getDatatype() == Datatype::ULONG) + load_data((unsigned long)0); + else if (r.getDatatype() == Datatype::ULONGLONG) + load_data((unsigned long long)0); + else if (r.getDatatype() == Datatype::LONG_DOUBLE) + load_data((long 
double)0); + else if (r.getDatatype() == Datatype::DOUBLE) + load_data((double)0); + else if (r.getDatatype() == Datatype::FLOAT) + load_data((float)0); + else if (r.getDatatype() == Datatype::CLONG_DOUBLE) + load_data((std::complex)0); + else if (r.getDatatype() == Datatype::CDOUBLE) + load_data((std::complex)0); + else if (r.getDatatype() == Datatype::CFLOAT) + load_data((std::complex)0); + else if (r.getDatatype() == Datatype::BOOL) + load_data((bool)0); else - throw std::runtime_error(std::string("Datatype not known in 'loadChunk'!")); + throw std::runtime_error( + std::string("Datatype not known in 'loadChunk'!")); } /** Load Chunk @@ -578,19 +602,24 @@ load_chunk(RecordComponent & r, py::buffer & buffer, Offset const & offset, Exte * Size checks of the requested chunk (spanned data is in valid bounds) * will be performed at C++ API part in RecordComponent::loadChunk . */ -inline void -load_chunk(RecordComponent & r, py::array & a, Offset const & offset, Extent const & extent) +inline void load_chunk( + RecordComponent &r, + py::array &a, + Offset const &offset, + Extent const &extent) { // check array is large enough size_t s_load = 1u; size_t s_array = 1u; std::string str_extent_shape; std::string str_array_shape; - for( auto & si : extent ) { + for (auto &si : extent) + { s_load *= si; str_extent_shape.append(" ").append(std::to_string(si)); } - for( py::ssize_t d = 0; d < a.ndim(); ++d ) { + for (py::ssize_t d = 0; d < a.ndim(); ++d) + { s_array *= a.shape()[d]; str_array_shape.append(" ").append(std::to_string(a.shape()[d])); } @@ -606,63 +635,76 @@ load_chunk(RecordComponent & r, py::array & a, Offset const & offset, Extent con std::string("D)") ); */ - if( s_array < s_load ) { + if (s_array < s_load) + { throw py::index_error( - std::string("size of array (") + - std::to_string(s_array) + - std::string("; shape:") + - str_array_shape + + std::string("size of array (") + std::to_string(s_array) + + std::string("; shape:") + str_array_shape + std::string(") is smaller than size of selection " "in record component (") + - std::to_string(s_load) + - std::string("; shape:") + - str_extent_shape + - std::string(")") - ); + std::to_string(s_load) + std::string("; shape:") + + str_extent_shape + std::string(")")); } - check_buffer_is_contiguous( a ); + check_buffer_is_contiguous(a); // here, we increase a reference on the user-passed data so that // temporary and lost-scope variables stay alive until we flush // note: this does not yet prevent the user, as in C++, to build // a race condition by manipulating the data they passed - auto load_data = [ &r, &a, &offset, &extent ]( auto cxxtype ) { + auto load_data = [&r, &a, &offset, &extent](auto cxxtype) { using CXXType = decltype(cxxtype); a.inc_ref(); - void* data = a.mutable_data(); - std::shared_ptr< CXXType > shared( ( CXXType * )data, - [ a ]( CXXType * ) { a.dec_ref(); } ); - r.loadChunk( std::move( shared ), offset, extent ); + void *data = a.mutable_data(); + std::shared_ptr shared( + (CXXType *)data, [a](CXXType *) { a.dec_ref(); }); + r.loadChunk(std::move(shared), offset, extent); }; - if( r.getDatatype() == Datatype::CHAR ) load_data( char() ); - else if( r.getDatatype() == Datatype::UCHAR ) load_data( (unsigned char)0 ); - else if( r.getDatatype() == Datatype::SHORT ) load_data( short() ); - else if( r.getDatatype() == Datatype::INT ) load_data( int() ); - else if( r.getDatatype() == Datatype::LONG ) load_data( long() ); - else if( r.getDatatype() == Datatype::LONGLONG ) load_data( (long long)0 ); - else if( r.getDatatype() 
== Datatype::USHORT ) load_data( (unsigned short)0 ); - else if( r.getDatatype() == Datatype::UINT ) load_data( (unsigned int)0 ); - else if( r.getDatatype() == Datatype::ULONG ) load_data( (unsigned long)0 ); - else if( r.getDatatype() == Datatype::ULONGLONG ) load_data( (unsigned long long)0 ); - else if( r.getDatatype() == Datatype::LONG_DOUBLE ) load_data( (long double)0 ); - else if( r.getDatatype() == Datatype::DOUBLE ) load_data( double() ); - else if( r.getDatatype() == Datatype::FLOAT ) load_data( float() ); - else if( r.getDatatype() == Datatype::CLONG_DOUBLE ) load_data( std::complex() ); - else if( r.getDatatype() == Datatype::CDOUBLE ) load_data( std::complex() ); - else if( r.getDatatype() == Datatype::CFLOAT ) load_data( std::complex() ); - else if( r.getDatatype() == Datatype::BOOL ) load_data( bool() ); + if (r.getDatatype() == Datatype::CHAR) + load_data(char()); + else if (r.getDatatype() == Datatype::UCHAR) + load_data((unsigned char)0); + else if (r.getDatatype() == Datatype::SHORT) + load_data(short()); + else if (r.getDatatype() == Datatype::INT) + load_data(int()); + else if (r.getDatatype() == Datatype::LONG) + load_data(long()); + else if (r.getDatatype() == Datatype::LONGLONG) + load_data((long long)0); + else if (r.getDatatype() == Datatype::USHORT) + load_data((unsigned short)0); + else if (r.getDatatype() == Datatype::UINT) + load_data((unsigned int)0); + else if (r.getDatatype() == Datatype::ULONG) + load_data((unsigned long)0); + else if (r.getDatatype() == Datatype::ULONGLONG) + load_data((unsigned long long)0); + else if (r.getDatatype() == Datatype::LONG_DOUBLE) + load_data((long double)0); + else if (r.getDatatype() == Datatype::DOUBLE) + load_data(double()); + else if (r.getDatatype() == Datatype::FLOAT) + load_data(float()); + else if (r.getDatatype() == Datatype::CLONG_DOUBLE) + load_data(std::complex()); + else if (r.getDatatype() == Datatype::CDOUBLE) + load_data(std::complex()); + else if (r.getDatatype() == Datatype::CFLOAT) + load_data(std::complex()); + else if (r.getDatatype() == Datatype::BOOL) + load_data(bool()); else - throw std::runtime_error(std::string("Datatype not known in 'load_chunk'!")); + throw std::runtime_error( + std::string("Datatype not known in 'load_chunk'!")); } /** Load Chunk * * Called with a py::tuple of slices. 
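
Editor's note: both the buffer-based `load_chunk` overload and the span-returning `StoreChunkSpan` above walk the extent from the fastest-varying dimension backwards, multiplying up an accumulator to obtain C-contiguous byte strides, which are then either compared against the incoming buffer or used to build a memoryview. A small stand-alone helper (hypothetical name) showing that arithmetic:

```cpp
// Sketch: expected byte strides of a C-contiguous array with the given
// extent, as used both to build memoryviews and to reject strided buffers.
#include <cstdint>
#include <vector>

std::vector<std::int64_t> c_contiguous_strides(
    std::vector<std::uint64_t> const &extent, std::int64_t bytes_per_element)
{
    std::vector<std::int64_t> strides(extent.size());
    std::int64_t accumulator = bytes_per_element;
    std::size_t dim = extent.size();
    while (dim > 0)
    {
        --dim;
        strides[dim] = accumulator; // innermost axis first
        accumulator *= static_cast<std::int64_t>(extent[dim]);
    }
    return strides;
}

// Example: extent {4, 3} of double (8 bytes) -> strides {24, 8}.
// A user buffer is accepted only if its strides match this exactly.
```
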
*/ -inline py::array -load_chunk(RecordComponent & r, py::tuple const & slices) +inline py::array load_chunk(RecordComponent &r, py::tuple const &slices) { uint8_t ndim = r.getDimensionality(); auto const full_extent = r.getExtent(); @@ -670,52 +712,54 @@ load_chunk(RecordComponent & r, py::tuple const & slices) Offset offset; Extent extent; std::vector flatten; - std::tie(offset, extent, flatten) = parseTupleSlices(ndim, full_extent, slices); + std::tie(offset, extent, flatten) = + parseTupleSlices(ndim, full_extent, slices); - // some one-size dimensions might be flattended in our output due to selections by index - size_t const numFlattenDims = std::count(flatten.begin(), flatten.end(), true); - std::vector< ptrdiff_t > shape(extent.size() - numFlattenDims); + // some one-size dimensions might be flattended in our output due to + // selections by index + size_t const numFlattenDims = + std::count(flatten.begin(), flatten.end(), true); + std::vector shape(extent.size() - numFlattenDims); auto maskIt = flatten.begin(); std::copy_if( - std::begin(extent), - std::end(extent), - std::begin(shape), - [&maskIt](std::uint64_t){ - return !*(maskIt++); - } - ); + std::begin(extent), + std::end(extent), + std::begin(shape), + [&maskIt](std::uint64_t) { return !*(maskIt++); }); - auto const dtype = dtype_to_numpy( r.getDatatype() ); - auto a = py::array( dtype, shape ); + auto const dtype = dtype_to_numpy(r.getDatatype()); + auto a = py::array(dtype, shape); load_chunk(r, a, offset, extent); return a; } -void init_RecordComponent(py::module &m) { +void init_RecordComponent(py::module &m) +{ py::class_(m, "Dynamic_Memory_View") - .def("__repr__", - [](PythonDynamicMemoryView const & view) { - return "size()) + "'>"; - } - ) - .def("current_buffer", - [](PythonDynamicMemoryView const & view) { - return view.currentView(); - } - ); + .def( + "__repr__", + [](PythonDynamicMemoryView const &view) { + return "size()) + "'>"; + }) + .def("current_buffer", [](PythonDynamicMemoryView const &view) { + return view.currentView(); + }); py::class_ cl(m, "Record_Component"); - cl - .def("__repr__", - [](RecordComponent const & rc) { - return ""; - } - ) - - .def_property("unit_SI", &BaseRecordComponent::unitSI, &RecordComponent::setUnitSI) + cl.def( + "__repr__", + [](RecordComponent const &rc) { + return ""; + }) + + .def_property( + "unit_SI", + &BaseRecordComponent::unitSI, + &RecordComponent::setUnitSI) .def("reset_dataset", &RecordComponent::resetDataset) @@ -724,120 +768,144 @@ void init_RecordComponent(py::module &m) { .def_property_readonly("empty", &RecordComponent::empty) // buffer types - .def("make_constant", [](RecordComponent & rc, py::buffer & a) { - py::buffer_info buf = a.request(); - auto const dtype = dtype_from_bufferformat( buf.format ); + .def( + "make_constant", + [](RecordComponent &rc, py::buffer &a) { + py::buffer_info buf = a.request(); + auto const dtype = dtype_from_bufferformat(buf.format); - using DT = Datatype; + using DT = Datatype; - // allow one-element n-dimensional buffers as well - py::ssize_t numElements = 1; - if( buf.ndim > 0 ) { - for( auto d = 0; d < buf.ndim; ++d ) - numElements *= buf.shape.at(d); - } + // allow one-element n-dimensional buffers as well + py::ssize_t numElements = 1; + if (buf.ndim > 0) + { + for (auto d = 0; d < buf.ndim; ++d) + numElements *= buf.shape.at(d); + } - // Numpy: Handling of arrays and scalars - // work-around for https://github.com/pybind/pybind11/issues/1224 - // -> passing numpy scalars as buffers needs numpy 1.15+ - // 
https://github.com/numpy/numpy/issues/10265 - // https://github.com/pybind/pybind11/issues/1224#issuecomment-354357392 - // scalars, see PEP 3118 - // requires Numpy 1.15+ - if( numElements == 1 ) { - // refs: - // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html - // https://docs.python.org/3/library/struct.html#format-characters - // std::cout << " scalar type '" << buf.format << "'" << std::endl; - // typestring: encoding + type + number of bytes - switch( dtype ) + // Numpy: Handling of arrays and scalars + // work-around for + // https://github.com/pybind/pybind11/issues/1224 + // -> passing numpy scalars as buffers needs numpy 1.15+ + // https://github.com/numpy/numpy/issues/10265 + // https://github.com/pybind/pybind11/issues/1224#issuecomment-354357392 + // scalars, see PEP 3118 + // requires Numpy 1.15+ + if (numElements == 1) { + // refs: + // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html + // https://docs.python.org/3/library/struct.html#format-characters + // std::cout << " scalar type '" << buf.format << "'" << + // std::endl; typestring: encoding + type + number of bytes + switch (dtype) + { case DT::BOOL: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::CHAR: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::SHORT: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::INT: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::LONG: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::LONGLONG: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::UCHAR: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::USHORT: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::UINT: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::ULONG: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::ULONGLONG: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::FLOAT: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::DOUBLE: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::LONG_DOUBLE: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::CFLOAT: - return rc.makeConstant( *static_cast*>(buf.ptr) ); + return rc.makeConstant( + *static_cast *>(buf.ptr)); break; case DT::CDOUBLE: - return rc.makeConstant( *static_cast*>(buf.ptr) ); + return rc.makeConstant( + *static_cast *>(buf.ptr)); break; case DT::CLONG_DOUBLE: - return rc.makeConstant( *static_cast*>(buf.ptr) ); + return rc.makeConstant( + *static_cast *>(buf.ptr)); break; default: - throw std::runtime_error("make_constant: " + throw std::runtime_error( + "make_constant: " "Unknown Datatype!"); + } } - } - else - { - throw std::runtime_error("make_constant: " - "Only scalar values supported!"); - 
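
Editor's note: `make_constant` (like `store` earlier) is bound several times on purpose: the generic `py::buffer` overload is registered first to catch NumPy scalars, and the typed overloads that follow catch plain Python `float`, `int`, `bool` and `str` ("allowed python intrinsics, after (!) buffer matching"). pybind11 tries overloads in registration order, which a toy module can demonstrate; the module and function names below are made up.

```cpp
// Sketch: overload registration order decides which binding wins.
#include <pybind11/pybind11.h>

namespace py = pybind11;

PYBIND11_MODULE(overload_demo, m)
{
    m.def("which", [](py::buffer) { return "buffer protocol"; });
    m.def("which", [](double) { return "python float"; });
    m.def("which", [](bool) { return "python bool"; });
    // which(numpy.float64(1.0)) -> "buffer protocol" (NumPy >= 1.15 exposes
    //                              the buffer protocol on scalars)
    // which(1.0)                -> "python float"
    // which(True)               -> "python bool"
}
```
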
} - - }, py::arg("value") - ) + else + { + throw std::runtime_error( + "make_constant: " + "Only scalar values supported!"); + } + }, + py::arg("value")) // allowed python intrinsics, after (!) buffer matching - .def("make_constant", &RecordComponent::makeConstant, + .def( + "make_constant", + &RecordComponent::makeConstant, py::arg("value")) - .def("make_constant", &RecordComponent::makeConstant, + .def( + "make_constant", + &RecordComponent::makeConstant, py::arg("value")) - .def("make_constant", &RecordComponent::makeConstant, + .def( + "make_constant", + &RecordComponent::makeConstant, py::arg("value")) - .def("make_constant", &RecordComponent::makeConstant, + .def( + "make_constant", + &RecordComponent::makeConstant, py::arg("value")) - .def("make_empty", - []( RecordComponent & rc, Datatype dt, uint8_t dimensionality ) - { - return rc.makeEmpty( dt, dimensionality ); + .def( + "make_empty", + [](RecordComponent &rc, Datatype dt, uint8_t dimensionality) { + return rc.makeEmpty(dt, dimensionality); }, - py::arg("datatype"), py::arg("dimensionality")) - .def("make_empty", - []( - RecordComponent & rc, - pybind11::dtype const dt, - uint8_t dimensionality ) - { - return rc.makeEmpty( dtype_from_numpy( dt ), dimensionality ); + py::arg("datatype"), + py::arg("dimensionality")) + .def( + "make_empty", + [](RecordComponent &rc, + pybind11::dtype const dt, + uint8_t dimensionality) { + return rc.makeEmpty(dtype_from_numpy(dt), dimensionality); }) // TODO if we also want to support scalar arrays, we have to switch @@ -845,173 +913,192 @@ void init_RecordComponent(py::module &m) { // https://github.com/pybind/pybind11/pull/1537 // slicing protocol - .def("__getitem__", [](RecordComponent & r, py::tuple const & slices) { - return load_chunk(r, slices); - }, - py::arg("tuple of index slices") - ) - .def("__getitem__", [](RecordComponent & r, py::slice const & slice_obj) { - auto const slices = py::make_tuple(slice_obj); - return load_chunk(r, slices); - }, - py::arg("slice") - ) - .def("__getitem__", [](RecordComponent & r, py::int_ const & slice_obj) { - auto const slices = py::make_tuple(slice_obj); - return load_chunk(r, slices); - }, - py::arg("axis index") - ) - - .def("__setitem__", [](RecordComponent & r, py::tuple const & slices, py::array & a ) { - store_chunk(r, a, slices); - }, + .def( + "__getitem__", + [](RecordComponent &r, py::tuple const &slices) { + return load_chunk(r, slices); + }, + py::arg("tuple of index slices")) + .def( + "__getitem__", + [](RecordComponent &r, py::slice const &slice_obj) { + auto const slices = py::make_tuple(slice_obj); + return load_chunk(r, slices); + }, + py::arg("slice")) + .def( + "__getitem__", + [](RecordComponent &r, py::int_ const &slice_obj) { + auto const slices = py::make_tuple(slice_obj); + return load_chunk(r, slices); + }, + py::arg("axis index")) + + .def( + "__setitem__", + [](RecordComponent &r, py::tuple const &slices, py::array &a) { + store_chunk(r, a, slices); + }, py::arg("tuple of index slices"), - py::arg("array with values to assign") - ) - .def("__setitem__", [](RecordComponent & r, py::slice const & slice_obj, py::array & a ) { - auto const slices = py::make_tuple(slice_obj); - store_chunk(r, a, slices); - }, + py::arg("array with values to assign")) + .def( + "__setitem__", + [](RecordComponent &r, py::slice const &slice_obj, py::array &a) { + auto const slices = py::make_tuple(slice_obj); + store_chunk(r, a, slices); + }, py::arg("slice"), - py::arg("array with values to assign") - ) - .def("__setitem__", [](RecordComponent 
& r, py::int_ const & slice_obj, py::array & a ) { - auto const slices = py::make_tuple(slice_obj); - store_chunk(r, a, slices); - }, + py::arg("array with values to assign")) + .def( + "__setitem__", + [](RecordComponent &r, py::int_ const &slice_obj, py::array &a) { + auto const slices = py::make_tuple(slice_obj); + store_chunk(r, a, slices); + }, py::arg("axis index"), - py::arg("array with values to assign") - ) + py::arg("array with values to assign")) // deprecated: pass-through C++ API - .def("load_chunk", [](RecordComponent & r, Offset const & offset_in, Extent const & extent_in) { - uint8_t ndim = r.getDimensionality(); - - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = offset_in; - if( offset_in.size() == 1u && offset_in.at(0) == 0u ) - offset = Offset(ndim, 0u); - - // extent = {-1u}: take full size - Extent extent(ndim, 1u); - if( extent_in.size() == 1u && extent_in.at(0) == -1u ) - { - extent = r.getExtent(); - for( uint8_t i = 0u; i < ndim; ++i ) - extent[i] -= offset[i]; - } - else - extent = extent_in; - - std::vector< ptrdiff_t > shape(extent.size()); - std::copy(std::begin(extent), std::end(extent), std::begin(shape)); - auto const dtype = dtype_to_numpy( r.getDatatype() ); - auto a = py::array( dtype, shape ); - load_chunk(r, a, offset, extent); - - return a; - }, - py::arg_v("offset", Offset(1, 0u), "np.zeros(Record_Component.shape)"), - py::arg_v("extent", Extent(1, -1u), "Record_Component.shape") - ) - .def("load_chunk", []( - RecordComponent & r, - py::buffer buffer, - Offset const & offset_in, - Extent const & extent_in) - { - uint8_t ndim = r.getDimensionality(); + .def( + "load_chunk", + [](RecordComponent &r, + Offset const &offset_in, + Extent const &extent_in) { + uint8_t ndim = r.getDimensionality(); + + // default arguments + // offset = {0u}: expand to right dim {0u, 0u, ...} + Offset offset = offset_in; + if (offset_in.size() == 1u && offset_in.at(0) == 0u) + offset = Offset(ndim, 0u); + + // extent = {-1u}: take full size + Extent extent(ndim, 1u); + if (extent_in.size() == 1u && extent_in.at(0) == -1u) + { + extent = r.getExtent(); + for (uint8_t i = 0u; i < ndim; ++i) + extent[i] -= offset[i]; + } + else + extent = extent_in; - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = offset_in; - if( offset_in.size() == 1u && offset_in.at(0) == 0u ) - offset = Offset(ndim, 0u); + std::vector shape(extent.size()); + std::copy( + std::begin(extent), std::end(extent), std::begin(shape)); + auto const dtype = dtype_to_numpy(r.getDatatype()); + auto a = py::array(dtype, shape); + load_chunk(r, a, offset, extent); - // extent = {-1u}: take full size - Extent extent(ndim, 1u); - if( extent_in.size() == 1u && extent_in.at(0) == -1u ) - { - extent = r.getExtent(); - for( uint8_t i = 0u; i < ndim; ++i ) - extent[i] -= offset[i]; - } - else - extent = extent_in; + return a; + }, + py::arg_v( + "offset", Offset(1, 0u), "np.zeros(Record_Component.shape)"), + py::arg_v("extent", Extent(1, -1u), "Record_Component.shape")) + .def( + "load_chunk", + [](RecordComponent &r, + py::buffer buffer, + Offset const &offset_in, + Extent const &extent_in) { + uint8_t ndim = r.getDimensionality(); + + // default arguments + // offset = {0u}: expand to right dim {0u, 0u, ...} + Offset offset = offset_in; + if (offset_in.size() == 1u && offset_in.at(0) == 0u) + offset = Offset(ndim, 0u); + + // extent = {-1u}: take full size + Extent extent(ndim, 1u); + if (extent_in.size() == 1u && extent_in.at(0) == 
-1u) + { + extent = r.getExtent(); + for (uint8_t i = 0u; i < ndim; ++i) + extent[i] -= offset[i]; + } + else + extent = extent_in; - std::vector flatten(ndim, false); - load_chunk(r, buffer, offset, extent); - }, + std::vector flatten(ndim, false); + load_chunk(r, buffer, offset, extent); + }, py::arg("pre-allocated buffer"), - py::arg_v("offset", Offset(1, 0u), "np.zeros(Record_Component.shape)"), - py::arg_v("extent", Extent(1, -1u), "Record_Component.shape") - ) + py::arg_v( + "offset", Offset(1, 0u), "np.zeros(Record_Component.shape)"), + py::arg_v("extent", Extent(1, -1u), "Record_Component.shape")) // deprecated: pass-through C++ API - .def("store_chunk", [](RecordComponent & r, py::array & a, Offset const & offset_in, Extent const & extent_in) { - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = offset_in; - if( offset_in.size() == 1u && offset_in.at(0) == 0u && a.ndim() > 1u ) - offset = Offset(a.ndim(), 0u); - - // extent = {-1u}: take full size - Extent extent(a.ndim(), 1u); - if( extent_in.size() == 1u && extent_in.at(0) == -1u ) - for( auto d = 0; d < a.ndim(); ++d ) - extent.at(d) = a.shape()[d]; - else - extent = extent_in; - - std::vector flatten(r.getDimensionality(), false); - store_chunk(r, a, offset, extent, flatten); - }, + .def( + "store_chunk", + [](RecordComponent &r, + py::array &a, + Offset const &offset_in, + Extent const &extent_in) { + // default arguments + // offset = {0u}: expand to right dim {0u, 0u, ...} + Offset offset = offset_in; + if (offset_in.size() == 1u && offset_in.at(0) == 0u && + a.ndim() > 1) + offset = Offset(a.ndim(), 0u); + + // extent = {-1u}: take full size + Extent extent(a.ndim(), 1u); + if (extent_in.size() == 1u && extent_in.at(0) == -1u) + for (auto d = 0; d < a.ndim(); ++d) + extent.at(d) = a.shape()[d]; + else + extent = extent_in; + + std::vector flatten(r.getDimensionality(), false); + store_chunk(r, a, offset, extent, flatten); + }, py::arg("array"), - py::arg_v("offset", Offset(1, 0u), "np.zeros_like(array)"), - py::arg_v("extent", Extent(1, -1u), "array.shape") - ) - .def("store_chunk", [](RecordComponent & r, Offset const & offset_in, Extent const & extent_in) { - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - unsigned dimensionality = r.getDimensionality(); - Extent const & totalExtent = r.getExtent(); - Offset offset = offset_in; - if( offset_in.size() == 1u && offset_in.at(0) == 0u && dimensionality > 1u ) - offset = Offset(dimensionality, 0u); - - // extent = {-1u}: take full size - Extent extent(dimensionality, 1u); - if( extent_in.size() == 1u && extent_in.at(0) == -1u ) - for( unsigned d = 0; d < dimensionality; ++d ) - extent.at(d) = totalExtent[d]; - else - extent = extent_in; - - std::vector flatten(r.getDimensionality(), false); - return store_chunk_span(r, offset, extent, flatten); - }, - py::arg_v("offset", Offset(1, 0u), "np.zeros_like(array)"), - py::arg_v("extent", Extent(1, -1u), "array.shape") - ) + py::arg_v("offset", Offset(1, 0u), "np.zeros_like(array)"), + py::arg_v("extent", Extent(1, -1u), "array.shape")) + .def( + "store_chunk", + [](RecordComponent &r, + Offset const &offset_in, + Extent const &extent_in) { + // default arguments + // offset = {0u}: expand to right dim {0u, 0u, ...} + unsigned dimensionality = r.getDimensionality(); + Extent const &totalExtent = r.getExtent(); + Offset offset = offset_in; + if (offset_in.size() == 1u && offset_in.at(0) == 0u && + dimensionality > 1u) + offset = Offset(dimensionality, 0u); + + // 
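
Editor's note: the deprecated `load_chunk`/`store_chunk` pass-throughs above accept `offset={0}` as shorthand for an all-zero offset of the right rank and `extent={-1}` as "everything from the offset to the end of the dataset". A sketch of that expansion, with a made-up helper name and plain `std::vector<std::uint64_t>` standing in for `Offset`/`Extent`:

```cpp
// Sketch: expand the {0} / {-1} shorthands used by the deprecated
// load_chunk/store_chunk bindings into a full selection.
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

using Vec64 = std::vector<std::uint64_t>;

std::pair<Vec64, Vec64> expand_defaults(
    Vec64 offset_in, Vec64 extent_in, Vec64 const &full_extent)
{
    std::size_t const ndim = full_extent.size();

    // offset = {0}: broadcast to {0, 0, ...} of the dataset's rank
    if (offset_in.size() == 1 && offset_in[0] == 0)
        offset_in.assign(ndim, 0);

    // extent = {-1} (i.e. the largest uint64): take the rest of each axis
    if (extent_in.size() == 1 && extent_in[0] == std::uint64_t(-1))
    {
        extent_in = full_extent;
        for (std::size_t d = 0; d < ndim; ++d)
            extent_in[d] -= offset_in[d];
    }
    return {offset_in, extent_in};
}
```
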
extent = {-1u}: take full size + Extent extent(dimensionality, 1u); + if (extent_in.size() == 1u && extent_in.at(0) == -1u) + for (unsigned d = 0; d < dimensionality; ++d) + extent.at(d) = totalExtent[d]; + else + extent = extent_in; + + std::vector flatten(r.getDimensionality(), false); + return store_chunk_span(r, offset, extent, flatten); + }, + py::arg_v("offset", Offset(1, 0u), "np.zeros_like(array)"), + py::arg_v("extent", Extent(1, -1u), "array.shape")) - .def_property_readonly_static("SCALAR", [](py::object){ return RecordComponent::SCALAR; }) + .def_property_readonly_static( + "SCALAR", [](py::object) { return RecordComponent::SCALAR; }) // TODO remove in future versions (deprecated) .def("set_unit_SI", &RecordComponent::setUnitSI) // deprecated - ; + ; add_pickle( - cl, - [](openPMD::Series & series, std::vector< std::string > const & group ) { + cl, [](openPMD::Series &series, std::vector const &group) { uint64_t const n_it = std::stoull(group.at(1)); - return series.iterations[n_it].particles[group.at(3)][group.at(4)][group.at(5)]; - } - ); + return series.iterations[n_it] + .particles[group.at(3)][group.at(4)][group.at(5)]; + }); py::enum_(m, "Allocation") .value("USER", RecordComponent::Allocation::USER) .value("API", RecordComponent::Allocation::API) - .value("AUTO", RecordComponent::Allocation::AUTO) - ; + .value("AUTO", RecordComponent::Allocation::AUTO); } diff --git a/src/binding/python/Series.cpp b/src/binding/python/Series.cpp index b8bb2e8298..e95ed1bfbb 100644 --- a/src/binding/python/Series.cpp +++ b/src/binding/python/Series.cpp @@ -22,13 +22,13 @@ #include #include -#include "openPMD/config.hpp" #include "openPMD/Series.hpp" +#include "openPMD/config.hpp" #if openPMD_HAVE_MPI // re-implemented signatures: // include -# include +#include #endif #include @@ -37,136 +37,165 @@ namespace py = pybind11; using namespace openPMD; #if openPMD_HAVE_MPI - /** mpi4py communicator wrapper - * - * refs: - * - https://github.com/mpi4py/mpi4py/blob/3.0.0/src/mpi4py/libmpi.pxd#L35-L36 - * - https://github.com/mpi4py/mpi4py/blob/3.0.0/src/mpi4py/MPI.pxd#L100-L105 - * - installed: include/mpi4py/mpi4py.MPI.h - */ - struct openPMD_PyMPICommObject - { - PyObject_HEAD - MPI_Comm ob_mpi; - unsigned int flags; - }; - using openPMD_PyMPIIntracommObject = openPMD_PyMPICommObject; +/** mpi4py communicator wrapper + * + * refs: + * - https://github.com/mpi4py/mpi4py/blob/3.0.0/src/mpi4py/libmpi.pxd#L35-L36 + * - https://github.com/mpi4py/mpi4py/blob/3.0.0/src/mpi4py/MPI.pxd#L100-L105 + * - installed: include/mpi4py/mpi4py.MPI.h + */ +struct openPMD_PyMPICommObject +{ + PyObject_HEAD MPI_Comm ob_mpi; + unsigned int flags; +}; +using openPMD_PyMPIIntracommObject = openPMD_PyMPICommObject; #endif - -void init_Series(py::module &m) { +void init_Series(py::module &m) +{ using iterations_key_t = decltype(Series::iterations)::key_type; py::class_(m, "WriteIterations") - .def("__getitem__", - [](WriteIterations writeIterations, iterations_key_t key){ + .def( + "__getitem__", + [](WriteIterations writeIterations, iterations_key_t key) { return writeIterations[key]; - }, - // keep container alive while iterator exists - py::keep_alive<0, 1>()) - ; + }, + // keep container alive while iterator exists + py::keep_alive<0, 1>()); py::class_(m, "IndexedIteration") - .def_readonly( - "iteration_index", &IndexedIteration::iterationIndex) - ; + .def_readonly("iteration_index", &IndexedIteration::iterationIndex); py::class_(m, "ReadIterations") - .def("__iter__", [](ReadIterations & readIterations) { - return 
py::make_iterator( - readIterations.begin(), readIterations.end()); - }, - // keep handle alive while iterator exists - py::keep_alive<0, 1>()) - ; + .def( + "__iter__", + [](ReadIterations &readIterations) { + return py::make_iterator( + readIterations.begin(), readIterations.end()); + }, + // keep handle alive while iterator exists + py::keep_alive<0, 1>()); py::class_(m, "Series") - .def(py::init(), + .def( + py::init(), py::arg("filepath"), py::arg("access"), py::arg("options") = "{}") #if openPMD_HAVE_MPI - .def(py::init([]( - std::string const& filepath, - Access at, - py::object &comm, - std::string const& options ){ - //! TODO perform mpi4py import test and check min-version - //! careful: double MPI_Init risk? only import mpi4py.MPI? - //! required C-API init? probably just checks: - //! refs: - //! - https://bitbucket.org/mpi4py/mpi4py/src/3.0.0/demo/wrap-c/helloworld.c - //! - installed: include/mpi4py/mpi4py.MPI_api.h - // if( import_mpi4py() < 0 ) { here be dragons } - - if( comm.ptr() == Py_None ) - throw std::runtime_error("Series: MPI communicator cannot be None."); - if( comm.ptr() == nullptr ) - throw std::runtime_error("Series: MPI communicator is a nullptr."); - - // check type string to see if this is mpi4py - // __str__ (pretty) - // __repr__ (unambiguous) - // mpi4py: - // pyMPI: ... (TODO) - py::str const comm_pystr = py::repr(comm); - std::string const comm_str = comm_pystr.cast(); - if( comm_str.substr(0, 12) != std::string(" >(comm.get_type()) ) - // TODO add mpi4py version from above import check to error message - throw std::runtime_error("Series: comm has unexpected type layout in " + - comm_str + - " (Mismatched MPI at compile vs. runtime? " - "Breaking mpi4py release?)"); - - // todo other possible implementations: - // - pyMPI (inactive since 2008?): import mpi; mpi.WORLD - - // reimplementation of mpi4py's: - // MPI_Comm* mpiCommPtr = PyMPIComm_Get(comm.ptr()); - MPI_Comm* mpiCommPtr = &((openPMD_PyMPIIntracommObject*)(comm.ptr()))->ob_mpi; - - if( PyErr_Occurred() ) - throw std::runtime_error("Series: MPI communicator access error."); - if( mpiCommPtr == nullptr ) { - throw std::runtime_error("Series: MPI communicator cast failed. " - "(Mismatched MPI at compile vs. runtime?)"); - } - - return new Series(filepath, at, *mpiCommPtr, options ); - }), + .def( + py::init([](std::string const &filepath, + Access at, + py::object &comm, + std::string const &options) { + //! TODO perform mpi4py import test and check min-version + //! careful: double MPI_Init risk? only import mpi4py.MPI? + //! required C-API init? probably just checks: + //! refs: + //! - + //! https://bitbucket.org/mpi4py/mpi4py/src/3.0.0/demo/wrap-c/helloworld.c + //! - installed: include/mpi4py/mpi4py.MPI_api.h + // if( import_mpi4py() < 0 ) { here be dragons } + + if (comm.ptr() == Py_None) + throw std::runtime_error( + "Series: MPI communicator cannot be None."); + if (comm.ptr() == nullptr) + throw std::runtime_error( + "Series: MPI communicator is a nullptr."); + + // check type string to see if this is mpi4py + // __str__ (pretty) + // __repr__ (unambiguous) + // mpi4py: + // pyMPI: ... (TODO) + py::str const comm_pystr = py::repr(comm); + std::string const comm_str = comm_pystr.cast(); + if (comm_str.substr(0, 12) != std::string(" >( + comm.get_type())) + // TODO add mpi4py version from above import check to error + // message + throw std::runtime_error( + "Series: comm has unexpected type layout in " + + comm_str + + " (Mismatched MPI at compile vs. runtime? 
" + "Breaking mpi4py release?)"); + + // todo other possible implementations: + // - pyMPI (inactive since 2008?): import mpi; mpi.WORLD + + // reimplementation of mpi4py's: + // MPI_Comm* mpiCommPtr = PyMPIComm_Get(comm.ptr()); + MPI_Comm *mpiCommPtr = + &((openPMD_PyMPIIntracommObject *)(comm.ptr()))->ob_mpi; + + if (PyErr_Occurred()) + throw std::runtime_error( + "Series: MPI communicator access error."); + if (mpiCommPtr == nullptr) + { + throw std::runtime_error( + "Series: MPI communicator cast failed. " + "(Mismatched MPI at compile vs. runtime?)"); + } + + return new Series(filepath, at, *mpiCommPtr, options); + }), py::arg("filepath"), py::arg("access"), py::arg("mpi_communicator"), - py::arg("options") = "{}" - ) + py::arg("options") = "{}") #endif .def_property("openPMD", &Series::openPMD, &Series::setOpenPMD) - .def_property("openPMD_extension", &Series::openPMDextension, &Series::setOpenPMDextension) + .def_property( + "openPMD_extension", + &Series::openPMDextension, + &Series::setOpenPMDextension) .def_property("base_path", &Series::basePath, &Series::setBasePath) - .def_property("meshes_path", &Series::meshesPath, &Series::setMeshesPath) - .def_property("particles_path", &Series::particlesPath, &Series::setParticlesPath) + .def_property( + "meshes_path", &Series::meshesPath, &Series::setMeshesPath) + .def_property( + "particles_path", &Series::particlesPath, &Series::setParticlesPath) .def_property("author", &Series::author, &Series::setAuthor) - .def_property("machine", + .def_property( + "machine", &Series::machine, &Series::setMachine, "Indicate the machine or relevant hardware that created the file.") .def_property_readonly("software", &Series::software) - .def("set_software", &Series::setSoftware, - py::arg("name"), py::arg("version") = std::string("unspecified")) + .def( + "set_software", + &Series::setSoftware, + py::arg("name"), + py::arg("version") = std::string("unspecified")) .def_property_readonly("software_version", &Series::softwareVersion) - .def("set_software_version", [](Series & s, std::string const& softwareVersion) { - py::print("Series.set_software_version is deprecated. Set the version with the second argument of Series.set_software"); - s.setSoftware(s.software(), softwareVersion); - }) + .def( + "set_software_version", + [](Series &s, std::string const &softwareVersion) { + py::print( + "Series.set_software_version is deprecated. 
Set the " + "version with the second argument of Series.set_software"); + s.setSoftware(s.software(), softwareVersion); + }) // softwareDependencies // machine .def_property("date", &Series::date, &Series::setDate) - .def_property("iteration_encoding", &Series::iterationEncoding, &Series::setIterationEncoding) - .def_property("iteration_format", &Series::iterationFormat, &Series::setIterationFormat) + .def_property( + "iteration_encoding", + &Series::iterationEncoding, + &Series::setIterationEncoding) + .def_property( + "iteration_format", + &Series::iterationFormat, + &Series::setIterationFormat) .def_property("name", &Series::name, &Series::setName) .def("flush", &Series::flush) @@ -184,12 +213,15 @@ void init_Series(py::module &m) { .def("set_iteration_format", &Series::setIterationFormat) .def("set_name", &Series::setName) - .def_readwrite("iterations", &Series::iterations, + .def_readwrite( + "iterations", + &Series::iterations, py::return_value_policy::reference, // garbage collection: return value must be freed before Series py::keep_alive<1, 0>()) .def("read_iterations", &Series::readIterations, py::keep_alive<0, 1>()) - .def("write_iterations", - &Series::writeIterations, py::keep_alive<0, 1>()) - ; + .def( + "write_iterations", + &Series::writeIterations, + py::keep_alive<0, 1>()); } diff --git a/src/binding/python/UnitDimension.cpp b/src/binding/python/UnitDimension.cpp index e1477eca25..72074cde78 100644 --- a/src/binding/python/UnitDimension.cpp +++ b/src/binding/python/UnitDimension.cpp @@ -26,8 +26,8 @@ namespace py = pybind11; using namespace openPMD; - -void init_UnitDimension(py::module &m) { +void init_UnitDimension(py::module &m) +{ py::enum_(m, "Unit_Dimension") .value("L", UnitDimension::L) .value("M", UnitDimension::M) @@ -35,6 +35,5 @@ void init_UnitDimension(py::module &m) { .value("I", UnitDimension::I) .value("theta", UnitDimension::theta) .value("N", UnitDimension::N) - .value("J", UnitDimension::J) - ; + .value("J", UnitDimension::J); } diff --git a/src/binding/python/openPMD.cpp b/src/binding/python/openPMD.cpp index 2b9d6b2c1b..ce6f7d44c2 100644 --- a/src/binding/python/openPMD.cpp +++ b/src/binding/python/openPMD.cpp @@ -24,13 +24,12 @@ #include "openPMD/config.hpp" #include "openPMD/version.hpp" -#include #include #include +#include namespace py = pybind11; - // forward declarations of exposed classes void init_Access(py::module &); void init_Attributable(py::module &); @@ -54,8 +53,8 @@ void init_RecordComponent(py::module &); void init_Series(py::module &); void init_UnitDimension(py::module &); - -PYBIND11_MODULE(openpmd_api_cxx, m) { +PYBIND11_MODULE(openpmd_api_cxx, m) +{ m.doc() = R"pbdoc( openPMD-api ----------- @@ -127,4 +126,3 @@ PYBIND11_MODULE(openpmd_api_cxx, m) { // TODO allow to query runtime versions of all dependencies // (also needed in C++ frontend) } - diff --git a/src/binding/python/openpmd_api/DaskArray.py b/src/binding/python/openpmd_api/DaskArray.py index 1cb4260700..0d2a1ec4ad 100644 --- a/src/binding/python/openpmd_api/DaskArray.py +++ b/src/binding/python/openpmd_api/DaskArray.py @@ -6,7 +6,9 @@ License: LGPLv3+ """ import math + import numpy as np + try: from dask.array import from_array found_dask = True diff --git a/src/binding/python/openpmd_api/DaskDataFrame.py b/src/binding/python/openpmd_api/DaskDataFrame.py index 7d0fb9204c..fa18f7c076 100644 --- a/src/binding/python/openpmd_api/DaskDataFrame.py +++ b/src/binding/python/openpmd_api/DaskDataFrame.py @@ -6,6 +6,7 @@ License: LGPLv3+ """ import numpy as np + try: import 
dask.dataframe as dd from dask.delayed import delayed diff --git a/src/binding/python/openpmd_api/DataFrame.py b/src/binding/python/openpmd_api/DataFrame.py index 55e14fcafb..d0e01acab8 100644 --- a/src/binding/python/openpmd_api/DataFrame.py +++ b/src/binding/python/openpmd_api/DataFrame.py @@ -6,7 +6,9 @@ License: LGPLv3+ """ import math + import numpy as np + try: import pandas as pd found_pandas = True diff --git a/src/binding/python/openpmd_api/__init__.py b/src/binding/python/openpmd_api/__init__.py index af045c871c..e1bb49ef7e 100644 --- a/src/binding/python/openpmd_api/__init__.py +++ b/src/binding/python/openpmd_api/__init__.py @@ -1,9 +1,8 @@ from . import openpmd_api_cxx as cxx -from .openpmd_api_cxx import * # noqa -from .DataFrame import particles_to_dataframe -from .DaskDataFrame import particles_to_daskdataframe from .DaskArray import record_component_to_daskarray - +from .DaskDataFrame import particles_to_daskdataframe +from .DataFrame import particles_to_dataframe +from .openpmd_api_cxx import * # noqa __version__ = cxx.__version__ __doc__ = cxx.__doc__ diff --git a/src/binding/python/openpmd_api/ls/__main__.py b/src/binding/python/openpmd_api/ls/__main__.py index 2a4007755e..15d43875b9 100644 --- a/src/binding/python/openpmd_api/ls/__main__.py +++ b/src/binding/python/openpmd_api/ls/__main__.py @@ -9,6 +9,7 @@ License: LGPLv3+ """ import sys + from ..openpmd_api_cxx import _ls_run diff --git a/src/binding/python/openpmd_api/pipe/__main__.py b/src/binding/python/openpmd_api/pipe/__main__.py index 50392344d9..f517ae6991 100755 --- a/src/binding/python/openpmd_api/pipe/__main__.py +++ b/src/binding/python/openpmd_api/pipe/__main__.py @@ -8,16 +8,25 @@ Authors: Franz Poeschel License: LGPLv3+ """ -from .. import openpmd_api_cxx as io import argparse import os # os.path.basename import sys # sys.stderr.write +from .. import openpmd_api_cxx as io + # MPI is an optional dependency -try: - from mpi4py import MPI - HAVE_MPI = True -except ImportError: +if io.variants['mpi']: + try: + from mpi4py import MPI + HAVE_MPI = True + except (ImportError, ModuleNotFoundError): + print(""" +openPMD-api was built with support for MPI, +but mpi4py Python package was not found. 
+Will continue in serial mode.""", + file=sys.stderr) + HAVE_MPI = False +else: HAVE_MPI = False debug = False diff --git a/src/cli/ls.cpp b/src/cli/ls.cpp index bea296a5a2..65bb226602 100644 --- a/src/cli/ls.cpp +++ b/src/cli/ls.cpp @@ -24,14 +24,11 @@ #include #include - -int main( - int argc, - char * argv[] -) +int main(int argc, char *argv[]) { - std::vector< std::string > str_argv; - for( int i = 0; i < argc; ++i ) str_argv.emplace_back(argv[i]); + std::vector str_argv; + for (int i = 0; i < argc; ++i) + str_argv.emplace_back(argv[i]); - return openPMD::cli::ls::run( str_argv ); + return openPMD::cli::ls::run(str_argv); } diff --git a/src/cli/pipe.py b/src/cli/pipe.py index df41971405..1d2ec3a05f 100755 --- a/src/cli/pipe.py +++ b/src/cli/pipe.py @@ -9,9 +9,10 @@ Authors: Franz Poeschel License: LGPLv3+ """ -import openpmd_api.pipe.__main__ as pipe import sys +import openpmd_api.pipe.__main__ as pipe + if __name__ == "__main__": pipe.main() sys.exit() diff --git a/src/config.cpp b/src/config.cpp index 06b9c58640..d37d9cc88e 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -28,23 +28,19 @@ #include #include - -std::map< std::string, bool > -openPMD::getVariants( ) +std::map openPMD::getVariants() { - return std::map< std::string, bool >{ - {"mpi", bool(openPMD_HAVE_MPI)}, - {"json", true}, - {"hdf5", bool(openPMD_HAVE_HDF5)}, - {"adios1", bool(openPMD_HAVE_ADIOS1)}, - {"adios2", bool(openPMD_HAVE_ADIOS2)} - }; + return std::map{ + {"mpi", bool(openPMD_HAVE_MPI)}, + {"json", true}, + {"hdf5", bool(openPMD_HAVE_HDF5)}, + {"adios1", bool(openPMD_HAVE_ADIOS1)}, + {"adios2", bool(openPMD_HAVE_ADIOS2)}}; } -std::vector< std::string > -openPMD::getFileExtensions() +std::vector openPMD::getFileExtensions() { - std::vector< std::string > fext; + std::vector fext; fext.emplace_back("json"); #if openPMD_HAVE_ADIOS1 || openPMD_HAVE_ADIOS2 fext.emplace_back("bp"); diff --git a/src/helper/list_series.cpp b/src/helper/list_series.cpp index 2ce7ca167e..370d8a0e4e 100644 --- a/src/helper/list_series.cpp +++ b/src/helper/list_series.cpp @@ -25,102 +25,142 @@ #include "openPMD/Mesh.hpp" #include "openPMD/ParticleSpecies.hpp" -#include #include #include #include - +#include namespace openPMD { namespace helper { std::ostream & - listSeries( - Series & series, - bool const longer, - std::ostream & out - ) + listSeries(Series &series, bool const longer, std::ostream &out) { out << "openPMD series: " << series.name() << "\n"; out << "openPMD standard: " << series.openPMD() << "\n"; - out << "openPMD extensions: " << series.openPMDextension() << "\n\n"; // TODO improve listing of extensions + out << "openPMD extensions: " << series.openPMDextension() + << "\n\n"; // TODO improve listing of extensions - if( longer ) + if (longer) { out << "data author: "; - try{ out << series.author() << "\n"; } catch( no_such_attribute_error const & ) { out << "unknown\n"; } + try + { + out << series.author() << "\n"; + } + catch (no_such_attribute_error const &) + { + out << "unknown\n"; + } out << "data created: "; - try{ out << series.date() << "\n"; } catch( no_such_attribute_error const & ) { out << "unknown\n"; } + try + { + out << series.date() << "\n"; + } + catch (no_such_attribute_error const &) + { + out << "unknown\n"; + } out << "data backend: " << series.backend() << "\n"; out << "generating machine: "; - try{ out << series.machine() << "\n"; } catch( no_such_attribute_error const & ) { out << "unknown\n"; } + try + { + out << series.machine() << "\n"; + } + catch (no_such_attribute_error const &) + { + out 
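// Illustrative sketch (not from this patch): typical runtime use of the two
// helpers reformatted in config.cpp above, to discover which backends and
// file extensions a particular build of openPMD-api provides.
//
//   for (auto const &variant : openPMD::getVariants())
//       std::cout << variant.first << (variant.second ? ": on" : ": off")
//                 << std::endl;
//   for (auto const &extension : openPMD::getFileExtensions())
//       std::cout << "recognized file extension: ." << extension << std::endl;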
<< "unknown\n"; + } out << "generating software: "; - try{ out << series.software(); } catch( no_such_attribute_error const & ) { out << "unknown"; } - out << " (version: "; - try{ out << series.softwareVersion() << ")\n"; } catch( no_such_attribute_error const & ) { out << "unknown)\n"; } + try + { + out << series.software(); + } + catch (no_such_attribute_error const &) + { + out << "unknown"; + } + out << " (version: "; + try + { + out << series.softwareVersion() << ")\n"; + } + catch (no_such_attribute_error const &) + { + out << "unknown)\n"; + } out << "generating software dependencies: "; - try{ out << series.softwareDependencies() << "\n"; } catch( no_such_attribute_error const & ) { out << "unknown\n"; } + try + { + out << series.softwareDependencies() << "\n"; + } + catch (no_such_attribute_error const &) + { + out << "unknown\n"; + } out << "\n"; } - std::set< std::string > meshes; //! unique mesh names in all iterations - std::set< std::string > particles; //! unique particle species names in all iterations + std::set meshes; //! unique mesh names in all iterations + std::set + particles; //! unique particle species names in all iterations out << "number of iterations: " << series.iterations.size(); - if( longer ) + if (longer) out << " (" << series.iterationEncoding() << ")"; out << "\n"; - if( series.iterations.size() > 0u ) + if (series.iterations.size() > 0u) { - if( longer ) + if (longer) out << " all iterations: "; - for( auto const& i : series.readIterations() ) { - if( longer ) + for (auto const &i : series.readIterations()) + { + if (longer) out << i.iterationIndex << " "; // find unique record names std::transform( i.meshes.begin(), i.meshes.end(), - std::inserter( meshes, meshes.end() ), - []( std::pair< std::string, Mesh > const & p ) - { return p.first; } - ); + std::inserter(meshes, meshes.end()), + [](std::pair const &p) { + return p.first; + }); std::transform( i.particles.begin(), i.particles.end(), - std::inserter( particles, particles.end() ), - []( std::pair< std::string, ParticleSpecies > const & p ) - { return p.first; } - ); + std::inserter(particles, particles.end()), + [](std::pair const &p) { + return p.first; + }); } - if( longer ) + if (longer) out << "\n"; } out << "\n"; out << "number of meshes: " << meshes.size() << "\n"; - if( longer && meshes.size() > 0u ) + if (longer && meshes.size() > 0u) { out << " all meshes:\n"; - for( auto const& m : meshes ) + for (auto const &m : meshes) out << " " << m << "\n"; } out << "\n"; out << "number of particle species: " << particles.size() << "\n"; - if( longer && particles.size() > 0u ) + if (longer && particles.size() > 0u) { out << " all particle species:\n"; - for( auto const& p : particles ) + for (auto const &p : particles) out << " " << p << "\n"; } return out; } -} // helper -} // openPMD +} // namespace helper +} // namespace openPMD diff --git a/src/version.cpp b/src/version.cpp index 7cba20f98d..6fa0a9ecfa 100644 --- a/src/version.cpp +++ b/src/version.cpp @@ -23,33 +23,27 @@ #include #include - -std::string -openPMD::getVersion( ) +std::string openPMD::getVersion() { std::stringstream api; - api << OPENPMDAPI_VERSION_MAJOR << "." - << OPENPMDAPI_VERSION_MINOR << "." + api << OPENPMDAPI_VERSION_MAJOR << "." << OPENPMDAPI_VERSION_MINOR << "." 
<< OPENPMDAPI_VERSION_PATCH; - if( std::string( OPENPMDAPI_VERSION_LABEL ).size() > 0 ) + if (std::string(OPENPMDAPI_VERSION_LABEL).size() > 0) api << "-" << OPENPMDAPI_VERSION_LABEL; std::string const apistr = api.str(); return apistr; } -std::string -openPMD::getStandard( ) +std::string openPMD::getStandard() { std::stringstream standard; - standard << OPENPMD_STANDARD_MAJOR << "." - << OPENPMD_STANDARD_MINOR << "." + standard << OPENPMD_STANDARD_MAJOR << "." << OPENPMD_STANDARD_MINOR << "." << OPENPMD_STANDARD_PATCH; std::string const standardstr = standard.str(); return standardstr; } -std::string -openPMD::getStandardMinimum( ) +std::string openPMD::getStandardMinimum() { std::stringstream standardMin; standardMin << OPENPMD_STANDARD_MIN_MAJOR << "." diff --git a/test/AuxiliaryTest.cpp b/test/AuxiliaryTest.cpp index f296913c20..f371aa6ebc 100644 --- a/test/AuxiliaryTest.cpp +++ b/test/AuxiliaryTest.cpp @@ -1,20 +1,20 @@ // expose private and protected members for invasive testing #if openPMD_USE_INVASIVE_TESTS -# define OPENPMD_private public -# define OPENPMD_protected public +#define OPENPMD_private public +#define OPENPMD_protected public #endif -#include "openPMD/config.hpp" -#include "openPMD/backend/Writable.hpp" -#include "openPMD/backend/Attributable.hpp" -#include "openPMD/backend/Container.hpp" +#include "openPMD/Dataset.hpp" +#include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/IO/AbstractIOHandlerHelper.hpp" #include "openPMD/auxiliary/DerefDynamicCast.hpp" #include "openPMD/auxiliary/Filesystem.hpp" #include "openPMD/auxiliary/Option.hpp" #include "openPMD/auxiliary/StringManip.hpp" #include "openPMD/auxiliary/Variant.hpp" -#include "openPMD/IO/AbstractIOHandler.hpp" -#include "openPMD/IO/AbstractIOHandlerHelper.hpp" -#include "openPMD/Dataset.hpp" +#include "openPMD/backend/Attributable.hpp" +#include "openPMD/backend/Container.hpp" +#include "openPMD/backend/Writable.hpp" +#include "openPMD/config.hpp" #include @@ -27,23 +27,23 @@ using namespace openPMD; - namespace openPMD { namespace test { -struct TestHelper : public LegacyAttributable -{ - TestHelper() + struct TestHelper : public LegacyAttributable { - writable().IOHandler = createIOHandler(".", Access::CREATE, Format::JSON); - } -}; -} // test -} // openPMD - - -TEST_CASE( "optional", "[auxiliary]" ) { + TestHelper() + { + writable().IOHandler = + createIOHandler(".", Access::CREATE, Format::JSON); + } + }; +} // namespace test +} // namespace openPMD + +TEST_CASE("optional", "[auxiliary]") +{ using namespace auxiliary; Option opt; @@ -58,45 +58,59 @@ TEST_CASE( "optional", "[auxiliary]" ) { REQUIRE(opt.has_value()); REQUIRE(opt.get() == 43); - Option opt2{ opt }; + Option opt2{opt}; REQUIRE(opt2); REQUIRE(opt2.has_value()); REQUIRE(opt2.get() == 43); - Option opt3 = makeOption( 3 ); + Option opt3 = makeOption(3); REQUIRE(opt3); REQUIRE(opt3.has_value()); REQUIRE(opt3.get() == 3); } - -TEST_CASE( "deref_cast_test", "[auxiliary]" ) { +TEST_CASE("deref_cast_test", "[auxiliary]") +{ using namespace auxiliary; - struct A { double m_x; A(double x) : m_x(x){} virtual ~A() = default; }; - struct B : virtual A { B(double x) : A(x){}}; - struct C { float m_x; }; + struct A + { + double m_x; + A(double x) : m_x(x) + {} + virtual ~A() = default; + }; + struct B : virtual A + { + B(double x) : A(x) + {} + }; + struct C + { + float m_x; + }; B const value = {123.45}; - B const * const ptr = &value; + B const *const ptr = &value; auto const a = deref_dynamic_cast(ptr); - auto const& ra = deref_dynamic_cast(ptr); + 
auto const &ra = deref_dynamic_cast(ptr); (void)a; (void)ra; REQUIRE_THROWS_AS(deref_dynamic_cast(ptr), std::runtime_error); - A * const nptr = nullptr; + A *const nptr = nullptr; REQUIRE_THROWS_AS(deref_dynamic_cast(nptr), std::runtime_error); } -TEST_CASE( "string_test", "[auxiliary]" ) +TEST_CASE("string_test", "[auxiliary]") { using namespace auxiliary; - std::string s = "Man muss noch Chaos in sich haben, " - "um einen tanzenden Stern gebaeren zu koennen."; + std::string s = + "Man muss noch Chaos in sich haben, " + "um einen tanzenden Stern gebaeren zu koennen."; REQUIRE(starts_with(s, 'M')); REQUIRE(starts_with(s, "Man")); REQUIRE(starts_with(s, "Man muss noch")); @@ -114,24 +128,32 @@ TEST_CASE( "string_test", "[auxiliary]" ) REQUIRE("String" == replace_first("string", "s", "S")); REQUIRE("sTRING" == replace_first("string", "tring", "TRING")); REQUIRE("string" == replace_first("string", " ", "_")); - REQUIRE("strinGstringstring" == replace_first("stringstringstring", "g", "G")); - REQUIRE("#stringstring" == replace_first("stringstringstring", "string", "#")); + REQUIRE( + "strinGstringstring" == replace_first("stringstringstring", "g", "G")); + REQUIRE( + "#stringstring" == replace_first("stringstringstring", "string", "#")); - REQUIRE("stringstringstrinG" == replace_last("stringstringstring", "g", "G")); - REQUIRE("stringstring#" == replace_last("stringstringstring", "string", "#")); + REQUIRE( + "stringstringstrinG" == replace_last("stringstringstring", "g", "G")); + REQUIRE( + "stringstring#" == replace_last("stringstringstring", "string", "#")); REQUIRE("/normal/path" == replace_all("////normal//////path", "//", "/")); - std::vector< std::string > expected1{"0", "string", " ", "1234", "te st"}; - std::vector< std::string > expected2{"0_DELIM_", "string_DELIM_", " _DELIM_", "1234_DELIM_", "te st_DELIM_"}; - std::vector< std::string > expected3{"path", "to", "relevant", "data"}; - std::string s2 = "_DELIM_0_DELIM_string_DELIM_ _DELIM_1234_DELIM_te st_DELIM_"; + std::vector expected1{"0", "string", " ", "1234", "te st"}; + std::vector expected2{ + "0_DELIM_", "string_DELIM_", " _DELIM_", "1234_DELIM_", "te st_DELIM_"}; + std::vector expected3{"path", "to", "relevant", "data"}; + std::string s2 = + "_DELIM_0_DELIM_string_DELIM_ _DELIM_1234_DELIM_te st_DELIM_"; REQUIRE(expected1 == split(s2, "_DELIM_", false)); REQUIRE(expected2 == split(s2, "_DELIM_", true)); REQUIRE(expected3 == split("/path/to/relevant/data/", "/")); - REQUIRE("stringstringstring" == strip("\t string\tstring string\0", { '\0', '\t', ' '})); - REQUIRE("stringstringstring" == strip("stringstringstring", { })); + REQUIRE( + "stringstringstring" == + strip("\t string\tstring string\0", {'\0', '\t', ' '})); + REQUIRE("stringstringstring" == strip("stringstringstring", {})); REQUIRE("1,2,3,4" == join({"1", "2", "3", "4"}, ",")); REQUIRE("1234" == join({"1", "2", "3", "4"}, "")); @@ -145,19 +167,18 @@ namespace openPMD { namespace test { -struct S : public TestHelper -{ - S() - : TestHelper() - { } -}; -} // test -} // openPMD - -TEST_CASE( "container_default_test", "[auxiliary]") + struct S : public TestHelper + { + S() : TestHelper() + {} + }; +} // namespace test +} // namespace openPMD + +TEST_CASE("container_default_test", "[auxiliary]") { #if openPMD_USE_INVASIVE_TESTS - Container< openPMD::test::S > c = Container< openPMD::test::S >(); + Container c = Container(); c.writable().IOHandler = createIOHandler(".", Access::CREATE, Format::JSON); REQUIRE(c.empty()); @@ -171,31 +192,41 @@ namespace openPMD { namespace 
test { -struct structure : public TestHelper -{ - structure() - : TestHelper() - { } - - std::string string_ = "Hello, world!"; - int int_ = 42; - float float_ = 3.14f; + struct structure : public TestHelper + { + structure() : TestHelper() + {} - std::string text() const { return variantSrc::get< std::string >(getAttribute("text").getResource()); } - structure& setText(std::string newText) { setAttribute("text", newText); return *this; } -}; -} // test -} // openPMD + std::string string_ = "Hello, world!"; + int int_ = 42; + float float_ = 3.14f; -TEST_CASE( "container_retrieve_test", "[auxiliary]" ) + std::string text() const + { + return variantSrc::get( + getAttribute("text").getResource()); + } + structure &setText(std::string newText) + { + setAttribute("text", newText); + return *this; + } + }; +} // namespace test +} // namespace openPMD + +TEST_CASE("container_retrieve_test", "[auxiliary]") { #if openPMD_USE_INVASIVE_TESTS using structure = openPMD::test::structure; - Container< structure > c = Container< structure >(); + Container c = Container(); c.writable().IOHandler = createIOHandler(".", Access::CREATE, Format::JSON); structure s; - std::string text = "The openPMD standard, short for open standard for particle-mesh data files is not a file format per se. It is a standard for meta data and naming schemes."; + std::string text = + "The openPMD standard, short for open standard for particle-mesh data " + "files is not a file format per se. It is a standard for meta data and " + "naming schemes."; s.setText(text); c["entry"] = s; REQUIRE(c["entry"].string_ == "Hello, world!"); @@ -204,7 +235,6 @@ TEST_CASE( "container_retrieve_test", "[auxiliary]" ) REQUIRE(c["entry"].text() == text); REQUIRE(s.text() == text); - structure s2 = c["entry"]; REQUIRE(s2.string_ == "Hello, world!"); REQUIRE(s2.int_ == 42); @@ -212,7 +242,6 @@ TEST_CASE( "container_retrieve_test", "[auxiliary]" ) REQUIRE(s2.text() == text); REQUIRE(c["entry"].text() == text); - s2.string_ = "New string"; s2.int_ = -1; s2.float_ = 0.0f; @@ -250,24 +279,22 @@ namespace openPMD { namespace test { -struct Widget : public TestHelper -{ - Widget() - : TestHelper() - { } - - Widget(int) - : TestHelper() - { } -}; -} // test -} // openPMD - -TEST_CASE( "container_access_test", "[auxiliary]" ) + struct Widget : public TestHelper + { + Widget() : TestHelper() + {} + + Widget(int) : TestHelper() + {} + }; +} // namespace test +} // namespace openPMD + +TEST_CASE("container_access_test", "[auxiliary]") { #if openPMD_USE_INVASIVE_TESTS using Widget = openPMD::test::Widget; - Container< Widget > c = Container< Widget >(); + Container c = Container(); c.writable().IOHandler = createIOHandler(".", Access::CREATE, Format::JSON); c["1firstWidget"] = Widget(0); @@ -300,7 +327,7 @@ TEST_CASE( "container_access_test", "[auxiliary]" ) #endif } -TEST_CASE( "attributable_default_test", "[auxiliary]" ) +TEST_CASE("attributable_default_test", "[auxiliary]") { LegacyAttributable a; @@ -311,38 +338,37 @@ namespace openPMD { namespace test { -struct AttributedWidget : public TestHelper -{ - AttributedWidget() - : TestHelper() - { } - - Attribute::resource get(std::string key) + struct AttributedWidget : public TestHelper { - return getAttribute(key).getResource(); - } -}; -} // test -} // openPMD + AttributedWidget() : TestHelper() + {} -TEST_CASE( "attributable_access_test", "[auxiliary]" ) + Attribute::resource get(std::string key) + { + return getAttribute(key).getResource(); + } + }; +} // namespace test +} // namespace openPMD + 
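// Illustrative sketch (not from this patch): the public attribute interface
// that the following test cases drive through the invasive TestHelper. The
// same calls are available on any openPMD object; file name and attribute
// key are placeholders.
//
//   Series s("../samples/attribute_sketch.json", Access::CREATE);
//   s.setAttribute("comment", std::string("free-form annotation"));
//   std::cout << s.getAttribute("comment").get<std::string>() << std::endl;
//   s.deleteAttribute("comment");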
+TEST_CASE("attributable_access_test", "[auxiliary]") { using AttributedWidget = openPMD::test::AttributedWidget; AttributedWidget a = AttributedWidget(); a.setAttribute("key", std::string("value")); REQUIRE(a.numAttributes() == 1); - REQUIRE(variantSrc::get< std::string >(a.get("key")) == "value"); + REQUIRE(variantSrc::get(a.get("key")) == "value"); a.setAttribute("key", std::string("newValue")); REQUIRE(a.numAttributes() == 1); - REQUIRE(variantSrc::get< std::string >(a.get("key")) == "newValue"); + REQUIRE(variantSrc::get(a.get("key")) == "newValue"); - using array_t = std::array< double, 7 >; + using array_t = std::array; array_t arr{{1, 2, 3, 4, 5, 6, 7}}; a.setAttribute("array", arr); REQUIRE(a.numAttributes() == 2); - REQUIRE(variantSrc::get< array_t >(a.get("array")) == arr); + REQUIRE(variantSrc::get(a.get("array")) == arr); REQUIRE(a.deleteAttribute("nonExistentKey") == false); REQUIRE(a.numAttributes() == 2); REQUIRE(a.deleteAttribute("key") == true); @@ -359,27 +385,48 @@ namespace openPMD { namespace test { -struct Dotty : public TestHelper -{ - Dotty() - : TestHelper() + struct Dotty : public TestHelper { - setAtt1(1); - setAtt2(2); - setAtt3("3"); - } - - int att1() const { return variantSrc::get< int >(getAttribute("att1").getResource()); } - double att2() const { return variantSrc::get< double >(getAttribute("att2").getResource()); } - std::string att3() const { return variantSrc::get< std::string >(getAttribute("att3").getResource()); } - Dotty& setAtt1(int i) { setAttribute("att1", i); return *this; } - Dotty& setAtt2(double d) { setAttribute("att2", d); return *this; } - Dotty& setAtt3(std::string s) { setAttribute("att3", s); return *this; } -}; -} // test -} // openPMD - -TEST_CASE( "dot_test", "[auxiliary]" ) + Dotty() : TestHelper() + { + setAtt1(1); + setAtt2(2); + setAtt3("3"); + } + + int att1() const + { + return variantSrc::get(getAttribute("att1").getResource()); + } + double att2() const + { + return variantSrc::get(getAttribute("att2").getResource()); + } + std::string att3() const + { + return variantSrc::get( + getAttribute("att3").getResource()); + } + Dotty &setAtt1(int i) + { + setAttribute("att1", i); + return *this; + } + Dotty &setAtt2(double d) + { + setAttribute("att2", d); + return *this; + } + Dotty &setAtt3(std::string s) + { + setAttribute("att3", s); + return *this; + } + }; +} // namespace test +} // namespace openPMD + +TEST_CASE("dot_test", "[auxiliary]") { openPMD::test::Dotty d; REQUIRE(d.att1() == 1); @@ -392,7 +439,7 @@ TEST_CASE( "dot_test", "[auxiliary]" ) REQUIRE(d.att3() == "30"); } -TEST_CASE( "filesystem_test", "[auxiliary]" ) +TEST_CASE("filesystem_test", "[auxiliary]") { using auxiliary::create_directories; using auxiliary::file_exists; @@ -401,27 +448,25 @@ TEST_CASE( "filesystem_test", "[auxiliary]" ) using auxiliary::remove_directory; using auxiliary::remove_file; - auto contains = - [](std::vector< std::string > const & entries, std::string const & path) -> bool - { return std::find(entries.cbegin(), entries.cend(), path) != entries.cend(); }; - - auto random_string = - [](std::string::size_type length) -> std::string - { - auto randchar = - []() -> char - { - char const charset[] = - "0123456789" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "abcdefghijklmnopqrstuvwxyz"; - size_t const max_index = (sizeof(charset) - 1); - return charset[rand() % max_index]; - }; - std::string str(length, 0); - std::generate_n(str.begin(), length, randchar); - return str; + auto contains = [](std::vector const &entries, + std::string const &path) -> 
bool { + return std::find(entries.cbegin(), entries.cend(), path) != + entries.cend(); + }; + + auto random_string = [](std::string::size_type length) -> std::string { + auto randchar = []() -> char { + char const charset[] = + "0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz"; + size_t const max_index = (sizeof(charset) - 1); + return charset[rand() % max_index]; }; + std::string str(length, 0); + std::generate_n(str.begin(), length, randchar); + return str; + }; #ifdef _WIN32 REQUIRE(directory_exists("C:\\")); @@ -436,7 +481,7 @@ TEST_CASE( "filesystem_test", "[auxiliary]" ) REQUIRE(!contains(dir_entries, "nonexistent_folder_in_C_drive")); std::string new_directory = random_string(10); - while( directory_exists(new_directory) ) + while (directory_exists(new_directory)) new_directory = random_string(10); REQUIRE(create_directories(new_directory)); REQUIRE(create_directories(new_directory)); @@ -458,9 +503,9 @@ TEST_CASE( "filesystem_test", "[auxiliary]" ) REQUIRE(!remove_file(".\\nonexistent_file_in_cmake_bin_directory")); #else REQUIRE(directory_exists("/")); - //REQUIRE(directory_exists("/boot")); - //REQUIRE(directory_exists("/etc")); - //REQUIRE(directory_exists("/home")); + // REQUIRE(directory_exists("/boot")); + // REQUIRE(directory_exists("/etc")); + // REQUIRE(directory_exists("/home")); REQUIRE(!directory_exists("/nonexistent_folder_in_root_directory")); REQUIRE(directory_exists("../bin")); @@ -469,14 +514,14 @@ TEST_CASE( "filesystem_test", "[auxiliary]" ) auto dir_entries = list_directory("/"); REQUIRE(!dir_entries.empty()); - //REQUIRE(contains(dir_entries, "boot")); - //REQUIRE(contains(dir_entries, "etc")); - //REQUIRE(contains(dir_entries, "home")); - //REQUIRE(contains(dir_entries, "root")); + // REQUIRE(contains(dir_entries, "boot")); + // REQUIRE(contains(dir_entries, "etc")); + // REQUIRE(contains(dir_entries, "home")); + // REQUIRE(contains(dir_entries, "root")); REQUIRE(!contains(dir_entries, "nonexistent_folder_in_root_directory")); std::string new_directory = random_string(10); - while( directory_exists(new_directory) ) + while (directory_exists(new_directory)) new_directory = random_string(10); std::string new_sub_directory = new_directory + "/" + random_string(10); REQUIRE(create_directories(new_sub_directory)); diff --git a/test/CatchMain.cpp b/test/CatchMain.cpp index aeef03d840..4ed06df1f7 100644 --- a/test/CatchMain.cpp +++ b/test/CatchMain.cpp @@ -1,4 +1,2 @@ #define CATCH_CONFIG_MAIN #include - - diff --git a/test/CatchRunner.cpp b/test/CatchRunner.cpp index 3603e6ef70..d24a5b27e6 100644 --- a/test/CatchRunner.cpp +++ b/test/CatchRunner.cpp @@ -2,7 +2,7 @@ #include #if openPMD_HAVE_MPI -# include +#include int main(int argc, char *argv[]) { @@ -12,9 +12,9 @@ int main(int argc, char *argv[]) int result = 0; { // Indicates a command line parsing - result = session.applyCommandLine( argc, argv ); + result = session.applyCommandLine(argc, argv); // RT tests - if( result == 0 ) + if (result == 0) result = session.run(); } MPI_Finalize(); @@ -27,9 +27,9 @@ int main(int argc, char *argv[]) int result = 0; { // Indicates a command line parsing - result = session.applyCommandLine( argc, argv ); + result = session.applyCommandLine(argc, argv); // RT tests - if( result == 0 ) + if (result == 0) result = session.run(); } return result; diff --git a/test/CoreTest.cpp b/test/CoreTest.cpp index 20ce35e8ed..cffea7009f 100644 --- a/test/CoreTest.cpp +++ b/test/CoreTest.cpp @@ -1,150 +1,151 @@ // expose private and protected members for invasive 
testing #if openPMD_USE_INVASIVE_TESTS -# define OPENPMD_private public -# define OPENPMD_protected public +#define OPENPMD_private public +#define OPENPMD_protected public #endif #include "openPMD/openPMD.hpp" #include #include -#include -#include #include #include #include #include -#include #include +#include +#include +#include using namespace openPMD; -TEST_CASE( "versions_test", "[core]" ) +TEST_CASE("versions_test", "[core]") { - auto const apiVersion = getVersion( ); - auto const is_dot = []( char const c ){ return c == '.'; }; + auto const apiVersion = getVersion(); + auto const is_dot = [](char const c) { return c == '.'; }; REQUIRE(2u == std::count_if(apiVersion.begin(), apiVersion.end(), is_dot)); - auto const standard = getStandard( ); + auto const standard = getStandard(); REQUIRE(standard == "1.1.0"); - auto const standardMin = getStandardMinimum( ); + auto const standardMin = getStandardMinimum(); REQUIRE(standardMin == "1.0.0"); - auto const featureVariants = getVariants( ); + auto const featureVariants = getVariants(); REQUIRE(featureVariants.at("json") == true); } -TEST_CASE( "attribute_dtype_test", "[core]" ) +TEST_CASE("attribute_dtype_test", "[core]") { // note: due to a C++17 issue with ICC 19.1.2 we write the // T value to variant conversion explicitly // https://github.com/openPMD/openPMD-api/pull/... // Attribute a = Attribute(static_cast< char >(' ')); - Attribute a = Attribute(static_cast(static_cast< char >(' '))); + Attribute a = + Attribute(static_cast(static_cast(' '))); REQUIRE(Datatype::CHAR == a.dtype); - a = Attribute(static_cast< unsigned char >(' ')); + a = Attribute(static_cast(' ')); REQUIRE(Datatype::UCHAR == a.dtype); - a = Attribute(static_cast< short >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::SHORT == a.dtype); - a = Attribute(static_cast< int >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::INT == a.dtype); - a = Attribute(static_cast< long >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::LONG == a.dtype); - a = Attribute(static_cast< long long >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::LONGLONG == a.dtype); - a = Attribute(static_cast< unsigned short >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::USHORT == a.dtype); - a = Attribute(static_cast< unsigned int >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::UINT == a.dtype); - a = Attribute(static_cast< unsigned long >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::ULONG == a.dtype); - a = Attribute(static_cast< unsigned long long >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::ULONGLONG == a.dtype); - a = Attribute(static_cast< float >(0.)); + a = Attribute(static_cast(0.)); REQUIRE(Datatype::FLOAT == a.dtype); - a = Attribute(static_cast< double >(0.)); + a = Attribute(static_cast(0.)); REQUIRE(Datatype::DOUBLE == a.dtype); - a = Attribute(static_cast< long double >(0.)); + a = Attribute(static_cast(0.)); REQUIRE(Datatype::LONG_DOUBLE == a.dtype); - a = Attribute(static_cast< std::complex< float > >(0.)); + a = Attribute(static_cast >(0.)); REQUIRE(Datatype::CFLOAT == a.dtype); - a = Attribute(static_cast< std::complex< double > >(0.)); + a = Attribute(static_cast >(0.)); REQUIRE(Datatype::CDOUBLE == a.dtype); - a = Attribute(static_cast< std::complex< long double > >(0.)); + a = Attribute(static_cast >(0.)); REQUIRE(Datatype::CLONG_DOUBLE == a.dtype); a = Attribute(std::string("")); REQUIRE(Datatype::STRING == a.dtype); - a = Attribute(std::vector< char >()); + a = Attribute(std::vector()); 
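// Illustrative sketch (not from this patch): the deduction rule this test
// walks through type by type, namely that constructing an Attribute from a
// value records the matching Datatype and that determineDatatype<T>()
// reports the same mapping.
//
//   Attribute attr{std::vector<float>{1.0f, 2.0f}};
//   REQUIRE(attr.dtype == Datatype::VEC_FLOAT);
//   REQUIRE(determineDatatype<float>() == Datatype::FLOAT);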
REQUIRE(Datatype::VEC_CHAR == a.dtype); - a = Attribute(std::vector< short >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_SHORT == a.dtype); - a = Attribute(std::vector< int >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_INT == a.dtype); - a = Attribute(std::vector< long >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_LONG == a.dtype); - a = Attribute(std::vector< long long >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_LONGLONG == a.dtype); - a = Attribute(std::vector< unsigned char >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_UCHAR == a.dtype); - a = Attribute(std::vector< unsigned short >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_USHORT == a.dtype); - a = Attribute(std::vector< unsigned int >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_UINT == a.dtype); - a = Attribute(std::vector< unsigned long >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_ULONG == a.dtype); - a = Attribute(std::vector< unsigned long long >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_ULONGLONG == a.dtype); - a = Attribute(std::vector< float >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_FLOAT == a.dtype); - a = Attribute(std::vector< double >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_DOUBLE == a.dtype); - a = Attribute(std::vector< long double >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_LONG_DOUBLE == a.dtype); - a = Attribute(std::vector< std::string >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_STRING == a.dtype); - a = Attribute(std::array< double, 7 >()); + a = Attribute(std::array()); REQUIRE(Datatype::ARR_DBL_7 == a.dtype); - a = Attribute(static_cast< bool >(false)); + a = Attribute(static_cast(false)); REQUIRE(Datatype::BOOL == a.dtype); // fixed size integers - a = Attribute(static_cast< int16_t >(0)); - REQUIRE(determineDatatype< int16_t >() == a.dtype); - a = Attribute(static_cast< int32_t >(0)); - REQUIRE(determineDatatype< int32_t >() == a.dtype); - a = Attribute(static_cast< int64_t >(0)); - REQUIRE(determineDatatype< int64_t >() == a.dtype); - a = Attribute(static_cast< uint16_t >(0)); - REQUIRE(determineDatatype< uint16_t >() == a.dtype); - a = Attribute(static_cast< uint32_t >(0)); - REQUIRE(determineDatatype< uint32_t >() == a.dtype); - a = Attribute(static_cast< uint64_t >(0)); - REQUIRE(determineDatatype< uint64_t >() == a.dtype); + a = Attribute(static_cast(0)); + REQUIRE(determineDatatype() == a.dtype); + a = Attribute(static_cast(0)); + REQUIRE(determineDatatype() == a.dtype); + a = Attribute(static_cast(0)); + REQUIRE(determineDatatype() == a.dtype); + a = Attribute(static_cast(0)); + REQUIRE(determineDatatype() == a.dtype); + a = Attribute(static_cast(0)); + REQUIRE(determineDatatype() == a.dtype); + a = Attribute(static_cast(0)); + REQUIRE(determineDatatype() == a.dtype); // TODO fixed size floats // same implementation types (not necessary aliases) detection - if( sizeof(long) == sizeof(long long) ) + if (sizeof(long) == sizeof(long long)) { - a = Attribute(static_cast< long >(0)); + a = Attribute(static_cast(0)); REQUIRE(isSame(Datatype::LONGLONG, a.dtype)); #if !defined(_MSC_VER) REQUIRE(Datatype::LONGLONG == a.dtype); #endif - a = Attribute(static_cast< long long >(0)); + a = Attribute(static_cast(0)); REQUIRE(isSame(Datatype::LONG, a.dtype)); #if !defined(_MSC_VER) REQUIRE(Datatype::LONG == a.dtype); #endif } - if( sizeof(int) == sizeof(long) ) + if (sizeof(int) == sizeof(long)) { - a = Attribute(static_cast< 
long >(0)); + a = Attribute(static_cast(0)); REQUIRE(isSame(Datatype::INT, a.dtype)); #if !defined(_MSC_VER) REQUIRE(Datatype::INT == a.dtype); #endif - a = Attribute(static_cast< int >(0)); + a = Attribute(static_cast(0)); REQUIRE(isSame(Datatype::LONG, a.dtype)); #if !defined(_MSC_VER) REQUIRE(Datatype::LONG == a.dtype); @@ -152,116 +153,111 @@ TEST_CASE( "attribute_dtype_test", "[core]" ) } } -TEST_CASE( "myPath", "[core]" ) +TEST_CASE("myPath", "[core]") { #if openPMD_USE_INVASIVE_TESTS - using vec_t = std::vector< std::string >; - auto pathOf = []( AttributableInterface & attr ) - { + using vec_t = std::vector; + auto pathOf = [](AttributableInterface &attr) { auto res = attr.myPath(); #if false std::cout << "Directory:\t" << res.directory << "\nSeries name:\t" << res.seriesName << "\nSeries ext:\t" << res.seriesExtension << std::endl; #endif - REQUIRE( res.directory == "../samples/" ); - REQUIRE( res.seriesName == "myPath" ); - REQUIRE( res.seriesExtension == ".json" ); - REQUIRE( res.filePath() == "../samples/myPath.json" ); + REQUIRE(res.directory == "../samples/"); + REQUIRE(res.seriesName == "myPath"); + REQUIRE(res.seriesExtension == ".json"); + REQUIRE(res.filePath() == "../samples/myPath.json"); return res.group; }; - Series series( "../samples/myPath.json", Access::CREATE ); - REQUIRE( pathOf( series ) == vec_t{} ); - auto iteration = series.iterations[ 1234 ]; - REQUIRE( pathOf( iteration ) == vec_t{ "iterations", "1234" } ); + Series series("../samples/myPath.json", Access::CREATE); + REQUIRE(pathOf(series) == vec_t{}); + auto iteration = series.iterations[1234]; + REQUIRE(pathOf(iteration) == vec_t{"iterations", "1234"}); - auto writeSomething = []( auto & recordComponent ) - { - recordComponent.resetDataset( { Datatype::INT, { 100 } } ); - recordComponent.template makeConstant< int >( 5678 ); + auto writeSomething = [](auto &recordComponent) { + recordComponent.resetDataset({Datatype::INT, {100}}); + recordComponent.template makeConstant(5678); }; - REQUIRE( - pathOf( iteration.meshes ) == vec_t{ "iterations", "1234", "meshes" } ); + REQUIRE(pathOf(iteration.meshes) == vec_t{"iterations", "1234", "meshes"}); - auto scalarMesh = iteration.meshes[ "e_chargeDensity" ]; + auto scalarMesh = iteration.meshes["e_chargeDensity"]; REQUIRE( - pathOf( scalarMesh ) == - vec_t{ "iterations", "1234", "meshes", "e_chargeDensity" } ); - auto scalarMeshComponent = scalarMesh[ RecordComponent::SCALAR ]; + pathOf(scalarMesh) == + vec_t{"iterations", "1234", "meshes", "e_chargeDensity"}); + auto scalarMeshComponent = scalarMesh[RecordComponent::SCALAR]; REQUIRE( - pathOf( scalarMeshComponent ) == + pathOf(scalarMeshComponent) == vec_t{ "iterations", "1234", "meshes", "e_chargeDensity", - RecordComponent::SCALAR } ); - writeSomething( scalarMeshComponent ); + RecordComponent::SCALAR}); + writeSomething(scalarMeshComponent); - auto vectorMesh = iteration.meshes[ "E" ]; + auto vectorMesh = iteration.meshes["E"]; + REQUIRE(pathOf(vectorMesh) == vec_t{"iterations", "1234", "meshes", "E"}); + auto vectorMeshComponent = vectorMesh["x"]; REQUIRE( - pathOf( vectorMesh ) == vec_t{ "iterations", "1234", "meshes", "E" } ); - auto vectorMeshComponent = vectorMesh[ "x" ]; - REQUIRE( - pathOf( vectorMeshComponent ) == - vec_t{ "iterations", "1234", "meshes", "E", "x" } ); + pathOf(vectorMeshComponent) == + vec_t{"iterations", "1234", "meshes", "E", "x"}); REQUIRE( - pathOf( iteration.particles ) == - vec_t{ "iterations", "1234", "particles" } ); + pathOf(iteration.particles) == + vec_t{"iterations", 
"1234", "particles"}); - auto speciesE = iteration.particles[ "e" ]; - REQUIRE( - pathOf( speciesE ) == vec_t{ "iterations", "1234", "particles", "e" } ); + auto speciesE = iteration.particles["e"]; + REQUIRE(pathOf(speciesE) == vec_t{"iterations", "1234", "particles", "e"}); - auto speciesPosition = speciesE[ "position" ]; + auto speciesPosition = speciesE["position"]; REQUIRE( - pathOf( speciesPosition ) == - vec_t{ "iterations", "1234", "particles", "e", "position" } ); + pathOf(speciesPosition) == + vec_t{"iterations", "1234", "particles", "e", "position"}); - auto speciesPositionX = speciesPosition[ "x" ]; + auto speciesPositionX = speciesPosition["x"]; REQUIRE( - pathOf( speciesPositionX ) == - vec_t{ "iterations", "1234", "particles", "e", "position", "x" } ); - writeSomething( speciesPositionX ); + pathOf(speciesPositionX) == + vec_t{"iterations", "1234", "particles", "e", "position", "x"}); + writeSomething(speciesPositionX); - auto speciesWeighting = speciesE[ "weighting" ]; + auto speciesWeighting = speciesE["weighting"]; REQUIRE( - pathOf( speciesWeighting ) == - vec_t{ "iterations", "1234", "particles", "e", "weighting" } ); + pathOf(speciesWeighting) == + vec_t{"iterations", "1234", "particles", "e", "weighting"}); - auto speciesWeightingX = speciesWeighting[ RecordComponent::SCALAR ]; + auto speciesWeightingX = speciesWeighting[RecordComponent::SCALAR]; REQUIRE( - pathOf( speciesWeightingX ) == + pathOf(speciesWeightingX) == vec_t{ "iterations", "1234", "particles", "e", "weighting", - RecordComponent::SCALAR } ); - writeSomething( speciesWeightingX ); + RecordComponent::SCALAR}); + writeSomething(speciesWeightingX); REQUIRE( - pathOf( speciesE.particlePatches ) == - vec_t{ "iterations", "1234", "particles", "e", "particlePatches" } ); + pathOf(speciesE.particlePatches) == + vec_t{"iterations", "1234", "particles", "e", "particlePatches"}); - auto patchExtent = speciesE.particlePatches[ "extent" ]; + auto patchExtent = speciesE.particlePatches["extent"]; REQUIRE( - pathOf( patchExtent ) == + pathOf(patchExtent) == vec_t{ "iterations", "1234", "particles", "e", "particlePatches", - "extent" } ); + "extent"}); - auto patchExtentX = patchExtent[ "x" ]; + auto patchExtentX = patchExtent["x"]; REQUIRE( - pathOf( patchExtentX ) == + pathOf(patchExtentX) == vec_t{ "iterations", "1234", @@ -269,23 +265,23 @@ TEST_CASE( "myPath", "[core]" ) "e", "particlePatches", "extent", - "x" } ); + "x"}); - auto patchNumParticles = speciesE.particlePatches[ "numParticles" ]; + auto patchNumParticles = speciesE.particlePatches["numParticles"]; REQUIRE( - pathOf( patchNumParticles ) == + pathOf(patchNumParticles) == vec_t{ "iterations", "1234", "particles", "e", "particlePatches", - "numParticles" } ); + "numParticles"}); auto patchNumParticlesComponent = - patchNumParticles[ RecordComponent::SCALAR ]; + patchNumParticles[RecordComponent::SCALAR]; REQUIRE( - pathOf( patchNumParticlesComponent ) == + pathOf(patchNumParticlesComponent) == vec_t{ "iterations", "1234", @@ -293,11 +289,11 @@ TEST_CASE( "myPath", "[core]" ) "e", "particlePatches", "numParticles", - RecordComponent::SCALAR } ); + RecordComponent::SCALAR}); #endif } -TEST_CASE( "output_default_test", "[core]" ) +TEST_CASE("output_default_test", "[core]") { using IE = IterationEncoding; Series o = Series("./new_openpmd_output_%T.json", Access::CREATE); @@ -308,20 +304,24 @@ TEST_CASE( "output_default_test", "[core]" ) REQUIRE(o.iterationEncoding() == IE::fileBased); REQUIRE(o.iterationFormat() == "new_openpmd_output_%T"); 
REQUIRE(o.iterations.empty()); - REQUIRE(o.numAttributes() == 8); /* openPMD, openPMDextension, basePath, iterationEncoding, iterationFormat, date, software, softwareVersion */ + REQUIRE( + o.numAttributes() == + 8); /* openPMD, openPMDextension, basePath, iterationEncoding, + iterationFormat, date, software, softwareVersion */ REQUIRE(o.name() == "new_openpmd_output_%T"); o.iterations[0]; } -TEST_CASE( "output_constructor_test", "[core]" ) +TEST_CASE("output_constructor_test", "[core]") { using IE = IterationEncoding; Series o = Series("./MyCustomOutput.json", Access::CREATE); o.setMeshesPath("customMeshesPath").setParticlesPath("customParticlesPath"); - o.iterations[1].meshes["foo"]["baz"].resetDataset(Dataset(Datatype::DOUBLE, {1})); + o.iterations[1].meshes["foo"]["baz"].resetDataset( + Dataset(Datatype::DOUBLE, {1})); auto species = o.iterations[1].particles["bar"]; auto dset = Dataset(Datatype::DOUBLE, {1}); species["position"][RecordComponent::SCALAR].resetDataset(dset); @@ -335,11 +335,15 @@ TEST_CASE( "output_constructor_test", "[core]" ) REQUIRE(o.iterationEncoding() == IE::groupBased); REQUIRE(o.iterationFormat() == "/data/%T/"); REQUIRE(o.iterations.size() == 1); - REQUIRE(o.numAttributes() == 10); /* openPMD, openPMDextension, basePath, meshesPath, particlesPath, iterationEncoding, iterationFormat, date, software, softwareVersion */ + REQUIRE( + o.numAttributes() == + 10); /* openPMD, openPMDextension, basePath, meshesPath, particlesPath, + iterationEncoding, iterationFormat, date, software, + softwareVersion */ REQUIRE(o.name() == "MyCustomOutput"); } -TEST_CASE( "output_modification_test", "[core]" ) +TEST_CASE("output_modification_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); @@ -364,45 +368,45 @@ TEST_CASE( "output_modification_test", "[core]" ) o.iterations[0]; } -TEST_CASE( "iteration_default_test", "[core]" ) +TEST_CASE("iteration_default_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); - Iteration& i = o.iterations[42]; + Iteration &i = o.iterations[42]; - REQUIRE(i.time< double >() == static_cast(0)); - REQUIRE(i.dt< double >() == static_cast(1)); + REQUIRE(i.time() == static_cast(0)); + REQUIRE(i.dt() == static_cast(1)); REQUIRE(i.timeUnitSI() == static_cast(1)); REQUIRE(i.numAttributes() == 3); REQUIRE(i.meshes.empty()); REQUIRE(i.particles.empty()); } -TEST_CASE( "iteration_modification_test", "[core]" ) +TEST_CASE("iteration_modification_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); - Iteration& i = o.iterations[42]; + Iteration &i = o.iterations[42]; float time = 0.314f; i.setTime(time); - REQUIRE(i.time< float >() == time); + REQUIRE(i.time() == time); double dt = 0.42; i.setDt(dt); - REQUIRE(i.dt< long double >() == static_cast< long double >(dt)); + REQUIRE(i.dt() == static_cast(dt)); i.setTimeUnitSI(0.000000000001); - REQUIRE(i.timeUnitSI() == static_cast< double >(0.000000000001)); + REQUIRE(i.timeUnitSI() == static_cast(0.000000000001)); } -TEST_CASE( "particleSpecies_modification_test", "[core]" ) +TEST_CASE("particleSpecies_modification_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); - auto& particles = o.iterations[42].particles; + auto &particles = o.iterations[42].particles; REQUIRE(0 == particles.numAttributes()); - auto& species = particles["species"]; + auto &species = particles["species"]; REQUIRE(1 == particles.size()); REQUIRE(1 == particles.count("species")); REQUIRE(0 == species.numAttributes()); @@ -410,27 +414,26 @@ TEST_CASE( 
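// Illustrative sketch (not from this patch): the time metadata set in the
// test above. time and dt are stored in the iteration's own unit system and
// timeUnitSI scales that unit to seconds; the values are arbitrary.
//
//   Series o("./MyOutput_%T.json", Access::CREATE);
//   Iteration &it = o.iterations[42];
//   it.setTime(0.314f);
//   it.setDt(1.0f);
//   it.setTimeUnitSI(1.0e-12); // one time unit corresponds to one picosecond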
"particleSpecies_modification_test", "[core]" ) species["position"][RecordComponent::SCALAR].resetDataset(dset); species["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); REQUIRE(1 == species.count("positionOffset")); - auto& patches = species.particlePatches; + auto &patches = species.particlePatches; REQUIRE(2 == patches.size()); REQUIRE(0 == patches.numAttributes()); - auto& offset = patches["offset"]; + auto &offset = patches["offset"]; REQUIRE(0 == offset.size()); - REQUIRE(1 == offset.numAttributes()); //unitDimension - std::array< double, 7 > zeros{{0., 0., 0., 0., 0., 0., 0.}}; + REQUIRE(1 == offset.numAttributes()); // unitDimension + std::array zeros{{0., 0., 0., 0., 0., 0., 0.}}; REQUIRE(zeros == offset.unitDimension()); - auto& off_x = offset["x"]; + auto &off_x = offset["x"]; off_x.resetDataset(dset); REQUIRE(1 == off_x.unitSI()); } - -TEST_CASE( "record_constructor_test", "[core]" ) +TEST_CASE("record_constructor_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); ParticleSpecies ps = o.iterations[42].particles["species"]; - Record& r = ps["record"]; + Record &r = ps["record"]; auto dset = Dataset(Datatype::DOUBLE, {1}); ps["position"][RecordComponent::SCALAR].resetDataset(dset); ps["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); @@ -441,46 +444,43 @@ TEST_CASE( "record_constructor_test", "[core]" ) REQUIRE(r["y"].numAttributes() == 1); /* unitSI */ REQUIRE(r["z"].unitSI() == 1); REQUIRE(r["z"].numAttributes() == 1); /* unitSI */ - std::array< double, 7 > zeros{{0., 0., 0., 0., 0., 0., 0.}}; + std::array zeros{{0., 0., 0., 0., 0., 0., 0.}}; REQUIRE(r.unitDimension() == zeros); - REQUIRE(r.timeOffset< float >() == static_cast(0)); + REQUIRE(r.timeOffset() == static_cast(0)); REQUIRE(r.numAttributes() == 2); /* timeOffset, unitDimension */ } -TEST_CASE( "record_modification_test", "[core]" ) +TEST_CASE("record_modification_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); auto species = o.iterations[42].particles["species"]; - Record& r = species["position"]; + Record &r = species["position"]; auto dset = Dataset(Datatype::DOUBLE, {1}); species["position"][RecordComponent::SCALAR].resetDataset(dset); species["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); using RUD = UnitDimension; - r.setUnitDimension({{RUD::L, 1.}, - {RUD::M, 1.}, - {RUD::T, -3.}, - {RUD::I, -1.}}); - std::array< double, 7 > e_field_unitDimension{{1., 1., -3., -1., 0., 0., 0.}}; + r.setUnitDimension( + {{RUD::L, 1.}, {RUD::M, 1.}, {RUD::T, -3.}, {RUD::I, -1.}}); + std::array e_field_unitDimension{{1., 1., -3., -1., 0., 0., 0.}}; REQUIRE(r.unitDimension() == e_field_unitDimension); - r.setUnitDimension({{RUD::L, 0.}, - {RUD::T, -2.}}); - std::array< double, 7 > b_field_unitDimension{{0., 1., -2., -1., 0., 0., 0.}}; + r.setUnitDimension({{RUD::L, 0.}, {RUD::T, -2.}}); + std::array b_field_unitDimension{{0., 1., -2., -1., 0., 0., 0.}}; REQUIRE(r.unitDimension() == b_field_unitDimension); float timeOffset = 0.314f; r.setTimeOffset(timeOffset); - REQUIRE(r.timeOffset< float >() == timeOffset); + REQUIRE(r.timeOffset() == timeOffset); } -TEST_CASE( "recordComponent_modification_test", "[core]" ) +TEST_CASE("recordComponent_modification_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); ParticleSpecies ps = o.iterations[42].particles["species"]; - Record& r = ps["record"]; + Record &r = ps["record"]; auto dset = Dataset(Datatype::DOUBLE, {1}); 
ps["position"][RecordComponent::SCALAR].resetDataset(dset); ps["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); @@ -497,35 +497,38 @@ TEST_CASE( "recordComponent_modification_test", "[core]" ) REQUIRE(r["z"].numAttributes() == 1); /* unitSI */ } -TEST_CASE( "mesh_constructor_test", "[core]" ) +TEST_CASE("mesh_constructor_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); Mesh &m = o.iterations[42].meshes["E"]; - std::vector< double > pos{0}; + std::vector pos{0}; REQUIRE(m["x"].unitSI() == 1); REQUIRE(m["x"].numAttributes() == 2); /* unitSI, position */ - REQUIRE(m["x"].position< double >() == pos); + REQUIRE(m["x"].position() == pos); REQUIRE(m["y"].unitSI() == 1); REQUIRE(m["y"].numAttributes() == 2); /* unitSI, position */ - REQUIRE(m["y"].position< double >() == pos); + REQUIRE(m["y"].position() == pos); REQUIRE(m["z"].unitSI() == 1); REQUIRE(m["z"].numAttributes() == 2); /* unitSI, position */ - REQUIRE(m["z"].position< double >() == pos); + REQUIRE(m["z"].position() == pos); REQUIRE(m.geometry() == Mesh::Geometry::cartesian); REQUIRE(m.dataOrder() == Mesh::DataOrder::C); - std::vector< std::string > al{"x"}; + std::vector al{"x"}; REQUIRE(m.axisLabels() == al); - std::vector< double > gs{1}; - REQUIRE(m.gridSpacing< double >() == gs); - std::vector< double > ggo{0}; + std::vector gs{1}; + REQUIRE(m.gridSpacing() == gs); + std::vector ggo{0}; REQUIRE(m.gridGlobalOffset() == ggo); REQUIRE(m.gridUnitSI() == static_cast(1)); - REQUIRE(m.numAttributes() == 8); /* axisLabels, dataOrder, geometry, gridGlobalOffset, gridSpacing, gridUnitSI, timeOffset, unitDimension */ + REQUIRE( + m.numAttributes() == + 8); /* axisLabels, dataOrder, geometry, gridGlobalOffset, gridSpacing, + gridUnitSI, timeOffset, unitDimension */ } -TEST_CASE( "mesh_modification_test", "[core]" ) +TEST_CASE("mesh_modification_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); @@ -540,15 +543,15 @@ TEST_CASE( "mesh_modification_test", "[core]" ) m.setDataOrder(Mesh::DataOrder::F); REQUIRE(m.dataOrder() == Mesh::DataOrder::F); REQUIRE(m.numAttributes() == 8); - std::vector< std::string > al{"z_", "y_", "x_"}; + std::vector al{"z_", "y_", "x_"}; m.setAxisLabels({"z_", "y_", "x_"}); REQUIRE(m.axisLabels() == al); REQUIRE(m.numAttributes() == 8); - std::vector< double > gs{1e-5, 2e-5, 3e-5}; + std::vector gs{1e-5, 2e-5, 3e-5}; m.setGridSpacing(gs); - REQUIRE(m.gridSpacing< double >() == gs); + REQUIRE(m.gridSpacing() == gs); REQUIRE(m.numAttributes() == 8); - std::vector< double > ggo{1e-10, 2e-10, 3e-10}; + std::vector ggo{1e-10, 2e-10, 3e-10}; m.setGridGlobalOffset({1e-10, 2e-10, 3e-10}); REQUIRE(m.gridGlobalOffset() == ggo); REQUIRE(m.numAttributes() == 8); @@ -560,11 +563,11 @@ TEST_CASE( "mesh_modification_test", "[core]" ) REQUIRE(m.geometryParameters() == gp); REQUIRE(m.numAttributes() == 9); - m["x"].setPosition(std::vector< float >{0, 0, 0}); + m["x"].setPosition(std::vector{0, 0, 0}); REQUIRE(m.numAttributes() == 9); } -TEST_CASE( "structure_test", "[core]" ) +TEST_CASE("structure_test", "[core]") { #if openPMD_USE_INVASIVE_TESTS Series o = Series("./new_openpmd_output_%T.json", Access::CREATE); @@ -584,113 +587,205 @@ TEST_CASE( "structure_test", "[core]" ) REQUIRE(m.IOHandler()); REQUIRE(o.iterations[1].meshes["M"].IOHandler()); REQUIRE(m.parent() == getWritable(&o.iterations[1].meshes)); - REQUIRE(o.iterations[1].meshes["M"].parent() == getWritable(&o.iterations[1].meshes)); + REQUIRE( + o.iterations[1].meshes["M"].parent() == + 
getWritable(&o.iterations[1].meshes)); MeshRecordComponent mrc = o.iterations[1].meshes["M"]["MRC"]; REQUIRE(mrc.IOHandler()); REQUIRE(o.iterations[1].meshes["M"]["MRC"].IOHandler()); REQUIRE(mrc.parent() == getWritable(&o.iterations[1].meshes["M"])); - REQUIRE(o.iterations[1].meshes["M"]["MRC"].parent() == getWritable(&o.iterations[1].meshes["M"])); + REQUIRE( + o.iterations[1].meshes["M"]["MRC"].parent() == + getWritable(&o.iterations[1].meshes["M"])); mrc = o.iterations[1].meshes["M"]["MRC"].makeConstant(1.0); REQUIRE(mrc.IOHandler()); REQUIRE(o.iterations[1].meshes["M"]["MRC"].IOHandler()); REQUIRE(mrc.parent() == getWritable(&o.iterations[1].meshes["M"])); - REQUIRE(o.iterations[1].meshes["M"]["MRC"].parent() == getWritable(&o.iterations[1].meshes["M"])); + REQUIRE( + o.iterations[1].meshes["M"]["MRC"].parent() == + getWritable(&o.iterations[1].meshes["M"])); - MeshRecordComponent scalar_mrc = o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR]; + MeshRecordComponent scalar_mrc = + o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR]; REQUIRE(scalar_mrc.IOHandler()); REQUIRE(o.iterations[1].meshes["M2"].IOHandler()); - REQUIRE(o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].IOHandler()); + REQUIRE( + o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].IOHandler()); REQUIRE(scalar_mrc.parent() == getWritable(&o.iterations[1].meshes)); - REQUIRE(o.iterations[1].meshes["M2"].parent() == getWritable(&o.iterations[1].meshes)); - REQUIRE(o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].parent() == getWritable(&o.iterations[1].meshes)); - scalar_mrc = o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].makeConstant(1.0); + REQUIRE( + o.iterations[1].meshes["M2"].parent() == + getWritable(&o.iterations[1].meshes)); + REQUIRE( + o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].parent() == + getWritable(&o.iterations[1].meshes)); + scalar_mrc = + o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].makeConstant( + 1.0); REQUIRE(scalar_mrc.IOHandler()); REQUIRE(o.iterations[1].meshes["M2"].IOHandler()); - REQUIRE(o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].IOHandler()); + REQUIRE( + o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].IOHandler()); REQUIRE(scalar_mrc.parent() == getWritable(&o.iterations[1].meshes)); - REQUIRE(o.iterations[1].meshes["M2"].parent() == getWritable(&o.iterations[1].meshes)); - REQUIRE(o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].parent() == getWritable(&o.iterations[1].meshes)); + REQUIRE( + o.iterations[1].meshes["M2"].parent() == + getWritable(&o.iterations[1].meshes)); + REQUIRE( + o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].parent() == + getWritable(&o.iterations[1].meshes)); ParticleSpecies ps = o.iterations[1].particles["P"]; REQUIRE(ps.IOHandler()); REQUIRE(o.iterations[1].particles["P"].IOHandler()); REQUIRE(ps.parent() == getWritable(&o.iterations[1].particles)); - REQUIRE(o.iterations[1].particles["P"].parent() == getWritable(&o.iterations[1].particles)); + REQUIRE( + o.iterations[1].particles["P"].parent() == + getWritable(&o.iterations[1].particles)); REQUIRE(o.iterations[1].particles["P"].particlePatches.IOHandler()); - REQUIRE(o.iterations[1].particles["P"].particlePatches.parent() == getWritable(&o.iterations[1].particles["P"])); + REQUIRE( + o.iterations[1].particles["P"].particlePatches.parent() == + getWritable(&o.iterations[1].particles["P"])); auto dset = Dataset(Datatype::DOUBLE, {1}); - 
o.iterations[1].particles["P"]["position"][RecordComponent::SCALAR].resetDataset(dset); - o.iterations[1].particles["P"]["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); + o.iterations[1] + .particles["P"]["position"][RecordComponent::SCALAR] + .resetDataset(dset); + o.iterations[1] + .particles["P"]["positionOffset"][RecordComponent::SCALAR] + .resetDataset(dset); Record r = o.iterations[1].particles["P"]["PR"]; REQUIRE(r.IOHandler()); REQUIRE(o.iterations[1].particles["P"]["PR"].IOHandler()); REQUIRE(r.parent() == getWritable(&o.iterations[1].particles["P"])); - REQUIRE(o.iterations[1].particles["P"]["PR"].parent() == getWritable(&o.iterations[1].particles["P"])); + REQUIRE( + o.iterations[1].particles["P"]["PR"].parent() == + getWritable(&o.iterations[1].particles["P"])); RecordComponent rc = o.iterations[1].particles["P"]["PR"]["PRC"]; REQUIRE(rc.IOHandler()); REQUIRE(o.iterations[1].particles["P"]["PR"]["PRC"].IOHandler()); REQUIRE(rc.parent() == getWritable(&o.iterations[1].particles["P"]["PR"])); - REQUIRE(o.iterations[1].particles["P"]["PR"]["PRC"].parent() == getWritable(&o.iterations[1].particles["P"]["PR"])); + REQUIRE( + o.iterations[1].particles["P"]["PR"]["PRC"].parent() == + getWritable(&o.iterations[1].particles["P"]["PR"])); rc = o.iterations[1].particles["P"]["PR"]["PRC"].makeConstant(1.0); REQUIRE(rc.IOHandler()); REQUIRE(o.iterations[1].particles["P"]["PR"]["PRC"].IOHandler()); REQUIRE(rc.parent() == getWritable(&o.iterations[1].particles["P"]["PR"])); - REQUIRE(o.iterations[1].particles["P"]["PR"]["PRC"].parent() == getWritable(&o.iterations[1].particles["P"]["PR"])); + REQUIRE( + o.iterations[1].particles["P"]["PR"]["PRC"].parent() == + getWritable(&o.iterations[1].particles["P"]["PR"])); - RecordComponent scalar_rc = o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR]; + RecordComponent scalar_rc = + o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR]; REQUIRE(scalar_rc.IOHandler()); - REQUIRE(o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR].IOHandler()); + REQUIRE(o.iterations[1] + .particles["P"]["PR2"][RecordComponent::SCALAR] + .IOHandler()); REQUIRE(scalar_rc.parent() == getWritable(&o.iterations[1].particles["P"])); - REQUIRE(o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR].parent() == getWritable(&o.iterations[1].particles["P"])); - scalar_rc = o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR].makeConstant(1.0); + REQUIRE( + o.iterations[1] + .particles["P"]["PR2"][RecordComponent::SCALAR] + .parent() == getWritable(&o.iterations[1].particles["P"])); + scalar_rc = o.iterations[1] + .particles["P"]["PR2"][RecordComponent::SCALAR] + .makeConstant(1.0); REQUIRE(scalar_rc.IOHandler()); - REQUIRE(o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR].IOHandler()); + REQUIRE(o.iterations[1] + .particles["P"]["PR2"][RecordComponent::SCALAR] + .IOHandler()); REQUIRE(scalar_rc.parent() == getWritable(&o.iterations[1].particles["P"])); - REQUIRE(o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR].parent() == getWritable(&o.iterations[1].particles["P"])); + REQUIRE( + o.iterations[1] + .particles["P"]["PR2"][RecordComponent::SCALAR] + .parent() == getWritable(&o.iterations[1].particles["P"])); - REQUIRE(1 == o.iterations[1].particles["P"].particlePatches.count("numParticles")); - REQUIRE(1 == o.iterations[1].particles["P"].particlePatches.count("numParticlesOffset")); + REQUIRE( + 1 == + o.iterations[1].particles["P"].particlePatches.count("numParticles")); + REQUIRE( + 1 == + 
o.iterations[1].particles["P"].particlePatches.count( + "numParticlesOffset")); ParticlePatches pp = o.iterations[1].particles["P"].particlePatches; REQUIRE(pp.IOHandler()); REQUIRE(o.iterations[1].particles["P"].particlePatches.IOHandler()); REQUIRE(pp.parent() == getWritable(&o.iterations[1].particles["P"])); - REQUIRE(o.iterations[1].particles["P"].particlePatches.parent() == getWritable(&o.iterations[1].particles["P"])); + REQUIRE( + o.iterations[1].particles["P"].particlePatches.parent() == + getWritable(&o.iterations[1].particles["P"])); - PatchRecord pr = o.iterations[1].particles["P"].particlePatches["numParticles"]; + PatchRecord pr = + o.iterations[1].particles["P"].particlePatches["numParticles"]; REQUIRE(pr.IOHandler()); - REQUIRE(o.iterations[1].particles["P"].particlePatches["numParticles"].IOHandler()); - REQUIRE(pr.parent() == getWritable(&o.iterations[1].particles["P"].particlePatches)); - REQUIRE(o.iterations[1].particles["P"].particlePatches["numParticles"].parent() == getWritable(&o.iterations[1].particles["P"].particlePatches)); + REQUIRE(o.iterations[1] + .particles["P"] + .particlePatches["numParticles"] + .IOHandler()); + REQUIRE( + pr.parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches)); + REQUIRE( + o.iterations[1] + .particles["P"] + .particlePatches["numParticles"] + .parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches)); pr = o.iterations[1].particles["P"].particlePatches["extent"]; REQUIRE(pr.IOHandler()); - REQUIRE(o.iterations[1].particles["P"].particlePatches["extent"].IOHandler()); - REQUIRE(pr.parent() == getWritable(&o.iterations[1].particles["P"].particlePatches)); - REQUIRE(o.iterations[1].particles["P"].particlePatches["extent"].parent() == getWritable(&o.iterations[1].particles["P"].particlePatches)); + REQUIRE( + o.iterations[1].particles["P"].particlePatches["extent"].IOHandler()); + REQUIRE( + pr.parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches)); + REQUIRE( + o.iterations[1].particles["P"].particlePatches["extent"].parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches)); - PatchRecordComponent scalar_prc = o.iterations[1].particles["P"].particlePatches["numParticles"][RecordComponent::SCALAR]; + PatchRecordComponent scalar_prc = + o.iterations[1].particles["P"].particlePatches["numParticles"] + [RecordComponent::SCALAR]; REQUIRE(scalar_prc.IOHandler()); - REQUIRE(o.iterations[1].particles["P"].particlePatches["numParticles"][RecordComponent::SCALAR].IOHandler()); - REQUIRE(scalar_prc.parent() == getWritable(&o.iterations[1].particles["P"].particlePatches)); - REQUIRE(o.iterations[1].particles["P"].particlePatches["numParticles"][RecordComponent::SCALAR].parent() == getWritable(&o.iterations[1].particles["P"].particlePatches)); - - PatchRecordComponent prc = o.iterations[1].particles["P"].particlePatches["extent"]["x"]; + REQUIRE(o.iterations[1] + .particles["P"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .IOHandler()); + REQUIRE( + scalar_prc.parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches)); + REQUIRE( + o.iterations[1] + .particles["P"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches)); + + PatchRecordComponent prc = + o.iterations[1].particles["P"].particlePatches["extent"]["x"]; REQUIRE(prc.IOHandler()); - REQUIRE(o.iterations[1].particles["P"].particlePatches["extent"]["x"].IOHandler()); - REQUIRE(prc.parent() == 
getWritable(&o.iterations[1].particles["P"].particlePatches["extent"])); - REQUIRE(o.iterations[1].particles["P"].particlePatches["extent"]["x"].parent() == getWritable(&o.iterations[1].particles["P"].particlePatches["extent"])); + REQUIRE(o.iterations[1] + .particles["P"] + .particlePatches["extent"]["x"] + .IOHandler()); + REQUIRE( + prc.parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches["extent"])); + REQUIRE( + o.iterations[1] + .particles["P"] + .particlePatches["extent"]["x"] + .parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches["extent"])); prc.resetDataset(dset); #else std::cerr << "Invasive tests not enabled. Hierarchy is not visible.\n"; #endif } -TEST_CASE( "wrapper_test", "[core]" ) +TEST_CASE("wrapper_test", "[core]") { Series o = Series("./new_openpmd_output.json", Access::CREATE); @@ -707,20 +802,24 @@ TEST_CASE( "wrapper_test", "[core]" ) REQUIRE(o.iterationEncoding() == IterationEncoding::groupBased); REQUIRE(o.name() == "other_name"); - o.iterations[1].meshes["E"]["x"].resetDataset(Dataset(Datatype::USHORT, {42})); + o.iterations[1].meshes["E"]["x"].resetDataset( + Dataset(Datatype::USHORT, {42})); MeshRecordComponent mrc = o.iterations[1].meshes["E"]["x"]; REQUIRE(mrc.getDatatype() == Datatype::USHORT); REQUIRE(mrc.getExtent() == Extent{42}); mrc.resetDataset(Dataset(Datatype::LONG_DOUBLE, {7})); - REQUIRE(o.iterations[1].meshes["E"]["x"].getDatatype() == Datatype::LONG_DOUBLE); + REQUIRE( + o.iterations[1].meshes["E"]["x"].getDatatype() == + Datatype::LONG_DOUBLE); REQUIRE(o.iterations[1].meshes["E"]["x"].getExtent() == Extent{7}); - Container< Iteration, uint64_t > its = o.iterations; + Container its = o.iterations; its[1].meshes["E"]["y"].resetDataset(Dataset(Datatype::CHAR, {2})); REQUIRE(o.iterations[1].meshes["E"].count("y") == 1); REQUIRE(o.iterations[1].meshes["E"]["y"].getDatatype() == Datatype::CHAR); REQUIRE(o.iterations[1].meshes["E"]["y"].getExtent() == Extent{2}); - o.iterations[1].meshes["E"]["z"].resetDataset(Dataset(Datatype::FLOAT, {1234})); + o.iterations[1].meshes["E"]["z"].resetDataset( + Dataset(Datatype::FLOAT, {1234})); REQUIRE(its[1].meshes["E"].count("z") == 1); REQUIRE(its[1].meshes["E"]["z"].getDatatype() == Datatype::FLOAT); REQUIRE(its[1].meshes["E"]["z"].getExtent() == Extent{1234}); @@ -731,7 +830,8 @@ TEST_CASE( "wrapper_test", "[core]" ) REQUIRE(o.iterations.count(3) == 1); double value = 42.; - o.iterations[4].meshes["E"]["y"].resetDataset(Dataset(Datatype::DOUBLE, {1})); + o.iterations[4].meshes["E"]["y"].resetDataset( + Dataset(Datatype::DOUBLE, {1})); o.iterations[4].meshes["E"]["y"].makeConstant(value); MeshRecordComponent mrc2 = o.iterations[4].meshes["E"]["y"]; REQUIRE(mrc2.constant()); @@ -739,12 +839,13 @@ TEST_CASE( "wrapper_test", "[core]" ) mrc2.loadChunk(shareRaw(&loadData), {0}, {1}); o.flush(); REQUIRE(loadData == value); - // TODO: do we want to be able to make data constant after already writing it once? - // value = 43.; - // mrc2.makeConstant(value); - REQUIRE_THROWS_WITH(mrc2.makeConstant(value), - Catch::Equals("A recordComponent can not (yet) be made constant after it has been written.")); - std::array< double, 1 > moreData = {{ 112233. }}; + // TODO: do we want to be able to make data constant after already writing + // it once? 
// value = 43.; // mrc2.makeConstant(value); + REQUIRE_THROWS_WITH( + mrc2.makeConstant(value), + Catch::Equals("A recordComponent can not (yet) be made constant after " + "it has been written.")); + std::array<double, 1> moreData = {{112233.}}; o.iterations[4].meshes["E"]["y"].loadChunk(shareRaw(moreData), {0}, {1}); o.flush(); REQUIRE(moreData[0] == value); @@ -757,11 +858,15 @@ TEST_CASE( "wrapper_test", "[core]" ) #endif MeshRecordComponent mrc3 = o.iterations[5].meshes["E"]["y"]; - o.iterations[5].meshes["E"]["y"].resetDataset(Dataset(Datatype::DOUBLE, {1})); + o.iterations[5].meshes["E"]["y"].resetDataset( + Dataset(Datatype::DOUBLE, {1})); int wrongData = 42; - REQUIRE_THROWS_WITH(o.iterations[5].meshes["E"]["y"].storeChunk(shareRaw(&wrongData), {0}, {1}), - Catch::Equals("Datatypes of chunk data (INT) and record component (DOUBLE) do not match.")); - std::shared_ptr< double > storeData = std::make_shared< double >(44); + REQUIRE_THROWS_WITH( + o.iterations[5].meshes["E"]["y"].storeChunk( + shareRaw(&wrongData), {0}, {1}), + Catch::Equals("Datatypes of chunk data (INT) and record component " + "(DOUBLE) do not match.")); + std::shared_ptr<double> storeData = std::make_shared<double>(44); o.iterations[5].meshes["E"]["y"].storeChunk(storeData, {0}, {1}); #if openPMD_USE_INVASIVE_TESTS REQUIRE(o.iterations[5].meshes["E"]["y"].m_chunks->size() == 1); @@ -773,50 +878,91 @@ TEST_CASE( "wrapper_test", "[core]" ) REQUIRE(mrc3.m_chunks->empty()); #endif - o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].resetDataset(Dataset(determineDatatype< uint64_t >(), {4})); + o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .resetDataset(Dataset(determineDatatype<uint64_t>(), {4})); auto dset = Dataset(Datatype::DOUBLE, {1}); - o.iterations[6].particles["electrons"]["position"][RecordComponent::SCALAR].resetDataset(dset); - o.iterations[6].particles["electrons"]["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); + o.iterations[6] + .particles["electrons"]["position"][RecordComponent::SCALAR] + .resetDataset(dset); + o.iterations[6] + .particles["electrons"]["positionOffset"][RecordComponent::SCALAR] + .resetDataset(dset); ParticlePatches pp = o.iterations[6].particles["electrons"].particlePatches; - REQUIRE(pp["numParticles"][RecordComponent::SCALAR].getDatatype() == determineDatatype< uint64_t >()); - REQUIRE(pp["numParticles"][RecordComponent::SCALAR].getExtent() == Extent{4}); + REQUIRE( + pp["numParticles"][RecordComponent::SCALAR].getDatatype() == + determineDatatype<uint64_t>()); + REQUIRE( + pp["numParticles"][RecordComponent::SCALAR].getExtent() == Extent{4}); pp["prop"]["x"].resetDataset(Dataset(Datatype::DOUBLE, {7})); - REQUIRE(o.iterations[6].particles["electrons"].particlePatches["prop"]["x"].getDatatype() == Datatype::DOUBLE); - REQUIRE(o.iterations[6].particles["electrons"].particlePatches["prop"]["x"].getExtent() == Extent{7}); + REQUIRE( + o.iterations[6] + .particles["electrons"] + .particlePatches["prop"]["x"] + .getDatatype() == Datatype::DOUBLE); + REQUIRE( + o.iterations[6] + .particles["electrons"] + .particlePatches["prop"]["x"] + .getExtent() == Extent{7}); size_t idx = 0; uint64_t val = 10; #if openPMD_USE_INVASIVE_TESTS - REQUIRE(o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].m_chunks->empty()); + REQUIRE(o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .m_chunks->empty());
REQUIRE(pp["numParticles"][RecordComponent::SCALAR].m_chunks->empty()); #endif pp["numParticles"][RecordComponent::SCALAR].store(idx, val); #if openPMD_USE_INVASIVE_TESTS - REQUIRE(o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].m_chunks->size() == 1); + REQUIRE( + o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .m_chunks->size() == 1); REQUIRE(pp["numParticles"][RecordComponent::SCALAR].m_chunks->size() == 1); #endif std::stringstream u64str; u64str << determineDatatype<uint64_t>(); - REQUIRE_THROWS_WITH(o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].store(idx+1, 42.), - Catch::Equals("Datatypes of patch data (DOUBLE) and dataset (" + u64str.str() + ") do not match.")); - o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].store(idx+1, val+1); + REQUIRE_THROWS_WITH( + o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .store(idx + 1, 42.), + Catch::Equals( + "Datatypes of patch data (DOUBLE) and dataset (" + u64str.str() + + ") do not match.")); + o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .store(idx + 1, val + 1); #if openPMD_USE_INVASIVE_TESTS - REQUIRE(o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].m_chunks->size() == 2); + REQUIRE( + o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .m_chunks->size() == 2); REQUIRE(pp["numParticles"][RecordComponent::SCALAR].m_chunks->size() == 2); #endif o.flush(); #if openPMD_USE_INVASIVE_TESTS - REQUIRE(o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].m_chunks->empty()); + REQUIRE(o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .m_chunks->empty()); REQUIRE(pp["numParticles"][RecordComponent::SCALAR].m_chunks->empty()); #endif } -TEST_CASE( "use_count_test", "[core]" ) +TEST_CASE("use_count_test", "[core]") { Series o = Series("./new_openpmd_output.json", Access::CREATE); MeshRecordComponent mrc = o.iterations[1].meshes["E"]["x"]; mrc.resetDataset(Dataset(determineDatatype<uint16_t>(), {42})); - std::shared_ptr< uint16_t > storeData = std::make_shared< uint16_t >(44); + std::shared_ptr<uint16_t> storeData = std::make_shared<uint16_t>(44); REQUIRE(storeData.use_count() == 1); mrc.storeChunk(storeData, {0}, {1}); REQUIRE(storeData.use_count() == 2); @@ -824,203 +970,218 @@ TEST_CASE( "use_count_test", "[core]" ) REQUIRE(storeData.use_count() == 1); #if openPMD_USE_INVASIVE_TESTS - PatchRecordComponent pprc = o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR]; + PatchRecordComponent pprc = + o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR]; auto dset = Dataset(Datatype::DOUBLE, {1}); - o.iterations[6].particles["electrons"]["position"][RecordComponent::SCALAR].resetDataset(dset); - o.iterations[6].particles["electrons"]["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); + o.iterations[6] + .particles["electrons"]["position"][RecordComponent::SCALAR] + .resetDataset(dset); + o.iterations[6] + .particles["electrons"]["positionOffset"][RecordComponent::SCALAR] + .resetDataset(dset); pprc.resetDataset(Dataset(determineDatatype<uint64_t>(), {4})); - pprc.store(0, static_cast< uint64_t >(1));
- REQUIRE(static_cast< Parameter< Operation::WRITE_DATASET >* >(pprc.m_chunks->front().parameter.get())->data.use_count() == 1); + pprc.store(0, static_cast<uint64_t>(1)); + REQUIRE( + static_cast<Parameter<Operation::WRITE_DATASET> *>( + pprc.m_chunks->front().parameter.get()) + ->data.use_count() == 1); #endif } -TEST_CASE( "empty_record_test", "[core]" ) +TEST_CASE("empty_record_test", "[core]") { Series o = Series("./new_openpmd_output.json", Access::CREATE); - o.iterations[1].meshes["E"].setComment("No assumption about contained RecordComponents will be made"); - REQUIRE_THROWS_WITH(o.flush(), - Catch::Equals("A Record can not be written without any contained RecordComponents: E")); - o.iterations[1].meshes["E"][RecordComponent::SCALAR].resetDataset(Dataset(Datatype::DOUBLE, {1})); + o.iterations[1].meshes["E"].setComment( + "No assumption about contained RecordComponents will be made"); + REQUIRE_THROWS_WITH( + o.flush(), + Catch::Equals("A Record can not be written without any contained " + "RecordComponents: E")); + o.iterations[1].meshes["E"][RecordComponent::SCALAR].resetDataset( + Dataset(Datatype::DOUBLE, {1})); o.flush(); } -TEST_CASE( "zero_extent_component", "[core]" ) +TEST_CASE("zero_extent_component", "[core]") { Series o = Series("./new_openpmd_output.json", Access::CREATE); auto E_x = o.iterations[1].meshes["E"]["x"]; E_x.setComment("Datasets must contain dimensions."); - //REQUIRE_THROWS_WITH(E_x.resetDataset(Dataset(Datatype::LONG, {})), - // Catch::Equals("Dataset extent must be at least 1D.")); - REQUIRE_THROWS_WITH(E_x.makeEmpty(0), - Catch::Equals("Dataset extent must be at least 1D.")); + // REQUIRE_THROWS_WITH(E_x.resetDataset(Dataset(Datatype::LONG, {})), + // Catch::Equals("Dataset extent must be at least + // 1D.")); + REQUIRE_THROWS_WITH( + E_x.makeEmpty(0), + Catch::Equals("Dataset extent must be at least 1D.")); E_x.resetDataset(Dataset(Datatype::DOUBLE, {1})); } -TEST_CASE( "no_file_ending", "[core]" ) +TEST_CASE("no_file_ending", "[core]") { - REQUIRE_THROWS_WITH(Series("./new_openpmd_output", Access::CREATE), - Catch::Equals("Unknown file format! Did you specify a file ending?")); - REQUIRE_THROWS_WITH(Series("./new_openpmd_output_%T", Access::CREATE), - Catch::Equals("Unknown file format! Did you specify a file ending?")); - REQUIRE_THROWS_WITH(Series("./new_openpmd_output_%05T", Access::CREATE), - Catch::Equals("Unknown file format! Did you specify a file ending?")); + REQUIRE_THROWS_WITH( + Series("./new_openpmd_output", Access::CREATE), + Catch::Equals("Unknown file format! Did you specify a file ending?")); + REQUIRE_THROWS_WITH( + Series("./new_openpmd_output_%T", Access::CREATE), + Catch::Equals("Unknown file format! Did you specify a file ending?")); + REQUIRE_THROWS_WITH( + Series("./new_openpmd_output_%05T", Access::CREATE), + Catch::Equals("Unknown file format! 
Did you specify a file ending?")); } -TEST_CASE( "custom_geometries", "[core]" ) +TEST_CASE("custom_geometries", "[core]") { - std::vector< int > sampleData( 10, 0 ); + std::vector sampleData(10, 0); { - Series write( "../samples/custom_geometry.json", Access::CREATE ); - auto E = write.iterations[ 0 ].meshes[ "E" ]; - E.setAttribute( "geometry", "other:customGeometry" ); - auto E_x = E[ "x" ]; - E_x.resetDataset( { Datatype::INT, { 10 } } ); - E_x.storeChunk( sampleData, { 0 }, { 10 } ); - - auto B = write.iterations[ 0 ].meshes[ "B" ]; - B.setGeometry( "customGeometry" ); - auto B_x = B[ "x" ]; - B_x.resetDataset( { Datatype::INT, { 10 } } ); - B_x.storeChunk( sampleData, { 0 }, { 10 } ); - - auto e_energyDensity = - write.iterations[ 0 ].meshes[ "e_energyDensity" ]; - e_energyDensity.setGeometry( "other:customGeometry" ); - auto e_energyDensity_x = e_energyDensity[ RecordComponent::SCALAR ]; - e_energyDensity_x.resetDataset( { Datatype::INT, { 10 } } ); - e_energyDensity_x.storeChunk( sampleData, { 0 }, { 10 } ); - - auto e_chargeDensity = - write.iterations[ 0 ].meshes[ "e_chargeDensity" ]; - e_chargeDensity.setGeometry( Mesh::Geometry::other ); - auto e_chargeDensity_x = e_chargeDensity[ MeshRecordComponent::SCALAR ]; - e_chargeDensity_x.resetDataset( { Datatype::INT, { 10 } } ); - e_chargeDensity_x.storeChunk( sampleData, { 0 }, { 10 } ); + Series write("../samples/custom_geometry.json", Access::CREATE); + auto E = write.iterations[0].meshes["E"]; + E.setAttribute("geometry", "other:customGeometry"); + auto E_x = E["x"]; + E_x.resetDataset({Datatype::INT, {10}}); + E_x.storeChunk(sampleData, {0}, {10}); + + auto B = write.iterations[0].meshes["B"]; + B.setGeometry("customGeometry"); + auto B_x = B["x"]; + B_x.resetDataset({Datatype::INT, {10}}); + B_x.storeChunk(sampleData, {0}, {10}); + + auto e_energyDensity = write.iterations[0].meshes["e_energyDensity"]; + e_energyDensity.setGeometry("other:customGeometry"); + auto e_energyDensity_x = e_energyDensity[RecordComponent::SCALAR]; + e_energyDensity_x.resetDataset({Datatype::INT, {10}}); + e_energyDensity_x.storeChunk(sampleData, {0}, {10}); + + auto e_chargeDensity = write.iterations[0].meshes["e_chargeDensity"]; + e_chargeDensity.setGeometry(Mesh::Geometry::other); + auto e_chargeDensity_x = e_chargeDensity[MeshRecordComponent::SCALAR]; + e_chargeDensity_x.resetDataset({Datatype::INT, {10}}); + e_chargeDensity_x.storeChunk(sampleData, {0}, {10}); } { - Series read( "../samples/custom_geometry.json", Access::READ_ONLY ); - auto E = read.iterations[ 0 ].meshes[ "E" ]; + Series read("../samples/custom_geometry.json", Access::READ_ONLY); + auto E = read.iterations[0].meshes["E"]; REQUIRE( - E.getAttribute( "geometry" ).get< std::string >() == - "other:customGeometry" ); - REQUIRE( E.geometry() == Mesh::Geometry::other ); - REQUIRE( E.geometryString() == "other:customGeometry" ); + E.getAttribute("geometry").get() == + "other:customGeometry"); + REQUIRE(E.geometry() == Mesh::Geometry::other); + REQUIRE(E.geometryString() == "other:customGeometry"); - auto B = read.iterations[ 0 ].meshes[ "B" ]; + auto B = read.iterations[0].meshes["B"]; REQUIRE( - B.getAttribute( "geometry" ).get< std::string >() == - "other:customGeometry" ); - REQUIRE( B.geometry() == Mesh::Geometry::other ); - REQUIRE( B.geometryString() == "other:customGeometry" ); + B.getAttribute("geometry").get() == + "other:customGeometry"); + REQUIRE(B.geometry() == Mesh::Geometry::other); + REQUIRE(B.geometryString() == "other:customGeometry"); - auto e_energyDensity = 
read.iterations[ 0 ].meshes[ "e_energyDensity" ]; + auto e_energyDensity = read.iterations[0].meshes["e_energyDensity"]; REQUIRE( - e_energyDensity.getAttribute( "geometry" ).get< std::string >() == - "other:customGeometry" ); - REQUIRE( e_energyDensity.geometry() == Mesh::Geometry::other ); - REQUIRE( e_energyDensity.geometryString() == "other:customGeometry" ); + e_energyDensity.getAttribute("geometry").get<std::string>() == + "other:customGeometry"); + REQUIRE(e_energyDensity.geometry() == Mesh::Geometry::other); + REQUIRE(e_energyDensity.geometryString() == "other:customGeometry"); - auto e_chargeDensity = read.iterations[ 0 ].meshes[ "e_chargeDensity" ]; + auto e_chargeDensity = read.iterations[0].meshes["e_chargeDensity"]; REQUIRE( - e_chargeDensity.getAttribute( "geometry" ).get< std::string >() == - "other" ); - REQUIRE( e_chargeDensity.geometry() == Mesh::Geometry::other ); - REQUIRE( e_chargeDensity.geometryString() == "other" ); + e_chargeDensity.getAttribute("geometry").get<std::string>() == + "other"); + REQUIRE(e_chargeDensity.geometry() == Mesh::Geometry::other); + REQUIRE(e_chargeDensity.geometryString() == "other"); } } -TEST_CASE( "load_chunk_wrong_datatype", "[core]" ) +TEST_CASE("load_chunk_wrong_datatype", "[core]") { { - Series write( "../samples/some_float_value.json", Access::CREATE ); - Dataset ds{ Datatype::FLOAT, { 10 } }; - std::vector< float > sampleData( 10, 1234.5 ); - auto rc = - write.iterations[ 0 ].meshes[ "rho" ][ RecordComponent::SCALAR ]; - rc.resetDataset( ds ); - rc.storeChunk( sampleData, { 0 }, { 10 } ); + Series write("../samples/some_float_value.json", Access::CREATE); + Dataset ds{Datatype::FLOAT, {10}}; + std::vector<float> sampleData(10, 1234.5); + auto rc = write.iterations[0].meshes["rho"][RecordComponent::SCALAR]; + rc.resetDataset(ds); + rc.storeChunk(sampleData, {0}, {10}); write.flush(); } { - Series read( "../samples/some_float_value.json", Access::READ_ONLY ); + Series read("../samples/some_float_value.json", Access::READ_ONLY); REQUIRE_THROWS_WITH( - read.iterations[ 0 ] - .meshes[ "rho" ][ RecordComponent::SCALAR ] - .loadChunk< double >( { 0 }, { 10 } ), + read.iterations[0] + .meshes["rho"][RecordComponent::SCALAR] + .loadChunk<double>({0}, {10}), Catch::Equals( - "Type conversion during chunk loading not yet implemented" ) ); + "Type conversion during chunk loading not yet implemented")); } } -TEST_CASE( "DoConvert_single_value_to_vector", "[core]" ) +TEST_CASE("DoConvert_single_value_to_vector", "[core]") { #if openPMD_HAVE_ADIOS2 { - Series write( "../samples/writeSingleMesh.bp", Access::CREATE ); - auto E_x = write.iterations[ 0 ].meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 10 } } ); - E_x.makeConstant( 10 ); + Series write("../samples/writeSingleMesh.bp", Access::CREATE); + auto E_x = write.iterations[0].meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {10}}); + E_x.makeConstant(10); } { - Series read( "../samples/writeSingleMesh.bp", Access::READ_ONLY ); - auto E = read.iterations[ 0 ].meshes[ "E" ]; - REQUIRE( E.axisLabels() == std::vector< std::string >{ "x" } ); + Series read("../samples/writeSingleMesh.bp", Access::READ_ONLY); + auto E = read.iterations[0].meshes["E"]; + REQUIRE(E.axisLabels() == std::vector<std::string>{"x"}); } #endif { char val = 'x'; - Attribute attr{ val }; + Attribute attr{val}; // the following conversions should be possible - REQUIRE( attr.get< char >() == 'x' ); // no conversion - REQUIRE( attr.get< unsigned char >() == 'x' ); - REQUIRE( attr.get< signed char >() == 'x' ); + REQUIRE(attr.get<char>() == 'x'); // no conversion +
REQUIRE(attr.get<unsigned char>() == 'x'); + REQUIRE(attr.get<signed char>() == 'x'); // all the previous ones, but make them single-element vectors now + REQUIRE(attr.get<std::vector<char> >() == std::vector<char>{'x'}); REQUIRE( - attr.get< std::vector< char > >() == std::vector< char >{ 'x' } ); - REQUIRE( - attr.get< std::vector< unsigned char > >() == - std::vector< unsigned char >{ 'x' } ); + attr.get<std::vector<unsigned char> >() == + std::vector<unsigned char>{'x'}); REQUIRE( - attr.get< std::vector< signed char > >() == - std::vector< signed char >{ 'x' } ); + attr.get<std::vector<signed char> >() == + std::vector<signed char>{'x'}); } { - std::array< double, 7 > array{{ 0, 1, 2, 3, 4, 5, 6 }}; - Attribute attr{ array }; + std::array<double, 7> array{{0, 1, 2, 3, 4, 5, 6}}; + Attribute attr{array}; // the following conversions should be possible - REQUIRE( attr.get< std::array< double, 7 > >() == array ); + REQUIRE(attr.get<std::array<double, 7> >() == array); // we don't need array-to-array conversions, // so array< int, 7 > cannot be loaded here REQUIRE( - attr.get< std::vector< double > >() == - std::vector< double >{ 0, 1, 2, 3, 4, 5, 6 } ); + attr.get<std::vector<double> >() == + std::vector<double>{0, 1, 2, 3, 4, 5, 6}); REQUIRE( - attr.get< std::vector< int > >() == - std::vector< int >{ 0, 1, 2, 3, 4, 5, 6 } ); + attr.get<std::vector<int> >() == + std::vector<int>{0, 1, 2, 3, 4, 5, 6}); } { - std::vector< double > vector{ 0, 1, 2, 3, 4, 5, 6 }; - std::array< double, 7 > arraydouble{{ 0, 1, 2, 3, 4, 5, 6 }}; - std::array< int, 7 > arrayint{{ 0, 1, 2, 3, 4, 5, 6 }}; - Attribute attr{ vector }; + std::vector<double> vector{0, 1, 2, 3, 4, 5, 6}; + std::array<double, 7> arraydouble{{0, 1, 2, 3, 4, 5, 6}}; + std::array<int, 7> arrayint{{0, 1, 2, 3, 4, 5, 6}}; + Attribute attr{vector}; // the following conversions should be possible - REQUIRE( attr.get< std::array< double, 7 > >() == arraydouble ); - REQUIRE( attr.get< std::array< int, 7 > >() == arrayint ); + REQUIRE(attr.get<std::array<double, 7> >() == arraydouble); + REQUIRE(attr.get<std::array<int, 7> >() == arrayint); REQUIRE_THROWS_WITH( - ( attr.get< std::array< int, 8 > >() ), - Catch::Equals( "getCast: no vector to array conversion possible " - "(wrong requested array size)."
) ); + (attr.get >()), + Catch::Equals("getCast: no vector to array conversion possible " + "(wrong requested array size).")); REQUIRE( - attr.get< std::vector< double > >() == - std::vector< double >{ 0, 1, 2, 3, 4, 5, 6 } ); + attr.get >() == + std::vector{0, 1, 2, 3, 4, 5, 6}); REQUIRE( - attr.get< std::vector< int > >() == - std::vector< int >{ 0, 1, 2, 3, 4, 5, 6 } ); + attr.get >() == + std::vector{0, 1, 2, 3, 4, 5, 6}); } } diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp index da93297358..a157a9feb5 100644 --- a/test/ParallelIOTest.cpp +++ b/test/ParallelIOTest.cpp @@ -7,24 +7,25 @@ #include #if openPMD_HAVE_MPI -# include - -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include using namespace openPMD; -std::vector getBackends() { +std::vector getBackends() +{ // first component: backend file ending // second component: whether to test 128 bit values std::vector res; @@ -41,61 +42,72 @@ auto const backends = getBackends(); #else -TEST_CASE( "none", "[parallel]" ) -{ } +TEST_CASE("none", "[parallel]") +{} #endif #if openPMD_HAVE_MPI -TEST_CASE( "parallel_multi_series_test", "[parallel]" ) +TEST_CASE("parallel_multi_series_test", "[parallel]") { - std::list< Series > allSeries; + std::list allSeries; auto myBackends = getBackends(); - // this test demonstrates an ADIOS1 (upstream) bug, comment this section to trigger it - auto const rmEnd = std::remove_if( myBackends.begin(), myBackends.end(), [](std::string const & beit) { - return beit == "bp" && - determineFormat("test.bp") == Format::ADIOS1; - }); + // this test demonstrates an ADIOS1 (upstream) bug, comment this section to + // trigger it + auto const rmEnd = std::remove_if( + myBackends.begin(), myBackends.end(), [](std::string const &beit) { + return beit == "bp" && determineFormat("test.bp") == Format::ADIOS1; + }); myBackends.erase(rmEnd, myBackends.end()); // have multiple serial series alive at the same time - for (auto const sn : {1, 2, 3}) { - for (auto const & t: myBackends) + for (auto const sn : {1, 2, 3}) + { + for (auto const &t : myBackends) { auto const file_ending = t; std::cout << file_ending << std::endl; allSeries.emplace_back( - std::string("../samples/parallel_multi_open_test_"). 
- append(std::to_string(sn)).append(".").append(file_ending), - Access::CREATE, - MPI_COMM_WORLD - ); + std::string("../samples/parallel_multi_open_test_") + .append(std::to_string(sn)) + .append(".") + .append(file_ending), + Access::CREATE, + MPI_COMM_WORLD); allSeries.back().iterations[sn].setAttribute("wululu", sn); allSeries.back().flush(); } } // skip some series: sn=1 auto it = allSeries.begin(); - std::for_each( myBackends.begin(), myBackends.end(), [&it](std::string const &){ - it++; - }); + std::for_each( + myBackends.begin(), myBackends.end(), [&it](std::string const &) { + it++; + }); // remove some series: sn=2 - std::for_each( myBackends.begin(), myBackends.end(), [&it, &allSeries](std::string const &){ - it = allSeries.erase(it); - }); + std::for_each( + myBackends.begin(), + myBackends.end(), + [&it, &allSeries](std::string const &) { it = allSeries.erase(it); }); // write from last series: sn=3 - std::for_each( myBackends.begin(), myBackends.end(), [&it](std::string const &){ - it->iterations[10].setAttribute("wululu", 10); - it->flush(); - it++; - }); + std::for_each( + myBackends.begin(), myBackends.end(), [&it](std::string const &) { + it->iterations[10].setAttribute("wululu", 10); + it->flush(); + it++; + }); // remove all leftover series allSeries.clear(); } -void write_test_zero_extent( bool fileBased, std::string file_ending, bool writeAllChunks, bool declareFromAll ) { +void write_test_zero_extent( + bool fileBased, + std::string file_ending, + bool writeAllChunks, + bool declareFromAll) +{ int mpi_s{-1}; int mpi_r{-1}; MPI_Comm_size(MPI_COMM_WORLD, &mpi_s); @@ -104,17 +116,22 @@ void write_test_zero_extent( bool fileBased, std::string file_ending, bool write auto rank = static_cast(mpi_r); std::string filePath = "../samples/parallel_write_zero_extent"; - if( fileBased ) + if (fileBased) filePath += "_%07T"; - Series o = Series(filePath.append(".").append(file_ending), Access::CREATE, MPI_COMM_WORLD); + Series o = Series( + filePath.append(".").append(file_ending), + Access::CREATE, + MPI_COMM_WORLD); int const max_step = 100; - for( int step=0; step<=max_step; step+=20 ) { + for (int step = 0; step <= max_step; step += 20) + { Iteration it = o.iterations[step]; it.setAttribute("yolo", "yo"); - if( rank != 0 || declareFromAll ) { + if (rank != 0 || declareFromAll) + { ParticleSpecies e = it.particles["e"]; /* every rank n writes n consecutive cells, increasing values @@ -122,102 +139,146 @@ void write_test_zero_extent( bool fileBased, std::string file_ending, bool write * two ranks will result in {1} * three ranks will result in {1, 2, 3} * four ranks will result in {1, 2, 3, 4, 5, 6} */ - uint64_t num_cells = ((size - 1) * (size - 1) + (size - 1)) / 2; /* (n^2 + n) / 2 */ - if (num_cells == 0u) { - std::cerr << "Test can only be run with at least two ranks" << std::endl; + uint64_t num_cells = + ((size - 1) * (size - 1) + (size - 1)) / 2; /* (n^2 + n) / 2 */ + if (num_cells == 0u) + { + std::cerr << "Test can only be run with at least two ranks" + << std::endl; return; } std::vector position_global(num_cells); double pos{1.}; - std::generate(position_global.begin(), position_global.end(), [&pos] { return pos++; }); - std::shared_ptr position_local(new double[rank], [](double const *p) { delete[] p; }); + std::generate( + position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local( + new double[rank], [](double const *p) { delete[] p; }); uint64_t offset; if (rank != 0) offset = ((rank - 1) * (rank - 1) + (rank - 
1)) / 2; else offset = 0; - e["position"]["x"].resetDataset(Dataset(determineDatatype(position_local), {num_cells})); + e["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local), {num_cells})); std::vector positionOffset_global(num_cells); uint64_t posOff{1}; - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff] { return posOff++; }); - std::shared_ptr positionOffset_local(new uint64_t[rank], [](uint64_t const *p) { delete[] p; }); + std::generate( + positionOffset_global.begin(), + positionOffset_global.end(), + [&posOff] { return posOff++; }); + std::shared_ptr positionOffset_local( + new uint64_t[rank], [](uint64_t const *p) { delete[] p; }); - e["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local), {num_cells})); + e["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local), {num_cells})); - for (uint64_t i = 0; i < rank; ++i) { + for (uint64_t i = 0; i < rank; ++i) + { position_local.get()[i] = position_global[offset + i]; - positionOffset_local.get()[i] = positionOffset_global[offset + i]; + positionOffset_local.get()[i] = + positionOffset_global[offset + i]; } - if (rank != 0 || writeAllChunks) { + if (rank != 0 || writeAllChunks) + { e["position"]["x"].storeChunk(position_local, {offset}, {rank}); - e["positionOffset"]["x"].storeChunk(positionOffset_local, {offset}, {rank}); + e["positionOffset"]["x"].storeChunk( + positionOffset_local, {offset}, {rank}); } } o.flush(); } - //TODO read back, verify + // TODO read back, verify } #endif #if openPMD_HAVE_HDF5 && openPMD_HAVE_MPI -TEST_CASE( "git_hdf5_sample_content_test", "[parallel][hdf5]" ) +TEST_CASE("git_hdf5_sample_content_test", "[parallel][hdf5]") { int mpi_rank{-1}; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - /* only a 3x3x3 chunk of the actual data is hardcoded. every worker reads 1/3 */ + /* only a 3x3x3 chunk of the actual data is hardcoded. 
every worker reads + * 1/3 */ uint64_t rank = mpi_rank % 3; try { - Series o = Series("../samples/git-sample/data00000%T.h5", Access::READ_ONLY, MPI_COMM_WORLD); + Series o = Series( + "../samples/git-sample/data00000%T.h5", + Access::READ_ONLY, + MPI_COMM_WORLD); { - double actual[3][3][3] = {{{-1.9080703683727052e-09, -1.5632650729457964e-10, 1.1497536256399599e-09}, - {-1.9979540244463578e-09, -2.5512036927466397e-10, 1.0402234629225404e-09}, - {-1.7353589676361025e-09, -8.0899198451334087e-10, -1.6443779671249104e-10}}, - - {{-2.0029988778702545e-09, -1.9543477947081556e-10, 1.0916454407094989e-09}, - {-2.3890367462087170e-09, -4.7158010829662089e-10, 9.0026075483251589e-10}, - {-1.9033881137886510e-09, -7.5192119197708962e-10, 5.0038861942880430e-10}}, - - {{-1.3271805876513554e-09, -5.9243276950837753e-10, -2.2445734160214670e-10}, - {-7.4578609954301101e-10, -1.1995737736469891e-10, 2.5611823772919706e-10}, - {-9.4806251738077663e-10, -1.5472800818372434e-10, -3.6461900165818406e-10}}}; - MeshRecordComponent& rho = o.iterations[100].meshes["rho"][MeshRecordComponent::SCALAR]; + double actual[3][3][3] = { + {{-1.9080703683727052e-09, + -1.5632650729457964e-10, + 1.1497536256399599e-09}, + {-1.9979540244463578e-09, + -2.5512036927466397e-10, + 1.0402234629225404e-09}, + {-1.7353589676361025e-09, + -8.0899198451334087e-10, + -1.6443779671249104e-10}}, + + {{-2.0029988778702545e-09, + -1.9543477947081556e-10, + 1.0916454407094989e-09}, + {-2.3890367462087170e-09, + -4.7158010829662089e-10, + 9.0026075483251589e-10}, + {-1.9033881137886510e-09, + -7.5192119197708962e-10, + 5.0038861942880430e-10}}, + + {{-1.3271805876513554e-09, + -5.9243276950837753e-10, + -2.2445734160214670e-10}, + {-7.4578609954301101e-10, + -1.1995737736469891e-10, + 2.5611823772919706e-10}, + {-9.4806251738077663e-10, + -1.5472800818372434e-10, + -3.6461900165818406e-10}}}; + MeshRecordComponent &rho = + o.iterations[100].meshes["rho"][MeshRecordComponent::SCALAR]; Offset offset{20 + rank, 20, 190}; Extent extent{1, 3, 3}; auto data = rho.loadChunk(offset, extent); o.flush(); - double* raw_ptr = data.get(); + double *raw_ptr = data.get(); - for( int j = 0; j < 3; ++j ) - for( int k = 0; k < 3; ++k ) - REQUIRE(raw_ptr[j*3 + k] == actual[rank][j][k]); + for (int j = 0; j < 3; ++j) + for (int k = 0; k < 3; ++k) + REQUIRE(raw_ptr[j * 3 + k] == actual[rank][j][k]); } { double constant_value = 9.1093829099999999e-31; - RecordComponent& electrons_mass = o.iterations[100].particles["electrons"]["mass"][RecordComponent::SCALAR]; - Offset offset{(rank+1) * 5}; + RecordComponent &electrons_mass = + o.iterations[100] + .particles["electrons"]["mass"][RecordComponent::SCALAR]; + Offset offset{(rank + 1) * 5}; Extent extent{3}; auto data = electrons_mass.loadChunk(offset, extent); o.flush(); - double* raw_ptr = data.get(); + double *raw_ptr = data.get(); - for( int i = 0; i < 3; ++i ) + for (int i = 0; i < 3; ++i) REQUIRE(raw_ptr[i] == constant_value); } - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; } } -TEST_CASE( "hdf5_write_test", "[parallel][hdf5]" ) +TEST_CASE("hdf5_write_test", "[parallel][hdf5]") { int mpi_s{-1}; int mpi_r{-1}; @@ -225,60 +286,70 @@ TEST_CASE( "hdf5_write_test", "[parallel][hdf5]" ) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_r); auto mpi_size = static_cast(mpi_s); auto mpi_rank = static_cast(mpi_r); - Series o = Series("../samples/parallel_write.h5", Access::CREATE, MPI_COMM_WORLD); + Series o = + Series("../samples/parallel_write.h5", Access::CREATE, MPI_COMM_WORLD); REQUIRE_THROWS_AS(o.setAuthor(""), std::runtime_error); o.setAuthor("Parallel HDF5"); - ParticleSpecies& e = o.iterations[1].particles["e"]; + ParticleSpecies &e = o.iterations[1].particles["e"]; - std::vector< double > position_global(mpi_size); + std::vector position_global(mpi_size); double pos{0.}; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - std::shared_ptr< double > position_local(new double); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local(new double); *position_local = position_global[mpi_rank]; - e["position"]["x"].resetDataset(Dataset(determineDatatype(position_local), {mpi_size})); + e["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local), {mpi_size})); e["position"]["x"].storeChunk(position_local, {mpi_rank}, {1}); - std::vector< uint64_t > positionOffset_global(mpi_size); + std::vector positionOffset_global(mpi_size); uint64_t posOff{0}; - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local(new uint64_t); + std::generate( + positionOffset_global.begin(), positionOffset_global.end(), [&posOff] { + return posOff++; + }); + std::shared_ptr positionOffset_local(new uint64_t); *positionOffset_local = positionOffset_global[mpi_rank]; - e["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local), {mpi_size})); + e["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local), {mpi_size})); e["positionOffset"]["x"].storeChunk(positionOffset_local, {mpi_rank}, {1}); o.flush(); } -TEST_CASE( "hdf5_write_test_zero_extent", "[parallel][hdf5]" ) +TEST_CASE("hdf5_write_test_zero_extent", "[parallel][hdf5]") { - write_test_zero_extent( false, "h5", true, true ); - write_test_zero_extent( true, "h5", true, true ); + write_test_zero_extent(false, "h5", true, true); + write_test_zero_extent(true, "h5", true, true); } -TEST_CASE( "hdf5_write_test_skip_chunk", "[parallel][hdf5]" ) +TEST_CASE("hdf5_write_test_skip_chunk", "[parallel][hdf5]") { //! @todo add via JSON option instead of environment read - auto const hdf5_collective = auxiliary::getEnvString( "OPENPMD_HDF5_INDEPENDENT", "ON" ); - if( hdf5_collective == "ON" ) + auto const hdf5_collective = + auxiliary::getEnvString("OPENPMD_HDF5_INDEPENDENT", "ON"); + if (hdf5_collective == "ON") { - write_test_zero_extent( false, "h5", false, true ); - write_test_zero_extent( true, "h5", false, true ); + write_test_zero_extent(false, "h5", false, true); + write_test_zero_extent(true, "h5", false, true); } else REQUIRE(true); } -TEST_CASE( "hdf5_write_test_skip_declare", "[parallel][hdf5]" ) +TEST_CASE("hdf5_write_test_skip_declare", "[parallel][hdf5]") { //! 
@todo add via JSON option instead of environment read - auto const hdf5_collective = auxiliary::getEnvString( "OPENPMD_HDF5_INDEPENDENT", "OFF" ); - if( hdf5_collective == "ON" ) + auto const hdf5_collective = + auxiliary::getEnvString("OPENPMD_HDF5_INDEPENDENT", "OFF"); + if (hdf5_collective == "ON") { - write_test_zero_extent( false, "h5", false, false ); - write_test_zero_extent( true, "h5", false, false ); + write_test_zero_extent(false, "h5", false, false); + write_test_zero_extent(true, "h5", false, false); } else REQUIRE(true); @@ -286,7 +357,7 @@ TEST_CASE( "hdf5_write_test_skip_declare", "[parallel][hdf5]" ) #else -TEST_CASE( "no_parallel_hdf5", "[parallel][hdf5]" ) +TEST_CASE("no_parallel_hdf5", "[parallel][hdf5]") { REQUIRE(true); } @@ -295,14 +366,13 @@ TEST_CASE( "no_parallel_hdf5", "[parallel][hdf5]" ) // this one works for both ADIOS1 and ADIOS2 #if (openPMD_HAVE_ADIOS1 || openPMD_HAVE_ADIOS2) && openPMD_HAVE_MPI -void -available_chunks_test( std::string file_ending ) +void available_chunks_test(std::string file_ending) { - int r_mpi_rank{ -1 }, r_mpi_size{ -1 }; - MPI_Comm_rank( MPI_COMM_WORLD, &r_mpi_rank ); - MPI_Comm_size( MPI_COMM_WORLD, &r_mpi_size ); - unsigned mpi_rank{ static_cast< unsigned >( r_mpi_rank ) }, - mpi_size{ static_cast< unsigned >( r_mpi_size ) }; + int r_mpi_rank{-1}, r_mpi_size{-1}; + MPI_Comm_rank(MPI_COMM_WORLD, &r_mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &r_mpi_size); + unsigned mpi_rank{static_cast(r_mpi_rank)}, + mpi_size{static_cast(r_mpi_size)}; std::string name = "../samples/available_chunks." + file_ending; /* @@ -320,124 +390,123 @@ available_chunks_test( std::string file_ending ) "parameters": { "NumAggregators":)END" - << "\"" << std::to_string(mpi_size) << "\"" << R"END( + << "\"" << std::to_string(mpi_size) << "\"" + << R"END( } } } } )END"; - std::vector< int > data{ 2, 4, 6, 8 }; + std::vector data{2, 4, 6, 8}; { - Series write( name, Access::CREATE, MPI_COMM_WORLD, parameters.str() ); - Iteration it0 = write.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { mpi_size, 4 } } ); - E_x.storeChunk( data, { mpi_rank, 0 }, { 1, 4 } ); + Series write(name, Access::CREATE, MPI_COMM_WORLD, parameters.str()); + Iteration it0 = write.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {mpi_size, 4}}); + E_x.storeChunk(data, {mpi_rank, 0}, {1, 4}); it0.close(); } { - Series read( name, Access::READ_ONLY, MPI_COMM_WORLD ); - Iteration it0 = read.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; + Series read(name, Access::READ_ONLY, MPI_COMM_WORLD); + Iteration it0 = read.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; ChunkTable table = E_x.availableChunks(); std::sort( - table.begin(), - table.end(), - []( auto const & lhs, auto const & rhs ) { - return lhs.offset[ 0 ] < rhs.offset[ 0 ]; - } ); - std::vector< int > ranks; - ranks.reserve( table.size() ); - for( size_t i = 0; i < ranks.size(); ++i ) + table.begin(), table.end(), [](auto const &lhs, auto const &rhs) { + return lhs.offset[0] < rhs.offset[0]; + }); + std::vector ranks; + ranks.reserve(table.size()); + for (size_t i = 0; i < ranks.size(); ++i) { - WrittenChunkInfo const & chunk = table[ i ]; - REQUIRE( chunk.offset == Offset{ i, 0 } ); - REQUIRE( chunk.extent == Extent{ 1, 4 } ); - ranks.emplace_back( chunk.sourceID ); + WrittenChunkInfo const &chunk = table[i]; + REQUIRE(chunk.offset == Offset{i, 0}); + REQUIRE(chunk.extent == Extent{1, 4}); + ranks.emplace_back(chunk.sourceID); } /* * In the BP4 
engine, sourceID corresponds with the BP subfile. * Since those are in a nondeterministic order, simply check that * they are all present. */ - std::sort( ranks.begin(), ranks.end() ); - for( int i = 0; i < int(ranks.size()); ++i ) + std::sort(ranks.begin(), ranks.end()); + for (int i = 0; i < int(ranks.size()); ++i) { - REQUIRE( ranks[ i ] == i ); + REQUIRE(ranks[i] == i); } } } -TEST_CASE( "available_chunks_test", "[parallel][adios]" ) +TEST_CASE("available_chunks_test", "[parallel][adios]") { - available_chunks_test( "bp" ); + available_chunks_test("bp"); } -void -extendDataset( std::string const & ext ) +void extendDataset(std::string const &ext) { std::string filename = "../samples/parallelExtendDataset." + ext; - int r_mpi_rank{ -1 }, r_mpi_size{ -1 }; - MPI_Comm_rank( MPI_COMM_WORLD, &r_mpi_rank ); - MPI_Comm_size( MPI_COMM_WORLD, &r_mpi_size ); - unsigned mpi_rank{ static_cast< unsigned >( r_mpi_rank ) }, - mpi_size{ static_cast< unsigned >( r_mpi_size ) }; - std::vector< int > data1( 25 ); - std::vector< int > data2( 25 ); - std::iota( data1.begin(), data1.end(), 0 ); - std::iota( data2.begin(), data2.end(), 25 ); + int r_mpi_rank{-1}, r_mpi_size{-1}; + MPI_Comm_rank(MPI_COMM_WORLD, &r_mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &r_mpi_size); + unsigned mpi_rank{static_cast(r_mpi_rank)}, + mpi_size{static_cast(r_mpi_size)}; + std::vector data1(25); + std::vector data2(25); + std::iota(data1.begin(), data1.end(), 0); + std::iota(data2.begin(), data2.end(), 25); { - Series write( filename, Access::CREATE, MPI_COMM_WORLD ); - if( ext == "bp" && write.backend() != "ADIOS2" ) + Series write(filename, Access::CREATE, MPI_COMM_WORLD); + if (ext == "bp" && write.backend() != "ADIOS2") { // dataset resizing unsupported in ADIOS1 return; } - Dataset ds1{ Datatype::INT, { mpi_size, 25 } }; - Dataset ds2{ { mpi_size, 50 } }; + Dataset ds1{Datatype::INT, {mpi_size, 25}}; + Dataset ds2{{mpi_size, 50}}; // array record component -> array record component // should work - auto E_x = write.iterations[ 0 ].meshes[ "E" ][ "x" ]; - E_x.resetDataset( ds1 ); - E_x.storeChunk( data1, { mpi_rank, 0 }, { 1, 25 } ); + auto E_x = write.iterations[0].meshes["E"]["x"]; + E_x.resetDataset(ds1); + E_x.storeChunk(data1, {mpi_rank, 0}, {1, 25}); write.flush(); - E_x.resetDataset( ds2 ); - E_x.storeChunk( data2, { mpi_rank, 25 }, { 1, 25 } ); + E_x.resetDataset(ds2); + E_x.storeChunk(data2, {mpi_rank, 25}, {1, 25}); write.flush(); } - MPI_Barrier( MPI_COMM_WORLD ); + MPI_Barrier(MPI_COMM_WORLD); { - Series read( filename, Access::READ_ONLY ); - auto E_x = read.iterations[ 0 ].meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getExtent() == Extent{ mpi_size, 50 } ); - auto chunk = E_x.loadChunk< int >( { 0, 0 }, { mpi_size, 50 } ); + Series read(filename, Access::READ_ONLY); + auto E_x = read.iterations[0].meshes["E"]["x"]; + REQUIRE(E_x.getExtent() == Extent{mpi_size, 50}); + auto chunk = E_x.loadChunk({0, 0}, {mpi_size, 50}); read.flush(); - for( size_t rank = 0; rank < mpi_size; ++rank ) + for (size_t rank = 0; rank < mpi_size; ++rank) { - for( size_t i = 0; i < 50; ++i ) + for (size_t i = 0; i < 50; ++i) { - REQUIRE( chunk.get()[ i ] == int( i ) ); + REQUIRE(chunk.get()[i] == int(i)); } } } } -TEST_CASE( "extend_dataset", "[parallel]" ) +TEST_CASE("extend_dataset", "[parallel]") { - extendDataset( "bp" ); + extendDataset("bp"); } #endif #if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI -TEST_CASE( "adios_write_test", "[parallel][adios]" ) +TEST_CASE("adios_write_test", "[parallel][adios]") { - Series o = 
Series("../samples/parallel_write.bp", Access::CREATE, MPI_COMM_WORLD); + Series o = + Series("../samples/parallel_write.bp", Access::CREATE, MPI_COMM_WORLD); int size{-1}; int rank{-1}; @@ -447,83 +516,96 @@ TEST_CASE( "adios_write_test", "[parallel][adios]" ) auto mpi_rank = static_cast(rank); o.setAuthor("Parallel ADIOS1"); - ParticleSpecies& e = o.iterations[1].particles["e"]; + ParticleSpecies &e = o.iterations[1].particles["e"]; - std::vector< double > position_global(mpi_size); + std::vector position_global(mpi_size); double pos{0.}; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - std::shared_ptr< double > position_local(new double); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local(new double); *position_local = position_global[mpi_rank]; - e["position"]["x"].resetDataset(Dataset(determineDatatype(position_local), {mpi_size})); + e["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local), {mpi_size})); e["position"]["x"].storeChunk(position_local, {mpi_rank}, {1}); - std::vector< uint64_t > positionOffset_global(mpi_size); + std::vector positionOffset_global(mpi_size); uint64_t posOff{0}; - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local(new uint64_t); + std::generate( + positionOffset_global.begin(), positionOffset_global.end(), [&posOff] { + return posOff++; + }); + std::shared_ptr positionOffset_local(new uint64_t); *positionOffset_local = positionOffset_global[mpi_rank]; - e["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local), {mpi_size})); + e["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local), {mpi_size})); e["positionOffset"]["x"].storeChunk(positionOffset_local, {mpi_rank}, {1}); o.flush(); } -TEST_CASE( "adios_write_test_zero_extent", "[parallel][adios]" ) +TEST_CASE("adios_write_test_zero_extent", "[parallel][adios]") { - write_test_zero_extent( false, "bp", true, true ); - write_test_zero_extent( true, "bp", true, true ); + write_test_zero_extent(false, "bp", true, true); + write_test_zero_extent(true, "bp", true, true); } -TEST_CASE( "adios_write_test_skip_chunk", "[parallel][adios]" ) +TEST_CASE("adios_write_test_skip_chunk", "[parallel][adios]") { - write_test_zero_extent( false, "bp", false, true ); - write_test_zero_extent( true, "bp", false, true ); + write_test_zero_extent(false, "bp", false, true); + write_test_zero_extent(true, "bp", false, true); } -TEST_CASE( "adios_write_test_skip_declare", "[parallel][adios]" ) +TEST_CASE("adios_write_test_skip_declare", "[parallel][adios]") { - write_test_zero_extent( false, "bp", false, false ); - write_test_zero_extent( true, "bp", false, false ); + write_test_zero_extent(false, "bp", false, false); + write_test_zero_extent(true, "bp", false, false); } -TEST_CASE( "hzdr_adios_sample_content_test", "[parallel][adios1]" ) +TEST_CASE("hzdr_adios_sample_content_test", "[parallel][adios1]") { int mpi_rank{-1}; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - /* only a 3x3x3 chunk of the actual data is hardcoded. every worker reads 1/3 */ + /* only a 3x3x3 chunk of the actual data is hardcoded. 
every worker reads + * 1/3 */ uint64_t rank = mpi_rank % 3; try { /* development/huebl/lwfa-bgfield-001 */ - Series o = Series("../samples/hzdr-sample/bp/checkpoint_%T.bp", Access::READ_ONLY, MPI_COMM_WORLD); + Series o = Series( + "../samples/hzdr-sample/bp/checkpoint_%T.bp", + Access::READ_ONLY, + MPI_COMM_WORLD); - if( o.iterations.count(0) == 1) + if (o.iterations.count(0) == 1) { - float actual[3][3][3] = {{{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, - {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, - {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, - {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, - {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, - {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, - {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, - {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, - {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}}; - - MeshRecordComponent& B_z = o.iterations[0].meshes["B"]["z"]; + float actual[3][3][3] = { + {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, + {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, + {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, + {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, + {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, + {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, + {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, + {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, + {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}}; + + MeshRecordComponent &B_z = o.iterations[0].meshes["B"]["z"]; Offset offset{20 + rank, 20, 150}; Extent extent{1, 3, 3}; auto data = B_z.loadChunk(offset, extent); o.flush(); - float* raw_ptr = data.get(); + float *raw_ptr = data.get(); - for( int j = 0; j < 3; ++j ) - for( int k = 0; k < 3; ++k ) - REQUIRE(raw_ptr[j*3 + k] == actual[rank][j][k]); + for (int j = 0; j < 3; ++j) + for (int k = 0; k < 3; ++k) + REQUIRE(raw_ptr[j * 3 + k] == actual[rank][j][k]); } - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n";
         return;
@@ -532,8 +614,7 @@ TEST_CASE( "hzdr_adios_sample_content_test", "[parallel][adios1]" )
 #endif
 
 #if openPMD_HAVE_MPI
-void
-write_4D_test( std::string file_ending )
+void write_4D_test(std::string file_ending)
 {
     int mpi_s{-1};
     int mpi_r{-1};
@@ -545,30 +626,29 @@ write_4D_test( std::string file_ending )
     Series o = Series(name, Access::CREATE, MPI_COMM_WORLD);
 
     auto it = o.iterations[1];
-    auto E_x = it.meshes[ "E" ][ "x" ];
+    auto E_x = it.meshes["E"]["x"];
 
     // every rank out of mpi_size MPI ranks contributes two writes:
     // - sliced in first dimension (partioned by rank)
     // - last dimension: every rank has two chunks to contribute
-    std::vector< double > data( 2 * 10 * 6 * 4, mpi_rank);
+    std::vector<double> data(2 * 10 * 6 * 4, mpi_rank);
 
-    E_x.resetDataset( { Datatype::DOUBLE, { mpi_size * 2, 10, 6, 8 } } );
-    E_x.storeChunk( data, { mpi_rank * 2, 0, 0, 0 }, { 2, 10, 6, 4 } );
-    E_x.storeChunk( data, { mpi_rank * 2, 0, 0, 4 }, { 2, 10, 6, 4 } );
+    E_x.resetDataset({Datatype::DOUBLE, {mpi_size * 2, 10, 6, 8}});
+    E_x.storeChunk(data, {mpi_rank * 2, 0, 0, 0}, {2, 10, 6, 4});
+    E_x.storeChunk(data, {mpi_rank * 2, 0, 0, 4}, {2, 10, 6, 4});
 
     o.flush();
 }
 
-TEST_CASE( "write_4D_test", "[parallel]" )
+TEST_CASE("write_4D_test", "[parallel]")
 {
-    for( auto const & t : getBackends() )
+    for (auto const &t : getBackends())
     {
-        write_4D_test( t );
+        write_4D_test(t);
     }
 }
 
-void
-write_makeconst_some( std::string file_ending )
+void write_makeconst_some(std::string file_ending)
 {
     int mpi_s{-1};
     int mpi_r{-1};
@@ -583,140 +663,142 @@ write_makeconst_some( std::string file_ending )
     auto it = o.iterations[1];
     // I would have expected we need this, since the first call that writes
     // data below (makeConstant) is not executed in MPI collective manner
-    //it.open();
-    auto E_x = it.meshes[ "E" ][ "x" ];
+    // it.open();
+    auto E_x = it.meshes["E"]["x"];
 
-    E_x.resetDataset( { Datatype::DOUBLE, { mpi_size * 2, 10, 6, 8 } } );
+    E_x.resetDataset({Datatype::DOUBLE, {mpi_size * 2, 10, 6, 8}});
 
     // HDF5 Attribute writes are unfortunately collective
-    if( mpi_rank != 0u && file_ending != "h5" )
-        E_x.makeConstant( 42 );
+    if (mpi_rank != 0u && file_ending != "h5")
+        E_x.makeConstant(42);
 }
 
-TEST_CASE( "write_makeconst_some", "[parallel]" )
+TEST_CASE("write_makeconst_some", "[parallel]")
 {
-    for( auto const & t : getBackends() )
+    for (auto const &t : getBackends())
     {
-        write_makeconst_some( t );
+        write_makeconst_some(t);
     }
 }
 
-void
-close_iteration_test( std::string file_ending )
+void close_iteration_test(std::string file_ending)
 {
-    int i_mpi_rank{ -1 }, i_mpi_size{ -1 };
-    MPI_Comm_rank( MPI_COMM_WORLD, &i_mpi_rank );
-    MPI_Comm_size( MPI_COMM_WORLD, &i_mpi_size );
-    unsigned mpi_rank{ static_cast< unsigned >( i_mpi_rank ) },
-        mpi_size{ static_cast< unsigned >( i_mpi_size ) };
+    int i_mpi_rank{-1}, i_mpi_size{-1};
+    MPI_Comm_rank(MPI_COMM_WORLD, &i_mpi_rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &i_mpi_size);
+    unsigned mpi_rank{static_cast<unsigned>(i_mpi_rank)},
+        mpi_size{static_cast<unsigned>(i_mpi_size)};
     std::string name = "../samples/close_iterations_parallel_%T."
         + file_ending;
-    std::vector< int > data{ 2, 4, 6, 8 };
+    std::vector<int> data{2, 4, 6, 8};
 
     // { // we do *not* need these parentheses
-    Series write( name, Access::CREATE, MPI_COMM_WORLD );
+    Series write(name, Access::CREATE, MPI_COMM_WORLD);
     bool isAdios1 = write.backend() == "MPI_ADIOS1";
     {
-        Iteration it0 = write.iterations[ 0 ];
-        auto E_x = it0.meshes[ "E" ][ "x" ];
-        E_x.resetDataset( { Datatype::INT, { mpi_size, 4 } } );
-        E_x.storeChunk( data, { mpi_rank, 0 }, { 1, 4 } );
-        it0.close( /* flush = */ false );
+        Iteration it0 = write.iterations[0];
+        auto E_x = it0.meshes["E"]["x"];
+        E_x.resetDataset({Datatype::INT, {mpi_size, 4}});
+        E_x.storeChunk(data, {mpi_rank, 0}, {1, 4});
+        it0.close(/* flush = */ false);
     }
     write.flush();
     // }
 
-    if( isAdios1 )
+    if (isAdios1)
     {
         // run a simplified test for Adios1 since Adios1 has issues opening
         // twice in the same process
-        REQUIRE( auxiliary::file_exists(
-            "../samples/close_iterations_parallel_0.bp" ) );
+        REQUIRE(auxiliary::file_exists(
+            "../samples/close_iterations_parallel_0.bp"));
     }
     else
     {
-        Series read( name, Access::READ_ONLY, MPI_COMM_WORLD );
-        Iteration it0 = read.iterations[ 0 ];
-        auto E_x_read = it0.meshes[ "E" ][ "x" ];
-        auto chunk = E_x_read.loadChunk< int >( { 0, 0 }, { mpi_size, 4 } );
-        it0.close( /* flush = */ false );
+        Series read(name, Access::READ_ONLY, MPI_COMM_WORLD);
+        Iteration it0 = read.iterations[0];
+        auto E_x_read = it0.meshes["E"]["x"];
+        auto chunk = E_x_read.loadChunk<int>({0, 0}, {mpi_size, 4});
+        it0.close(/* flush = */ false);
         read.flush();
-        for( size_t i = 0; i < 4 * mpi_size; ++i )
+        for (size_t i = 0; i < 4 * mpi_size; ++i)
         {
-            REQUIRE( data[ i % 4 ] == chunk.get()[ i ] );
+            REQUIRE(data[i % 4] == chunk.get()[i]);
         }
     }
 
     {
-        Iteration it1 = write.iterations[ 1 ];
-        auto E_x = it1.meshes[ "E" ][ "x" ];
-        E_x.resetDataset( { Datatype::INT, { mpi_size, 4 } } );
-        E_x.storeChunk( data, { mpi_rank, 0 }, { 1, 4 } );
-        it1.close( /* flush = */ true );
+        Iteration it1 = write.iterations[1];
+        auto E_x = it1.meshes["E"]["x"];
+        E_x.resetDataset({Datatype::INT, {mpi_size, 4}});
+        E_x.storeChunk(data, {mpi_rank, 0}, {1, 4});
+        it1.close(/* flush = */ true);
         // illegally access iteration after closing
-        E_x.storeChunk( data, { mpi_rank, 0 }, { 1, 4 } );
-        REQUIRE_THROWS( write.flush() );
+        E_x.storeChunk(data, {mpi_rank, 0}, {1, 4});
+        REQUIRE_THROWS(write.flush());
     }
-    if( isAdios1 )
+    if (isAdios1)
     {
         // run a simplified test for Adios1 since Adios1 has issues opening
         // twice in the same process
-        REQUIRE( auxiliary::file_exists(
-            "../samples/close_iterations_parallel_1.bp" ) );
+        REQUIRE(auxiliary::file_exists(
+            "../samples/close_iterations_parallel_1.bp"));
     }
     else
     {
-        Series read( name, Access::READ_ONLY, MPI_COMM_WORLD );
-        Iteration it1 = read.iterations[ 1 ];
-        auto E_x_read = it1.meshes[ "E" ][ "x" ];
-        auto chunk = E_x_read.loadChunk< int >( { 0, 0 }, { mpi_size, 4 } );
-        it1.close( /* flush = */ true );
-        for( size_t i = 0; i < 4 * mpi_size; ++i )
+        Series read(name, Access::READ_ONLY, MPI_COMM_WORLD);
+        Iteration it1 = read.iterations[1];
+        auto E_x_read = it1.meshes["E"]["x"];
+        auto chunk = E_x_read.loadChunk<int>({0, 0}, {mpi_size, 4});
+        it1.close(/* flush = */ true);
+        for (size_t i = 0; i < 4 * mpi_size; ++i)
         {
-            REQUIRE( data[ i % 4 ] == chunk.get()[ i ] );
+            REQUIRE(data[i % 4] == chunk.get()[i]);
         }
-        auto read_again =
-            E_x_read.loadChunk< int >( { 0, 0 }, { mpi_size, 4 } );
-        REQUIRE_THROWS( read.flush() );
+        auto read_again = E_x_read.loadChunk<int>({0, 0}, {mpi_size, 4});
+        REQUIRE_THROWS(read.flush());
     }
} -TEST_CASE( "close_iteration_test", "[parallel]" ) +TEST_CASE("close_iteration_test", "[parallel]") { - for( auto const & t : getBackends() ) + for (auto const &t : getBackends()) { - close_iteration_test( t ); + close_iteration_test(t); } } -void -file_based_write_read( std::string file_ending ) +void file_based_write_read(std::string file_ending) { namespace io = openPMD; // the iterations we want to write - std::vector< int > iterations = { 10, 30, 50, 70 }; + std::vector iterations = {10, 30, 50, 70}; // MPI communicator meta-data and file name - int i_mpi_rank{ -1 }, i_mpi_size{ -1 }; - MPI_Comm_rank( MPI_COMM_WORLD, &i_mpi_rank ); - MPI_Comm_size( MPI_COMM_WORLD, &i_mpi_size ); - unsigned mpi_rank{ static_cast< unsigned >( i_mpi_rank ) }, - mpi_size{ static_cast< unsigned >( i_mpi_size ) }; + int i_mpi_rank{-1}, i_mpi_size{-1}; + MPI_Comm_rank(MPI_COMM_WORLD, &i_mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &i_mpi_size); + unsigned mpi_rank{static_cast(i_mpi_rank)}, + mpi_size{static_cast(i_mpi_size)}; std::string name = "../samples/file_based_write_read_%05T." + file_ending; // data (we just use the same data for each step for demonstration) // we assign 10 longitudinal cells & 300 transversal cells per rank here - unsigned const local_Nz = 10u; + unsigned const local_Nz = 10u; unsigned const global_Nz = local_Nz * mpi_size; unsigned const global_Nx = 300u; using precision = double; - std::vector< precision > E_x_data( global_Nx * local_Nz ); + std::vector E_x_data(global_Nx * local_Nz); // filling some values: 0, 1, ... - std::iota( E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); - std::transform(E_x_data.begin(), E_x_data.end(), E_x_data.begin(), - [](precision d) -> precision { return std::sin( d * 2.0 * 3.1415 / 20. ); }); + std::iota(E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); + std::transform( + E_x_data.begin(), + E_x_data.end(), + E_x_data.begin(), + [](precision d) -> precision { + return std::sin(d * 2.0 * 3.1415 / 20.); + }); { // open a parallel series @@ -724,18 +806,21 @@ file_based_write_read( std::string file_ending ) series.setIterationEncoding(IterationEncoding::fileBased); int const last_step = 100; - for (int step = 0; step < last_step; ++step) { + for (int step = 0; step < last_step; ++step) + { MPI_Barrier(MPI_COMM_WORLD); // is this an output step? bool const rank_in_output_step = - std::find(iterations.begin(), iterations.end(), step) != iterations.end(); - if (!rank_in_output_step) continue; + std::find(iterations.begin(), iterations.end(), step) != + iterations.end(); + if (!rank_in_output_step) + continue; // now we write (parallel, independent I/O) auto it = series.iterations[step]; auto E = it.meshes["E"]; // record - auto E_x = E["x"]; // record component + auto E_x = E["x"]; // record component // some meta-data E.setAxisLabels({"z", "x"}); @@ -745,21 +830,21 @@ file_based_write_read( std::string file_ending ) // update values std::iota(E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); - std::transform(E_x_data.begin(), E_x_data.end(), E_x_data.begin(), - [&step](precision d) -> precision { - return std::sin(d * 2.0 * 3.1415 / 100. + step); - }); + std::transform( + E_x_data.begin(), + E_x_data.end(), + E_x_data.begin(), + [&step](precision d) -> precision { + return std::sin(d * 2.0 * 3.1415 / 100. 
+ step); + }); auto dataset = io::Dataset( - io::determineDatatype(), - {global_Nx, global_Nz}); + io::determineDatatype(), {global_Nx, global_Nz}); E_x.resetDataset(dataset); Offset chunk_offset = {0, local_Nz * mpi_rank}; Extent chunk_extent = {global_Nx, local_Nz}; - E_x.storeChunk( - io::shareRaw(E_x_data), - chunk_offset, chunk_extent); + E_x.storeChunk(io::shareRaw(E_x_data), chunk_offset, chunk_extent); series.flush(); } } @@ -770,40 +855,39 @@ file_based_write_read( std::string file_ending ) name, Access::READ_ONLY, MPI_COMM_WORLD, - "{\"defer_iteration_parsing\": true}" ); - Iteration it = read.iterations[ 30 ]; + "{\"defer_iteration_parsing\": true}"); + Iteration it = read.iterations[30]; it.open(); // collective - if( mpi_rank == 0 ) // non-collective branch + if (mpi_rank == 0) // non-collective branch { auto E_x = it.meshes["E"]["x"]; - auto data = E_x.loadChunk< double >(); + auto data = E_x.loadChunk(); read.flush(); } } } -TEST_CASE( "file_based_write_read", "[parallel]" ) +TEST_CASE("file_based_write_read", "[parallel]") { - for( auto const & t : getBackends() ) + for (auto const &t : getBackends()) { - file_based_write_read( t ); + file_based_write_read(t); } } -void -hipace_like_write( std::string file_ending ) +void hipace_like_write(std::string file_ending) { namespace io = openPMD; bool const verbose = false; // print statements // the iterations we want to write - std::vector< int > iterations = { 10, 30, 50, 70 }; + std::vector iterations = {10, 30, 50, 70}; // Parallel HDF5 + chunking does not work with independent IO pattern bool const isHDF5 = file_ending == "h5"; std::string options = "{}"; - if( isHDF5 ) + if (isHDF5) options = R"( { "hdf5": { @@ -814,28 +898,33 @@ hipace_like_write( std::string file_ending ) })"; // MPI communicator meta-data and file name - int i_mpi_rank{ -1 }, i_mpi_size{ -1 }; - MPI_Comm_rank( MPI_COMM_WORLD, &i_mpi_rank ); - MPI_Comm_size( MPI_COMM_WORLD, &i_mpi_size ); - unsigned mpi_rank{ static_cast< unsigned >( i_mpi_rank ) }, - mpi_size{ static_cast< unsigned >( i_mpi_size ) }; + int i_mpi_rank{-1}, i_mpi_size{-1}; + MPI_Comm_rank(MPI_COMM_WORLD, &i_mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &i_mpi_size); + unsigned mpi_rank{static_cast(i_mpi_rank)}, + mpi_size{static_cast(i_mpi_size)}; std::string name = "../samples/hipace_like_write." + file_ending; // data (we just use the same data for each step for demonstration) // we assign 10 longitudinal cells & 300 transversal cells per rank here - unsigned const local_Nz = 10u; + unsigned const local_Nz = 10u; unsigned const global_Nz = local_Nz * mpi_size; unsigned const global_Nx = 300u; using precision = double; - std::vector< precision > E_x_data( global_Nx * local_Nz ); + std::vector E_x_data(global_Nx * local_Nz); // filling some values: 0, 1, ... - std::iota( E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); - std::transform(E_x_data.begin(), E_x_data.end(), E_x_data.begin(), - [](precision d) -> precision { return std::sin( d * 2.0 * 3.1415 / 20. 
); }); + std::iota(E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); + std::transform( + E_x_data.begin(), + E_x_data.end(), + E_x_data.begin(), + [](precision d) -> precision { + return std::sin(d * 2.0 * 3.1415 / 20.); + }); // open a parallel series - Series series( name, Access::CREATE, MPI_COMM_WORLD, options ); - series.setIterationEncoding( IterationEncoding::groupBased ); + Series series(name, Access::CREATE, MPI_COMM_WORLD, options); + series.setIterationEncoding(IterationEncoding::groupBased); series.flush(); // in HiPACE, ranks write one-by-one to a "swiped" step, overlapping @@ -843,7 +932,8 @@ hipace_like_write( std::string file_ending ) int const last_step = 100; int const my_first_step = i_mpi_rank * int(local_Nz); int const all_last_step = last_step + (i_mpi_size - 1) * int(local_Nz); - for( int first_rank_step = 0; first_rank_step < all_last_step; ++first_rank_step ) + for (int first_rank_step = 0; first_rank_step < all_last_step; + ++first_rank_step) { MPI_Barrier(MPI_COMM_WORLD); @@ -851,44 +941,54 @@ hipace_like_write( std::string file_ending ) // step on the local rank int const step = first_rank_step - my_first_step; - if( verbose ) - std::cout << "[" << i_mpi_rank << "] " << - "step: " << step << " | first_ranks_step: " << first_rank_step << std::endl; + if (verbose) + std::cout << "[" << i_mpi_rank << "] " + << "step: " << step + << " | first_ranks_step: " << first_rank_step + << std::endl; // do we start writing to a new step? bool const start_new_output_step = - std::find(iterations.begin(), iterations.end(), first_rank_step) != iterations.end(); + std::find(iterations.begin(), iterations.end(), first_rank_step) != + iterations.end(); // are we just about to finish writing to a step? - // TODO; if we detect this, we can collectively call `it.close()` after storeChunk/flush() + // TODO; if we detect this, we can collectively call `it.close()` after + // storeChunk/flush() - // collectively: create a new iteration and declare records we want to write - if( verbose ) - std::cout << "[" << i_mpi_rank << "] " << - "start_new_output_step: " << start_new_output_step << std::endl; - if( start_new_output_step && false ) // looks like we don't even need that :) + // collectively: create a new iteration and declare records we want to + // write + if (verbose) + std::cout << "[" << i_mpi_rank << "] " + << "start_new_output_step: " << start_new_output_step + << std::endl; + if (start_new_output_step && + false) // looks like we don't even need that :) { auto it = series.iterations[first_rank_step]; auto E = it.meshes["E"]; // record auto E_x = E["x"]; // record component auto dataset = io::Dataset( - io::determineDatatype< precision >( ), - {global_Nx, global_Nz}); + io::determineDatatype(), {global_Nx, global_Nz}); E_x.resetDataset(dataset); - //series.flush(); + // series.flush(); } // has this ranks started computations yet? - if( step < 0 ) continue; + if (step < 0) + continue; // has this ranks stopped computations? - if( step > last_step ) continue; + if (step > last_step) + continue; // does this rank contribute to with output currently? 
bool const rank_in_output_step = - std::find(iterations.begin(), iterations.end(), step) != iterations.end(); - if( !rank_in_output_step ) continue; + std::find(iterations.begin(), iterations.end(), step) != + iterations.end(); + if (!rank_in_output_step) + continue; // now we write (parallel, independent I/O) auto it = series.iterations[step]; auto E = it.meshes["E"]; // record - auto E_x = E["x"]; // record component + auto E_x = E["x"]; // record component // some meta-data E.setAxisLabels({"z", "x"}); @@ -897,18 +997,22 @@ hipace_like_write( std::string file_ending ) E_x.setPosition({0.0, 0.0}); // update values - std::iota( E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); - std::transform(E_x_data.begin(), E_x_data.end(), E_x_data.begin(), - [&step](precision d) -> precision { return std::sin( d * 2.0 * 3.1415 / 100. + step ); }); + std::iota(E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); + std::transform( + E_x_data.begin(), + E_x_data.end(), + E_x_data.begin(), + [&step](precision d) -> precision { + return std::sin(d * 2.0 * 3.1415 / 100. + step); + }); auto dataset = io::Dataset( - io::determineDatatype< precision >( ), - {global_Nx, global_Nz}); + io::determineDatatype(), {global_Nx, global_Nz}); E_x.resetDataset(dataset); Offset chunk_offset = {0, local_Nz * mpi_rank}; Extent chunk_extent = {global_Nx, local_Nz}; - auto const copyToShared = []( std::vector< precision > const & data ) { + auto const copyToShared = [](std::vector const &data) { auto d = std::shared_ptr( new precision[data.size()], std::default_delete()); std::copy(data.begin(), data.end(), d.get()); @@ -916,65 +1020,63 @@ hipace_like_write( std::string file_ending ) }; E_x.storeChunk( copyToShared(E_x_data), - //io::shareRaw(E_x_data), - chunk_offset, chunk_extent); + // io::shareRaw(E_x_data), + chunk_offset, + chunk_extent); series.flush(); } } -TEST_CASE( "hipace_like_write", "[parallel]" ) +TEST_CASE("hipace_like_write", "[parallel]") { - for( auto const & t : getBackends() ) + for (auto const &t : getBackends()) { - hipace_like_write( t ); + hipace_like_write(t); } } #endif #if openPMD_HAVE_ADIOS2 && openPMD_HAVE_MPI -void -adios2_streaming( bool variableBasedLayout ) +void adios2_streaming(bool variableBasedLayout) { - int size{ -1 }; - int rank{ -1 }; - MPI_Comm_size( MPI_COMM_WORLD, &size ); - MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) == "ADIOS1" ) + int size{-1}; + int rank{-1}; + MPI_Comm_size(MPI_COMM_WORLD, &size); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1") { // run this test for ADIOS2 only return; } - if( size < 2 || rank > 1 ) + if (size < 2 || rank > 1) { return; } constexpr size_t extent = 100; - if( rank == 0 ) + if (rank == 0) { // write - Series writeSeries( - "../samples/adios2_stream.sst", Access::CREATE ); - if( variableBasedLayout ) + Series writeSeries("../samples/adios2_stream.sst", Access::CREATE); + if (variableBasedLayout) { - writeSeries.setIterationEncoding( - IterationEncoding::variableBased ); + writeSeries.setIterationEncoding(IterationEncoding::variableBased); } auto iterations = writeSeries.writeIterations(); - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - auto iteration = iterations[ i ]; - auto E_x = iteration.meshes[ "E" ][ "x" ]; + auto iteration = iterations[i]; + auto E_x = iteration.meshes["E"]["x"]; E_x.resetDataset( - openPMD::Dataset( openPMD::Datatype::INT, { extent } ) ); - std::vector< int > 
data( extent, i ); - E_x.storeChunk( data, { 0 }, { extent } ); + openPMD::Dataset(openPMD::Datatype::INT, {extent})); + std::vector data(extent, i); + E_x.storeChunk(data, {0}, {extent}); // we encourage manually closing iterations, but it should // not matter so let's do the switcharoo for this test - if( i % 2 == 0 ) + if (i % 2 == 0) { writeSeries.flush(); } @@ -984,7 +1086,7 @@ adios2_streaming( bool variableBasedLayout ) } } } - else if( rank == 1 ) + else if (rank == 1) { // read // it should be possible to select the sst engine via file ending or @@ -996,7 +1098,7 @@ adios2_streaming( bool variableBasedLayout ) * this avoids that the reader sees that file. */ using namespace std::chrono_literals; - std::this_thread::sleep_for( 1s ); + std::this_thread::sleep_for(1s); std::string options = R"( { "adios2": { @@ -1010,18 +1112,18 @@ adios2_streaming( bool variableBasedLayout ) Series readSeries( "../samples/adios2_stream.sst", Access::READ_ONLY, - "{\"defer_iteration_parsing\": true}" ); + "{\"defer_iteration_parsing\": true}"); size_t last_iteration_index = 0; - for( auto iteration : readSeries.readIterations() ) + for (auto iteration : readSeries.readIterations()) { - auto E_x = iteration.meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 1 ); - REQUIRE( E_x.getExtent()[ 0 ] == extent ); - auto chunk = E_x.loadChunk< int >( { 0 }, { extent } ); + auto E_x = iteration.meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 1); + REQUIRE(E_x.getExtent()[0] == extent); + auto chunk = E_x.loadChunk({0}, {extent}); // we encourage manually closing iterations, but it should // not matter so let's do the switcharoo for this test - if( last_iteration_index % 2 == 0 ) + if (last_iteration_index % 2 == 0) { readSeries.flush(); } @@ -1029,33 +1131,33 @@ adios2_streaming( bool variableBasedLayout ) { iteration.close(); } - for( size_t i = 0; i < extent; ++i ) + for (size_t i = 0; i < extent; ++i) { - REQUIRE( chunk.get()[ i ] == int(iteration.iterationIndex) ); + REQUIRE(chunk.get()[i] == int(iteration.iterationIndex)); } last_iteration_index = iteration.iterationIndex; } - REQUIRE( last_iteration_index == 9 ); + REQUIRE(last_iteration_index == 9); } } -TEST_CASE( "adios2_streaming", "[pseudoserial][adios2]" ) +TEST_CASE("adios2_streaming", "[pseudoserial][adios2]") { - adios2_streaming( true ); - adios2_streaming( false ); + adios2_streaming(true); + adios2_streaming(false); } -TEST_CASE( "parallel_adios2_json_config", "[parallel][adios2]" ) +TEST_CASE("parallel_adios2_json_config", "[parallel][adios2]") { - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) == "ADIOS1" ) + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1") { // run this test for ADIOS2 only return; } - int size{ -1 }; - int rank{ -1 }; - MPI_Comm_size( MPI_COMM_WORLD, &size ); - MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + int size{-1}; + int rank{-1}; + MPI_Comm_size(MPI_COMM_WORLD, &size); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); std::string writeConfigBP3 = R"END( { @@ -1109,27 +1211,24 @@ TEST_CASE( "parallel_adios2_json_config", "[parallel][adios2]" ) } } )END"; - auto const write = [ size, rank ]( - std::string const & filename, - std::string const & config ) { - openPMD::Series series( - filename, openPMD::Access::CREATE, MPI_COMM_WORLD, config ); - auto E_x = series.iterations[ 0 ].meshes[ "E" ][ "x" ]; - openPMD::Dataset ds( - openPMD::Datatype::INT, { unsigned( size ), 1000 } ); - E_x.resetDataset( ds ); - std::vector< int > data( 1000, 0 ); - E_x.storeChunk( data, { unsigned( 
rank ), 0 }, { 1, 1000 } ); - series.flush(); - }; - write( "../samples/jsonConfiguredBP4Parallel.bp", writeConfigBP4 ); - write( "../samples/jsonConfiguredBP3Parallel.bp", writeConfigBP3 ); + auto const write = + [size, rank](std::string const &filename, std::string const &config) { + openPMD::Series series( + filename, openPMD::Access::CREATE, MPI_COMM_WORLD, config); + auto E_x = series.iterations[0].meshes["E"]["x"]; + openPMD::Dataset ds(openPMD::Datatype::INT, {unsigned(size), 1000}); + E_x.resetDataset(ds); + std::vector data(1000, 0); + E_x.storeChunk(data, {unsigned(rank), 0}, {1, 1000}); + series.flush(); + }; + write("../samples/jsonConfiguredBP4Parallel.bp", writeConfigBP4); + write("../samples/jsonConfiguredBP3Parallel.bp", writeConfigBP3); // BP3 engine writes files, BP4 writes directories - REQUIRE( - openPMD::auxiliary::file_exists( "../samples/jsonConfiguredBP3.bp" ) ); - REQUIRE( openPMD::auxiliary::directory_exists( - "../samples/jsonConfiguredBP4.bp" ) ); + REQUIRE(openPMD::auxiliary::file_exists("../samples/jsonConfiguredBP3.bp")); + REQUIRE(openPMD::auxiliary::directory_exists( + "../samples/jsonConfiguredBP4.bp")); std::string readConfigBP3 = R"END( { @@ -1152,124 +1251,116 @@ TEST_CASE( "parallel_adios2_json_config", "[parallel][adios2]" ) } )END"; auto const read = - [ size, rank ] - ( std::string const & filename, std::string const & config ) { + [size, rank](std::string const &filename, std::string const &config) { // let's write the config to a file and read it from there - if( rank == 0 ) + if (rank == 0) { std::fstream file; - file.open( "../samples/read_config.json", std::ios_base::out ); + file.open("../samples/read_config.json", std::ios_base::out); file << config; file.flush(); } - MPI_Barrier( MPI_COMM_WORLD ); + MPI_Barrier(MPI_COMM_WORLD); openPMD::Series series( filename, openPMD::Access::READ_ONLY, MPI_COMM_WORLD, - " @ ../samples/read_config.json " ); - auto E_x = series.iterations[ 0 ].meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 2 ); - REQUIRE( E_x.getExtent()[ 0 ] == unsigned( size ) ); - REQUIRE( E_x.getExtent()[ 1 ] == 1000 ); - auto chunk = - E_x.loadChunk< int >( { unsigned( rank ), 0 }, { 1, 1000 } ); + " @ ../samples/read_config.json "); + auto E_x = series.iterations[0].meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 2); + REQUIRE(E_x.getExtent()[0] == unsigned(size)); + REQUIRE(E_x.getExtent()[1] == 1000); + auto chunk = E_x.loadChunk({unsigned(rank), 0}, {1, 1000}); series.flush(); - for( size_t i = 0; i < 1000; ++i ) + for (size_t i = 0; i < 1000; ++i) { - REQUIRE( chunk.get()[ i ] == 0 ); + REQUIRE(chunk.get()[i] == 0); } }; - read( "../samples/jsonConfiguredBP3Parallel.bp", readConfigBP3 ); - read( "../samples/jsonConfiguredBP4Parallel.bp", readConfigBP4 ); + read("../samples/jsonConfiguredBP3Parallel.bp", readConfigBP3); + read("../samples/jsonConfiguredBP4Parallel.bp", readConfigBP4); } -void -adios2_ssc() +void adios2_ssc() { auto const extensions = openPMD::getFileExtensions(); - if( std::find( extensions.begin(), extensions.end(), "ssc" ) == - extensions.end() ) + if (std::find(extensions.begin(), extensions.end(), "ssc") == + extensions.end()) { // SSC engine not available in ADIOS2 return; } - int global_size{ -1 }; - int global_rank{ -1 }; - MPI_Comm_size( MPI_COMM_WORLD, &global_size ); - MPI_Comm_rank( MPI_COMM_WORLD, &global_rank ); - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) == "ADIOS1" ) + int global_size{-1}; + int global_rank{-1}; + MPI_Comm_size(MPI_COMM_WORLD, &global_size); + 
MPI_Comm_rank(MPI_COMM_WORLD, &global_rank); + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1") { // run this test for ADIOS2 only return; } - if( global_size < 2 ) + if (global_size < 2) { return; } int color = global_rank % 2; MPI_Comm local_comm; - MPI_Comm_split( MPI_COMM_WORLD, color, global_rank, &local_comm ); - int local_size{ -1 }; - int local_rank{ -1 }; - MPI_Comm_size( local_comm, &local_size ); - MPI_Comm_rank( local_comm, &local_rank ); + MPI_Comm_split(MPI_COMM_WORLD, color, global_rank, &local_comm); + int local_size{-1}; + int local_rank{-1}; + MPI_Comm_size(local_comm, &local_size); + MPI_Comm_rank(local_comm, &local_rank); constexpr size_t extent = 10; - if( color == 0 ) + if (color == 0) { // write Series writeSeries( - "../samples/adios2_stream.ssc", - Access::CREATE, - local_comm ); + "../samples/adios2_stream.ssc", Access::CREATE, local_comm); auto iterations = writeSeries.writeIterations(); - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - auto iteration = iterations[ i ]; - auto E_x = iteration.meshes[ "E" ][ "x" ]; - E_x.resetDataset( openPMD::Dataset( - openPMD::Datatype::INT, { unsigned( local_size ), extent } ) ); - std::vector< int > data( extent, i ); - E_x.storeChunk( - data, { unsigned( local_rank ), 0 }, { 1, extent } ); + auto iteration = iterations[i]; + auto E_x = iteration.meshes["E"]["x"]; + E_x.resetDataset(openPMD::Dataset( + openPMD::Datatype::INT, {unsigned(local_size), extent})); + std::vector data(extent, i); + E_x.storeChunk(data, {unsigned(local_rank), 0}, {1, extent}); iteration.close(); } } - else if( color == 1 ) + else if (color == 1) { // read Series readSeries( - "../samples/adios2_stream.ssc", - Access::READ_ONLY, - local_comm ); + "../samples/adios2_stream.ssc", Access::READ_ONLY, local_comm); size_t last_iteration_index = 0; - for( auto iteration : readSeries.readIterations() ) + for (auto iteration : readSeries.readIterations()) { - auto E_x = iteration.meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 2 ); - REQUIRE( E_x.getExtent()[ 1 ] == extent ); - auto chunk = E_x.loadChunk< int >( - { unsigned( local_rank ), 0 }, { 1, extent } ); + auto E_x = iteration.meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 2); + REQUIRE(E_x.getExtent()[1] == extent); + auto chunk = + E_x.loadChunk({unsigned(local_rank), 0}, {1, extent}); iteration.close(); - for( size_t i = 0; i < extent; ++i ) + for (size_t i = 0; i < extent; ++i) { - REQUIRE( chunk.get()[ i ] == int(iteration.iterationIndex) ); + REQUIRE(chunk.get()[i] == int(iteration.iterationIndex)); } last_iteration_index = iteration.iterationIndex; } - REQUIRE( last_iteration_index == 9 ); + REQUIRE(last_iteration_index == 9); } } -TEST_CASE( "adios2_ssc", "[parallel][adios2]" ) +TEST_CASE("adios2_ssc", "[parallel][adios2]") { adios2_ssc(); } diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index 412d8058e2..9a12deb411 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -1,7 +1,7 @@ // expose private and protected members for invasive testing #if openPMD_USE_INVASIVE_TESTS -# define OPENPMD_private public -# define OPENPMD_protected public +#define OPENPMD_private public +#define OPENPMD_protected public #endif #include "openPMD/auxiliary/Environment.hpp" @@ -24,33 +24,33 @@ #include #include #include -#include #include +#include #include #include #include using namespace openPMD; -std::vector< std::string > testedFileExtensions() +std::vector testedFileExtensions() { auto allExtensions = 
getFileExtensions(); auto newEnd = std::remove_if( - allExtensions.begin(), - allExtensions.end(), - []( std::string const & ext ) - { return ext == "sst" || ext == "ssc"; } ); - return { allExtensions.begin(), newEnd }; + allExtensions.begin(), allExtensions.end(), [](std::string const &ext) { + return ext == "sst" || ext == "ssc"; + }); + return {allExtensions.begin(), newEnd}; } #if openPMD_HAVE_ADIOS2 -TEST_CASE( "adios2_char_portability", "[serial][adios2]" ) +TEST_CASE("adios2_char_portability", "[serial][adios2]") { /* * This tests portability of char attributes in ADIOS2 in schema 20210209. */ - if( auxiliary::getEnvString("OPENPMD_NEW_ATTRIBUTE_LAYOUT", "NOT_SET") == "NOT_SET") + if (auxiliary::getEnvString("OPENPMD_NEW_ATTRIBUTE_LAYOUT", "NOT_SET") == + "NOT_SET") { /* * @todo As soon as we have added automatic detection for the new @@ -71,109 +71,105 @@ TEST_CASE( "adios2_char_portability", "[serial][adios2]" ) })END"; { adios2::ADIOS adios; - auto IO = adios.DeclareIO( "IO" ); + auto IO = adios.DeclareIO("IO"); auto engine = IO.Open( - "../samples/adios2_char_portability.bp", adios2::Mode::Write ); + "../samples/adios2_char_portability.bp", adios2::Mode::Write); engine.BeginStep(); // write default openPMD attributes - auto writeAttribute = - [ &engine, &IO ]( std::string const & name, auto value ) - { - using variable_type = decltype( value ); - engine.Put( IO.DefineVariable< variable_type >( name ), value ); + auto writeAttribute = [&engine, + &IO](std::string const &name, auto value) { + using variable_type = decltype(value); + engine.Put(IO.DefineVariable(name), value); }; - writeAttribute( "/basePath", std::string( "/data/%T/" ) ); - writeAttribute( "/date", std::string( "2021-02-22 11:14:00 +0000" ) ); - writeAttribute( "/iterationEncoding", std::string( "groupBased" ) ); - writeAttribute( "/iterationFormat", std::string( "/data/%T/" ) ); - writeAttribute( "/openPMD", std::string( "1.1.0" ) ); - writeAttribute( "/openPMDextension", uint32_t( 0 ) ); - writeAttribute( "/software", std::string( "openPMD-api" ) ); - writeAttribute( "/softwareVersion", std::string( "0.14.4" ) ); - - IO.DefineAttribute< uint64_t >( - "__openPMD_internal/openPMD2_adios2_schema", 20210209 ); - IO.DefineAttribute< unsigned char >( "__openPMD_internal/useSteps", 1 ); + writeAttribute("/basePath", std::string("/data/%T/")); + writeAttribute("/date", std::string("2021-02-22 11:14:00 +0000")); + writeAttribute("/iterationEncoding", std::string("groupBased")); + writeAttribute("/iterationFormat", std::string("/data/%T/")); + writeAttribute("/openPMD", std::string("1.1.0")); + writeAttribute("/openPMDextension", uint32_t(0)); + writeAttribute("/software", std::string("openPMD-api")); + writeAttribute("/softwareVersion", std::string("0.14.5")); + + IO.DefineAttribute( + "__openPMD_internal/openPMD2_adios2_schema", 20210209); + IO.DefineAttribute("__openPMD_internal/useSteps", 1); // write char things that should be read back properly std::string baseString = "abcdefghi"; // null termination not necessary, ADIOS knows the size of its variables - std::vector< signed char > signedVector( 9 ); - std::vector< unsigned char > unsignedVector( 9 ); - for( unsigned i = 0; i < 9; ++i ) + std::vector signedVector(9); + std::vector unsignedVector(9); + for (unsigned i = 0; i < 9; ++i) { - signedVector[ i ] = baseString[ i ]; - unsignedVector[ i ] = baseString[ i ]; + signedVector[i] = baseString[i]; + unsignedVector[i] = baseString[i]; } engine.Put( - IO.DefineVariable< signed char >( - "/signedVector", { 3, 3 }, { 
0, 0 }, { 3, 3 } ), - signedVector.data() ); + IO.DefineVariable( + "/signedVector", {3, 3}, {0, 0}, {3, 3}), + signedVector.data()); engine.Put( - IO.DefineVariable< unsigned char >( - "/unsignedVector", { 3, 3 }, { 0, 0 }, { 3, 3 } ), - unsignedVector.data() ); + IO.DefineVariable( + "/unsignedVector", {3, 3}, {0, 0}, {3, 3}), + unsignedVector.data()); engine.Put( - IO.DefineVariable< char >( - "/unspecifiedVector", { 3, 3 }, { 0, 0 }, { 3, 3 } ), - baseString.c_str() ); + IO.DefineVariable( + "/unspecifiedVector", {3, 3}, {0, 0}, {3, 3}), + baseString.c_str()); - writeAttribute( "/signedChar", ( signed char )'a' ); - writeAttribute( "/unsignedChar", ( unsigned char )'a' ); - writeAttribute( "/char", ( char )'a' ); + writeAttribute("/signedChar", (signed char)'a'); + writeAttribute("/unsignedChar", (unsigned char)'a'); + writeAttribute("/char", (char)'a'); engine.EndStep(); engine.Close(); } { - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "ADIOS2" ) != - "ADIOS2" ) + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "ADIOS2") != "ADIOS2") { return; } Series read( - "../samples/adios2_char_portability.bp", - Access::READ_ONLY, - config ); - auto signedVectorAttribute = read.getAttribute( "signedVector" ); - REQUIRE( signedVectorAttribute.dtype == Datatype::VEC_STRING ); - auto unsignedVectorAttribute = read.getAttribute( "unsignedVector" ); - REQUIRE( unsignedVectorAttribute.dtype == Datatype::VEC_STRING ); + "../samples/adios2_char_portability.bp", Access::READ_ONLY, config); + auto signedVectorAttribute = read.getAttribute("signedVector"); + REQUIRE(signedVectorAttribute.dtype == Datatype::VEC_STRING); + auto unsignedVectorAttribute = read.getAttribute("unsignedVector"); + REQUIRE(unsignedVectorAttribute.dtype == Datatype::VEC_STRING); auto unspecifiedVectorAttribute = - read.getAttribute( "unspecifiedVector" ); - REQUIRE( unspecifiedVectorAttribute.dtype == Datatype::VEC_STRING ); - std::vector< std::string > desiredVector{ "abc", "def", "ghi" }; + read.getAttribute("unspecifiedVector"); + REQUIRE(unspecifiedVectorAttribute.dtype == Datatype::VEC_STRING); + std::vector desiredVector{"abc", "def", "ghi"}; REQUIRE( - signedVectorAttribute.get< std::vector< std::string > >() == - desiredVector ); + signedVectorAttribute.get >() == + desiredVector); REQUIRE( - unsignedVectorAttribute.get< std::vector< std::string > >() == - desiredVector ); + unsignedVectorAttribute.get >() == + desiredVector); REQUIRE( - unspecifiedVectorAttribute.get< std::vector< std::string > >() == - desiredVector ); + unspecifiedVectorAttribute.get >() == + desiredVector); - auto signedCharAttribute = read.getAttribute( "signedChar" ); + auto signedCharAttribute = read.getAttribute("signedChar"); // we don't have that datatype yet // REQUIRE(unsignedCharAttribute.dtype == Datatype::SCHAR); - auto unsignedCharAttribute = read.getAttribute( "unsignedChar" ); - REQUIRE( unsignedCharAttribute.dtype == Datatype::UCHAR ); - auto charAttribute = read.getAttribute( "char" ); + auto unsignedCharAttribute = read.getAttribute("unsignedChar"); + REQUIRE(unsignedCharAttribute.dtype == Datatype::UCHAR); + auto charAttribute = read.getAttribute("char"); // might currently report Datatype::UCHAR on some platforms // REQUIRE(unsignedCharAttribute.dtype == Datatype::CHAR); - REQUIRE( signedCharAttribute.get< char >() == char( 'a' ) ); - REQUIRE( unsignedCharAttribute.get< char >() == char( 'a' ) ); - REQUIRE( charAttribute.get< char >() == char( 'a' ) ); + REQUIRE(signedCharAttribute.get() == char('a')); + 
REQUIRE(unsignedCharAttribute.get() == char('a')); + REQUIRE(charAttribute.get() == char('a')); } } #endif void write_and_read_many_iterations( - std::string const & ext, bool intermittentFlushes ) + std::string const &ext, bool intermittentFlushes) { // the idea here is to trigger the maximum allowed number of file handles, // e.g., the upper limit in "ulimit -n" (default: often 1024). Once this @@ -183,8 +179,10 @@ void write_and_read_many_iterations( // iteration is not dirty before closing // Our flushing logic must not forget to close even if the iteration is // otherwise untouched and needs not be flushed. - unsigned int nIterations = auxiliary::getEnvNum( "OPENPMD_TEST_NFILES_MAX", 1030 ); - std::string filename = "../samples/many_iterations/many_iterations_%T." + ext; + unsigned int nIterations = + auxiliary::getEnvNum("OPENPMD_TEST_NFILES_MAX", 1030); + std::string filename = + "../samples/many_iterations/many_iterations_%T." + ext; std::vector data(10); std::iota(data.begin(), data.end(), 0.); @@ -192,13 +190,14 @@ void write_and_read_many_iterations( { Series write(filename, Access::CREATE); - for (unsigned int i = 0; i < nIterations; ++i) { + for (unsigned int i = 0; i < nIterations; ++i) + { // std::cout << "Putting iteration " << i << std::endl; Iteration it = write.iterations[i]; auto E_x = it.meshes["E"]["x"]; - E_x.resetDataset( ds ); - E_x.storeChunk( data, { 0 }, { 10 } ); - if( intermittentFlushes ) + E_x.resetDataset(ds); + E_x.storeChunk(data, {0}, {10}); + if (intermittentFlushes) { write.flush(); } @@ -206,92 +205,104 @@ void write_and_read_many_iterations( } // ~Series intentionally not yet called - Series read( filename, Access::READ_ONLY, "{\"defer_iteration_parsing\": true}" ); - for( auto iteration : read.iterations ) + Series read( + filename, Access::READ_ONLY, "{\"defer_iteration_parsing\": true}"); + for (auto iteration : read.iterations) { iteration.second.open(); // std::cout << "Reading iteration " << iteration.first << // std::endl; - auto E_x = iteration.second.meshes[ "E" ][ "x" ]; - auto chunk = E_x.loadChunk< float >( { 0 }, { 10 } ); - if( intermittentFlushes ) + auto E_x = iteration.second.meshes["E"]["x"]; + auto chunk = E_x.loadChunk({0}, {10}); + if (intermittentFlushes) { read.flush(); } iteration.second.close(); auto array = chunk.get(); - for (size_t i = 0; i < 10; ++i) { + for (size_t i = 0; i < 10; ++i) + { REQUIRE(array[i] == float(i)); } } } - Series list( filename, Access::READ_ONLY ); - helper::listSeries( list ); + Series list(filename, Access::READ_ONLY); + helper::listSeries(list); } -TEST_CASE( "write_and_read_many_iterations", "[serial]" ) +TEST_CASE("write_and_read_many_iterations", "[serial]") { bool intermittentFlushes = false; - if( auxiliary::directory_exists( "../samples/many_iterations" ) ) - auxiliary::remove_directory( "../samples/many_iterations" ); - for( auto const & t : testedFileExtensions() ) + if (auxiliary::directory_exists("../samples/many_iterations")) + auxiliary::remove_directory("../samples/many_iterations"); + for (auto const &t : testedFileExtensions()) { - write_and_read_many_iterations( t, intermittentFlushes ); + write_and_read_many_iterations(t, intermittentFlushes); intermittentFlushes = !intermittentFlushes; } } -TEST_CASE( "multi_series_test", "[serial]" ) +TEST_CASE("multi_series_test", "[serial]") { - std::list< Series > allSeries; + std::list allSeries; auto myfileExtensions = testedFileExtensions(); - // this test demonstrates an ADIOS1 (upstream) bug, comment this section to trigger it - auto 
const rmEnd = std::remove_if( myfileExtensions.begin(), myfileExtensions.end(), [](std::string const & beit) { - return beit == "bp" && - determineFormat("test.bp") == Format::ADIOS1; - }); + // this test demonstrates an ADIOS1 (upstream) bug, comment this section to + // trigger it + auto const rmEnd = std::remove_if( + myfileExtensions.begin(), + myfileExtensions.end(), + [](std::string const &beit) { + return beit == "bp" && determineFormat("test.bp") == Format::ADIOS1; + }); myfileExtensions.erase(rmEnd, myfileExtensions.end()); // have multiple serial series alive at the same time - for (auto const sn : {1, 2, 3}) { - for (auto const & t: myfileExtensions) + for (auto const sn : {1, 2, 3}) + { + for (auto const &t : myfileExtensions) { auto const file_ending = t; std::cout << file_ending << std::endl; allSeries.emplace_back( - std::string("../samples/multi_open_test_"). - append(std::to_string(sn)).append(".").append(file_ending), - Access::CREATE - ); + std::string("../samples/multi_open_test_") + .append(std::to_string(sn)) + .append(".") + .append(file_ending), + Access::CREATE); allSeries.back().iterations[sn].setAttribute("wululu", sn); allSeries.back().flush(); } } // skip some series: sn=1 auto it = allSeries.begin(); - std::for_each( myfileExtensions.begin(), myfileExtensions.end(), [&it](std::string const &){ - it++; - }); + std::for_each( + myfileExtensions.begin(), + myfileExtensions.end(), + [&it](std::string const &) { it++; }); // remove some series: sn=2 - std::for_each( myfileExtensions.begin(), myfileExtensions.end(), [&it, &allSeries](std::string const &){ - it = allSeries.erase(it); - }); + std::for_each( + myfileExtensions.begin(), + myfileExtensions.end(), + [&it, &allSeries](std::string const &) { it = allSeries.erase(it); }); // write from last series: sn=3 - std::for_each( myfileExtensions.begin(), myfileExtensions.end(), [&it](std::string const &){ - it->iterations[10].setAttribute("wululu", 10); - it->flush(); - it++; - }); + std::for_each( + myfileExtensions.begin(), + myfileExtensions.end(), + [&it](std::string const &) { + it->iterations[10].setAttribute("wululu", 10); + it->flush(); + it++; + }); // remove all leftover series allSeries.clear(); } -TEST_CASE( "available_chunks_test_json", "[serial][json]" ) +TEST_CASE("available_chunks_test_json", "[serial][json]") { /* * This test is JSON specific @@ -325,63 +336,71 @@ TEST_CASE( "available_chunks_test_json", "[serial][json]" ) constexpr unsigned height = 10; std::string name = "../samples/available_chunks.json"; - std::vector< int > data{ 2, 4, 6, 8 }; + std::vector data{2, 4, 6, 8}; { - Series write( name, Access::CREATE ); - Iteration it0 = write.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { height, 4 } } ); - for( unsigned line = 2; line < 7; ++line ) + Series write(name, Access::CREATE); + Iteration it0 = write.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {height, 4}}); + for (unsigned line = 2; line < 7; ++line) { - E_x.storeChunk( data, { line, 0 }, { 1, 4 } ); + E_x.storeChunk(data, {line, 0}, {1, 4}); } - for( unsigned line = 7; line < 9; ++line ) + for (unsigned line = 7; line < 9; ++line) { - E_x.storeChunk( data, { line, 0 }, { 1, 2 } ); + E_x.storeChunk(data, {line, 0}, {1, 2}); } - E_x.storeChunk( data, { 8, 3 }, { 2, 1 } ); + E_x.storeChunk(data, {8, 3}, {2, 1}); - auto E_y = it0.meshes[ "E" ][ "y" ]; - E_y.resetDataset( { Datatype::INT, { height, 4 } } ); - E_y.makeConstant( 1234 ); + auto E_y = 
it0.meshes["E"]["y"]; + E_y.resetDataset({Datatype::INT, {height, 4}}); + E_y.makeConstant(1234); it0.close(); } { - Series read( name, Access::READ_ONLY ); - Iteration it0 = read.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; + Series read(name, Access::READ_ONLY); + Iteration it0 = read.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; ChunkTable table = E_x.availableChunks(); - REQUIRE( table.size() == 3 ); + REQUIRE(table.size() == 3); /* * Explicitly convert things to bool, so Catch doesn't get the splendid * idea to print the Chunk struct. */ - REQUIRE( bool( table[ 0 ] == WrittenChunkInfo( { 2, 0 }, { 5, 4 } ) ) ); - REQUIRE( bool( table[ 1 ] == WrittenChunkInfo( { 7, 0 }, { 2, 2 } ) ) ); - REQUIRE( bool( table[ 2 ] == WrittenChunkInfo( { 8, 3 }, { 2, 1 } ) ) ); + REQUIRE(bool(table[0] == WrittenChunkInfo({2, 0}, {5, 4}))); + REQUIRE(bool(table[1] == WrittenChunkInfo({7, 0}, {2, 2}))); + REQUIRE(bool(table[2] == WrittenChunkInfo({8, 3}, {2, 1}))); - auto E_y = it0.meshes[ "E" ][ "y" ]; + auto E_y = it0.meshes["E"]["y"]; table = E_y.availableChunks(); - REQUIRE( table.size() == 1 ); - REQUIRE( - bool( table[ 0 ] == WrittenChunkInfo( { 0, 0 }, { height, 4 } ) ) ); + REQUIRE(table.size() == 1); + REQUIRE(bool(table[0] == WrittenChunkInfo({0, 0}, {height, 4}))); } } -TEST_CASE( "multiple_series_handles_test", "[serial]" ) +TEST_CASE("multiple_series_handles_test", "[serial]") { + /* + * clang also understands these pragmas. + */ +#if defined(__GNUC_MINOR__) && !defined(__INTEL_COMPILER) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#elif defined(__clang__) +#pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif /* * First test: No premature flushes through destructor when another copy * is still around */ { - std::unique_ptr< openPMD::Series > series_ptr; + std::unique_ptr series_ptr; { openPMD::Series series( - "sample%T.json", openPMD::AccessType::CREATE ); - series_ptr = std::make_unique< openPMD::Series >( series ); + "sample%T.json", openPMD::AccessType::CREATE); + series_ptr = std::make_unique(series); /* * we have two handles for the same Series instance now: * series and series_ptr @@ -391,20 +410,19 @@ TEST_CASE( "multiple_series_handles_test", "[serial]" ) * since no iteration has been written yet, an error will be thrown */ } - series_ptr->iterations[ 0 ].meshes[ "E" ][ "x" ].makeEmpty< int >( 1 ); + series_ptr->iterations[0].meshes["E"]["x"].makeEmpty(1); } /* * Second test: A Series handle should remain accessible even if the * original handle is destroyed */ { - std::unique_ptr< openPMD::Series > series_ptr; + std::unique_ptr series_ptr; { openPMD::Series series( - "sample%T.json", openPMD::AccessType::CREATE ); - series_ptr = std::make_unique< openPMD::Series >( series ); - series_ptr->iterations[ 0 ].meshes[ "E" ][ "x" ].makeEmpty< int >( - 1 ); + "sample%T.json", openPMD::AccessType::CREATE); + series_ptr = std::make_unique(series); + series_ptr->iterations[0].meshes["E"]["x"].makeEmpty(1); } /* * series_ptr is still in scope, but the original Series instance @@ -415,10 +433,14 @@ TEST_CASE( "multiple_series_handles_test", "[serial]" ) */ series_ptr->flush(); } +#if defined(__GNUC_MINOR__) && !defined(__INTEL_COMPILER) +#pragma GCC diagnostic pop +#elif defined(__clang__) +#pragma clang diagnostic pop +#endif } -void -close_iteration_test( std::string file_ending ) +void close_iteration_test(std::string file_ending) { std::string name = "../samples/close_iterations_%T." 
+ file_ending; @@ -427,11 +449,11 @@ close_iteration_test( std::string file_ending ) Series write(name, Access::CREATE); bool isAdios1 = write.backend() == "ADIOS1"; { - Iteration it0 = write.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 2, 2 } } ); - E_x.storeChunk( data, { 0, 0 }, { 2, 2 } ); - it0.close( /* flush = */ false ); + Iteration it0 = write.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {2, 2}}); + E_x.storeChunk(data, {0, 0}, {2, 2}); + it0.close(/* flush = */ false); } write.flush(); // } @@ -440,202 +462,197 @@ close_iteration_test( std::string file_ending ) { // run a simplified test for Adios1 since Adios1 has issues opening // twice in the same process - REQUIRE( auxiliary::file_exists( "../samples/close_iterations_0.bp" ) ); + REQUIRE(auxiliary::file_exists("../samples/close_iterations_0.bp")); } else { - Series read( name, Access::READ_ONLY ); - Iteration it0 = read.iterations[ 0 ]; - auto E_x_read = it0.meshes[ "E" ][ "x" ]; - auto chunk = E_x_read.loadChunk< int >( { 0, 0 }, { 2, 2 } ); - it0.close( /* flush = */ false ); + Series read(name, Access::READ_ONLY); + Iteration it0 = read.iterations[0]; + auto E_x_read = it0.meshes["E"]["x"]; + auto chunk = E_x_read.loadChunk({0, 0}, {2, 2}); + it0.close(/* flush = */ false); read.flush(); - for( size_t i = 0; i < data.size(); ++i ) + for (size_t i = 0; i < data.size(); ++i) { - REQUIRE( data[ i ] == chunk.get()[ i ] ); + REQUIRE(data[i] == chunk.get()[i]); } } { Iteration it1 = write.iterations[1]; - auto E_x = it1.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 2, 2 } } ); - E_x.storeChunk( data, { 0, 0 }, { 2, 2 } ); - it1.close( /* flush = */ true ); + auto E_x = it1.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {2, 2}}); + E_x.storeChunk(data, {0, 0}, {2, 2}); + it1.close(/* flush = */ true); // illegally access iteration after closing - E_x.storeChunk( data, { 0, 0 }, { 2, 2 } ); - REQUIRE_THROWS( write.flush() ); + E_x.storeChunk(data, {0, 0}, {2, 2}); + REQUIRE_THROWS(write.flush()); } if (isAdios1) { // run a simplified test for Adios1 since Adios1 has issues opening // twice in the same process - REQUIRE( auxiliary::file_exists( "../samples/close_iterations_1.bp" ) ); + REQUIRE(auxiliary::file_exists("../samples/close_iterations_1.bp")); } else { - Series read( name, Access::READ_ONLY ); - Iteration it1 = read.iterations[ 1 ]; - auto E_x_read = it1.meshes[ "E" ][ "x" ]; - auto chunk = E_x_read.loadChunk< int >( { 0, 0 }, { 2, 2 } ); - it1.close( /* flush = */ true ); - for( size_t i = 0; i < data.size(); ++i ) + Series read(name, Access::READ_ONLY); + Iteration it1 = read.iterations[1]; + auto E_x_read = it1.meshes["E"]["x"]; + auto chunk = E_x_read.loadChunk({0, 0}, {2, 2}); + it1.close(/* flush = */ true); + for (size_t i = 0; i < data.size(); ++i) { - REQUIRE( data[ i ] == chunk.get()[ i ] ); + REQUIRE(data[i] == chunk.get()[i]); } - auto read_again = E_x_read.loadChunk< int >( { 0, 0 }, { 2, 2 } ); - REQUIRE_THROWS( read.flush() ); + auto read_again = E_x_read.loadChunk({0, 0}, {2, 2}); + REQUIRE_THROWS(read.flush()); } { - Series list{ name, Access::READ_ONLY }; - helper::listSeries( list ); + Series list{name, Access::READ_ONLY}; + helper::listSeries(list); } } -TEST_CASE( "close_iteration_test", "[serial]" ) +TEST_CASE("close_iteration_test", "[serial]") { - for( auto const & t : testedFileExtensions() ) + for (auto const &t : testedFileExtensions()) { - close_iteration_test( t ); + 
close_iteration_test(t); } } -void -close_iteration_interleaved_test( std::string const file_ending, - IterationEncoding const it_encoding ) +void close_iteration_interleaved_test( + std::string const file_ending, IterationEncoding const it_encoding) { std::string name = "../samples/close_iterations_interleaved_"; - if( it_encoding == IterationEncoding::fileBased ) - name.append( "f_%T" ); - else if( it_encoding == IterationEncoding::groupBased ) - name.append( "g" ); - else if( it_encoding == IterationEncoding::variableBased ) - name.append( "v" ); - name.append( "." ).append( file_ending ); + if (it_encoding == IterationEncoding::fileBased) + name.append("f_%T"); + else if (it_encoding == IterationEncoding::groupBased) + name.append("g"); + else if (it_encoding == IterationEncoding::variableBased) + name.append("v"); + name.append(".").append(file_ending); std::cout << name << std::endl; std::vector data{2, 4, 6, 8}; { - Series write( name, Access::CREATE ); - write.setIterationEncoding( it_encoding ); + Series write(name, Access::CREATE); + write.setIterationEncoding(it_encoding); // interleaved write pattern - Iteration it1 = write.iterations[ 1 ]; - auto E_x = it1.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 2, 2 } } ); - E_x.storeChunk( data, { 0, 0 }, { 1, 2 } ); + Iteration it1 = write.iterations[1]; + auto E_x = it1.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {2, 2}}); + E_x.storeChunk(data, {0, 0}, {1, 2}); E_x.seriesFlush(); - Iteration it2 = write.iterations[ 2 ]; - E_x = it2.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 2, 2 } } ); - E_x.storeChunk( data, { 0, 0 }, { 1, 2 } ); + Iteration it2 = write.iterations[2]; + E_x = it2.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {2, 2}}); + E_x.storeChunk(data, {0, 0}, {1, 2}); E_x.seriesFlush(); - E_x = it1.meshes[ "E" ][ "x" ]; - E_x.storeChunk( data, { 1, 0 }, { 1, 1 } ); + E_x = it1.meshes["E"]["x"]; + E_x.storeChunk(data, {1, 0}, {1, 1}); E_x.seriesFlush(); - E_x = it2.meshes[ "E" ][ "x" ]; - E_x.storeChunk( data, { 1, 0 }, { 1, 1 } ); + E_x = it2.meshes["E"]["x"]; + E_x.storeChunk(data, {1, 0}, {1, 1}); E_x.seriesFlush(); // now we start a third iteration - Iteration it3 = write.iterations[ 3 ]; - E_x = it3.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 2, 2 } } ); - E_x.storeChunk( data, { 0, 0 }, { 1, 2 } ); + Iteration it3 = write.iterations[3]; + E_x = it3.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {2, 2}}); + E_x.storeChunk(data, {0, 0}, {1, 2}); E_x.seriesFlush(); // let's finish writing to 1 and 2 - E_x = it1.meshes[ "E" ][ "x" ]; - E_x.storeChunk( data, { 1, 1 }, { 1, 1 } ); + E_x = it1.meshes["E"]["x"]; + E_x.storeChunk(data, {1, 1}, {1, 1}); E_x.seriesFlush(); it1.close(); - E_x = it2.meshes[ "E" ][ "x" ]; - E_x.storeChunk( data, { 1, 1 }, { 1, 1 } ); + E_x = it2.meshes["E"]["x"]; + E_x.storeChunk(data, {1, 1}, {1, 1}); E_x.seriesFlush(); it2.close(); - E_x = it3.meshes[ "E" ][ "x" ]; - E_x.storeChunk( data, { 1, 0 }, { 1, 2 } ); + E_x = it3.meshes["E"]["x"]; + E_x.storeChunk(data, {1, 0}, {1, 2}); E_x.seriesFlush(); it3.close(); } } -TEST_CASE( "close_iteration_interleaved_test", "[serial]" ) +TEST_CASE("close_iteration_interleaved_test", "[serial]") { bool const bp_prefer_adios1 = - ( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) == "ADIOS1" ); + (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1"); - for( auto const & t : testedFileExtensions() ) + for (auto const &t : testedFileExtensions()) { //! 
@FIXME ADIOS1 bugs with Iteration::close() - if( bp_prefer_adios1 ) + if (bp_prefer_adios1) continue; - close_iteration_interleaved_test( t, IterationEncoding::fileBased ); - close_iteration_interleaved_test( t, IterationEncoding::groupBased ); + close_iteration_interleaved_test(t, IterationEncoding::fileBased); + close_iteration_interleaved_test(t, IterationEncoding::groupBased); // run this test for ADIOS2 & JSON only - if( t == "h5" ) + if (t == "h5") continue; - if( t == "bp" && bp_prefer_adios1 ) + if (t == "bp" && bp_prefer_adios1) continue; - close_iteration_interleaved_test( t, IterationEncoding::variableBased ); + close_iteration_interleaved_test(t, IterationEncoding::variableBased); } } -void -close_and_copy_attributable_test( std::string file_ending ) +void close_and_copy_attributable_test(std::string file_ending) { using position_t = double; // open file for writing - Series series( "electrons." + file_ending, Access::CREATE ); + Series series("electrons." + file_ending, Access::CREATE); - Datatype datatype = determineDatatype< position_t >(); + Datatype datatype = determineDatatype<position_t>(); constexpr unsigned long length = 10ul; - Extent global_extent = { length }; - Dataset dataset = Dataset( datatype, global_extent ); - std::shared_ptr< position_t > local_data( - new position_t[ length ], - []( position_t const * ptr ) { delete[] ptr; } ); + Extent global_extent = {length}; + Dataset dataset = Dataset(datatype, global_extent); + std::shared_ptr<position_t> local_data( + new position_t[length], [](position_t const *ptr) { delete[] ptr; }); - std::unique_ptr< Iteration > iteration_ptr; - for( size_t i = 0; i < 100; i+=10 ) + std::unique_ptr<Iteration> iteration_ptr; + for (size_t i = 0; i < 100; i += 10) { - if( iteration_ptr ) + if (iteration_ptr) { - *iteration_ptr = series.iterations[ i ]; + *iteration_ptr = series.iterations[i]; } else { // use copy constructor - iteration_ptr = - std::make_unique< Iteration >( series.iterations[ i ] ); + iteration_ptr = std::make_unique<Iteration>(series.iterations[i]); } - Record electronPositions = - iteration_ptr->particles[ "e" ][ "position" ]; + Record electronPositions = iteration_ptr->particles["e"]["position"]; // TODO set this automatically to zero if not provided Record electronPositionsOffset = - iteration_ptr->particles[ "e" ][ "positionOffset" ]; + iteration_ptr->particles["e"]["positionOffset"]; - std::iota( local_data.get(), local_data.get() + length, i * length ); - for( auto const & dim : { "x", "y", "z" } ) + std::iota(local_data.get(), local_data.get() + length, i * length); + for (auto const &dim : {"x", "y", "z"}) { - RecordComponent pos = electronPositions[ dim ]; - pos.resetDataset( dataset ); - pos.storeChunk( local_data, Offset{ 0 }, global_extent ); + RecordComponent pos = electronPositions[dim]; + pos.resetDataset(dataset); + pos.storeChunk(local_data, Offset{0}, global_extent); - RecordComponent posOff = electronPositionsOffset[ dim ]; - posOff.resetDataset( dataset ); - posOff.makeConstant( position_t( 0.0 ) ); + RecordComponent posOff = electronPositionsOffset[dim]; + posOff.resetDataset(dataset); + posOff.makeConstant(position_t(0.0)); } iteration_ptr->close(); // force re-flush of previous iterations @@ -643,351 +660,442 @@ close_and_copy_attributable_test( std::string file_ending ) } } -TEST_CASE( "close_and_copy_attributable_test", "[serial]" ) +TEST_CASE("close_and_copy_attributable_test", "[serial]") { // demonstrator for https://github.com/openPMD/openPMD-api/issues/765 - for( auto const & t : testedFileExtensions() ) + for (auto const &t :
testedFileExtensions()) { - close_and_copy_attributable_test( t ); + close_and_copy_attributable_test(t); } } #if openPMD_HAVE_ADIOS2 -TEST_CASE( "close_iteration_throws_test", "[serial]" ) +TEST_CASE("close_iteration_throws_test", "[serial]") { /* * Iterations should not be accessed any more after closing. * Test that the openPMD API detects that case and throws. */ { - Series series( - "../samples/close_iteration_throws_1.bp", Access::CREATE ); - auto it0 = series.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 5 } } ); - std::vector< int > data{ 0, 1, 2, 3, 4 }; - E_x.storeChunk( data, { 0 }, { 5 } ); + Series series("../samples/close_iteration_throws_1.bp", Access::CREATE); + auto it0 = series.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {5}}); + std::vector<int> data{0, 1, 2, 3, 4}; + E_x.storeChunk(data, {0}, {5}); it0.close(); - auto B_y = it0.meshes[ "B" ][ "y" ]; - B_y.resetDataset( { Datatype::INT, { 5 } } ); - B_y.storeChunk( data, { 0 }, { 5 } ); - REQUIRE_THROWS( series.flush() ); + auto B_y = it0.meshes["B"]["y"]; + B_y.resetDataset({Datatype::INT, {5}}); + B_y.storeChunk(data, {0}, {5}); + REQUIRE_THROWS(series.flush()); } { - Series series( - "../samples/close_iteration_throws_2.bp", Access::CREATE ); - auto it0 = series.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 5 } } ); - std::vector< int > data{ 0, 1, 2, 3, 4 }; - E_x.storeChunk( data, { 0 }, { 5 } ); + Series series("../samples/close_iteration_throws_2.bp", Access::CREATE); + auto it0 = series.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {5}}); + std::vector<int> data{0, 1, 2, 3, 4}; + E_x.storeChunk(data, {0}, {5}); it0.close(); - auto e_position_x = it0.particles[ "e" ][ "position" ][ "x" ]; - e_position_x.resetDataset( { Datatype::INT, { 5 } } ); - e_position_x.storeChunk( data, { 0 }, { 5 } ); - REQUIRE_THROWS( series.flush() ); + auto e_position_x = it0.particles["e"]["position"]["x"]; + e_position_x.resetDataset({Datatype::INT, {5}}); + e_position_x.storeChunk(data, {0}, {5}); + REQUIRE_THROWS(series.flush()); } { - Series series( - "../samples/close_iteration_throws_3.bp", Access::CREATE ); - auto it0 = series.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 5 } } ); - std::vector< int > data{ 0, 1, 2, 3, 4 }; - E_x.storeChunk( data, { 0 }, { 5 } ); + Series series("../samples/close_iteration_throws_3.bp", Access::CREATE); + auto it0 = series.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {5}}); + std::vector<int> data{0, 1, 2, 3, 4}; + E_x.storeChunk(data, {0}, {5}); it0.close(); - it0.setTimeUnitSI( 2.0 ); - REQUIRE_THROWS( series.flush() ); + it0.setTimeUnitSI(2.0); + REQUIRE_THROWS(series.flush()); } } #endif -inline void -empty_dataset_test( std::string file_ending ) +inline void empty_dataset_test(std::string file_ending) { { Series series( - "../samples/empty_datasets." + file_ending, Access::CREATE ); + "../samples/empty_datasets."
+ file_ending, Access::CREATE); auto makeEmpty_dim_7_int = - series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_int" ]; + series.iterations[1].meshes["rho"]["makeEmpty_dim_7_int"]; auto makeEmpty_dim_7_long = - series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_bool" ]; + series.iterations[1].meshes["rho"]["makeEmpty_dim_7_bool"]; auto makeEmpty_dim_7_int_alt = - series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_int_alt" ]; + series.iterations[1].meshes["rho"]["makeEmpty_dim_7_int_alt"]; auto makeEmpty_dim_7_long_alt = - series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_bool_alt" ]; + series.iterations[1].meshes["rho"]["makeEmpty_dim_7_bool_alt"]; auto makeEmpty_resetDataset_dim3 = - series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_resetDataset_dim3" ]; + series.iterations[1].meshes["rho"]["makeEmpty_resetDataset_dim3"]; auto makeEmpty_resetDataset_dim3_notallzero = - series.iterations[ 1 ] - .meshes[ "rho" ][ "makeEmpty_resetDataset_dim3_notallzero" ]; - makeEmpty_dim_7_int.makeEmpty< int >( 7 ); - makeEmpty_dim_7_long.makeEmpty< long >( 7 ); - makeEmpty_dim_7_int_alt.makeEmpty( Datatype::INT, 7 ); - makeEmpty_dim_7_long_alt.makeEmpty( Datatype::LONG, 7 ); + series.iterations[1] + .meshes["rho"]["makeEmpty_resetDataset_dim3_notallzero"]; + makeEmpty_dim_7_int.makeEmpty<int>(7); + makeEmpty_dim_7_long.makeEmpty<long>(7); + makeEmpty_dim_7_int_alt.makeEmpty(Datatype::INT, 7); + makeEmpty_dim_7_long_alt.makeEmpty(Datatype::LONG, 7); makeEmpty_resetDataset_dim3.resetDataset( - Dataset( Datatype::LONG, Extent( 3, 0 ) ) ); + Dataset(Datatype::LONG, Extent(3, 0))); makeEmpty_resetDataset_dim3_notallzero.resetDataset( - Dataset( Datatype::LONG_DOUBLE, Extent{ 1, 2, 0 } ) ); + Dataset(Datatype::LONG_DOUBLE, Extent{1, 2, 0})); series.flush(); - } { Series series( - "../samples/empty_datasets." + file_ending, Access::READ_ONLY ); + "../samples/empty_datasets."
+ file_ending, Access::READ_ONLY); REQUIRE(series.iterations.contains(1)); REQUIRE(series.iterations.count(1) == 1); REQUIRE(series.iterations.count(123456) == 0); REQUIRE(series.iterations[1].meshes.contains("rho")); - REQUIRE(series.iterations[1].meshes["rho"].contains("makeEmpty_dim_7_int")); + REQUIRE( + series.iterations[1].meshes["rho"].contains("makeEmpty_dim_7_int")); auto makeEmpty_dim_7_int = - series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_int" ]; + series.iterations[1].meshes["rho"]["makeEmpty_dim_7_int"]; auto makeEmpty_dim_7_long = - series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_bool" ]; + series.iterations[1].meshes["rho"]["makeEmpty_dim_7_bool"]; auto makeEmpty_dim_7_int_alt = - series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_int_alt" ]; + series.iterations[1].meshes["rho"]["makeEmpty_dim_7_int_alt"]; auto makeEmpty_dim_7_long_alt = - series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_bool_alt" ]; + series.iterations[1].meshes["rho"]["makeEmpty_dim_7_bool_alt"]; auto makeEmpty_resetDataset_dim3 = - series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_resetDataset_dim3" ]; + series.iterations[1].meshes["rho"]["makeEmpty_resetDataset_dim3"]; auto makeEmpty_resetDataset_dim3_notallzero = - series.iterations[ 1 ] - .meshes[ "rho" ][ "makeEmpty_resetDataset_dim3_notallzero" ]; + series.iterations[1] + .meshes["rho"]["makeEmpty_resetDataset_dim3_notallzero"]; REQUIRE(makeEmpty_dim_7_int.getDimensionality() == 7); REQUIRE(makeEmpty_dim_7_int.getExtent() == Extent(7, 0)); - REQUIRE(isSame(makeEmpty_dim_7_int.getDatatype(), determineDatatype< int >())); + REQUIRE(isSame( + makeEmpty_dim_7_int.getDatatype(), determineDatatype<int>())); REQUIRE(makeEmpty_dim_7_long.getDimensionality() == 7); REQUIRE(makeEmpty_dim_7_long.getExtent() == Extent(7, 0)); - REQUIRE(isSame(makeEmpty_dim_7_long.getDatatype(), determineDatatype< long >())); + REQUIRE(isSame( + makeEmpty_dim_7_long.getDatatype(), determineDatatype<long>())); REQUIRE(makeEmpty_dim_7_int_alt.getDimensionality() == 7); REQUIRE(makeEmpty_dim_7_int_alt.getExtent() == Extent(7, 0)); - REQUIRE(isSame(makeEmpty_dim_7_int_alt.getDatatype(), determineDatatype< int >())); + REQUIRE(isSame( + makeEmpty_dim_7_int_alt.getDatatype(), determineDatatype<int>())); REQUIRE(makeEmpty_dim_7_long_alt.getDimensionality() == 7); REQUIRE(makeEmpty_dim_7_long_alt.getExtent() == Extent(7, 0)); - REQUIRE(isSame(makeEmpty_dim_7_long_alt.getDatatype(), determineDatatype< long >())); + REQUIRE(isSame( + makeEmpty_dim_7_long_alt.getDatatype(), determineDatatype<long>())); REQUIRE(makeEmpty_resetDataset_dim3.getDimensionality() == 3); REQUIRE(makeEmpty_resetDataset_dim3.getExtent() == Extent(3, 0)); - REQUIRE(isSame(makeEmpty_resetDataset_dim3.getDatatype(), Datatype::LONG)); + REQUIRE( + isSame(makeEmpty_resetDataset_dim3.getDatatype(), Datatype::LONG)); - REQUIRE(makeEmpty_resetDataset_dim3_notallzero.getDimensionality() == 3); - REQUIRE(makeEmpty_resetDataset_dim3_notallzero.getExtent() == Extent{1,2,0}); - REQUIRE(isSame(makeEmpty_resetDataset_dim3_notallzero.getDatatype(), Datatype::LONG_DOUBLE)); + REQUIRE( + makeEmpty_resetDataset_dim3_notallzero.getDimensionality() == 3); + REQUIRE( + makeEmpty_resetDataset_dim3_notallzero.getExtent() == + Extent{1, 2, 0}); + REQUIRE(isSame( + makeEmpty_resetDataset_dim3_notallzero.getDatatype(), + Datatype::LONG_DOUBLE)); } { - Series list{ "../samples/empty_datasets." + file_ending, Access::READ_ONLY }; - helper::listSeries( list ); + Series list{ + "../samples/empty_datasets."
+ file_ending, Access::READ_ONLY}; + helper::listSeries(list); } } -TEST_CASE( "empty_dataset_test", "[serial]" ) +TEST_CASE("empty_dataset_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - empty_dataset_test( t ); + empty_dataset_test(t); } } -inline -void constant_scalar(std::string file_ending) +inline void constant_scalar(std::string file_ending) { Mesh::Geometry const geometry = Mesh::Geometry::spherical; std::string const geometryParameters = "dummyGeometryParameters"; Mesh::DataOrder const dataOrder = Mesh::DataOrder::F; - std::vector< double > const gridSpacing { 1.0, 2.0, 3.0 }; - std::vector< double > const gridGlobalOffset{ 11.0, 22.0, 33.0 }; + std::vector<double> const gridSpacing{1.0, 2.0, 3.0}; + std::vector<double> const gridGlobalOffset{11.0, 22.0, 33.0}; double const gridUnitSI = 3.14; - std::vector< std::string > const axisLabels { "x", "y", "z" }; - std::map< UnitDimension, double > const unitDimensions{ - {UnitDimension::I, 1.0}, - {UnitDimension::J, 2.0} - }; + std::vector<std::string> const axisLabels{"x", "y", "z"}; + std::map<UnitDimension, double> const unitDimensions{ + {UnitDimension::I, 1.0}, {UnitDimension::J, 2.0}}; double const timeOffset = 1234.0; { // constant scalar - Series s = Series("../samples/constant_scalar." + file_ending, Access::CREATE); + Series s = + Series("../samples/constant_scalar." + file_ending, Access::CREATE); auto rho = s.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR]; REQUIRE(s.iterations[1].meshes["rho"].scalar()); rho.resetDataset(Dataset(Datatype::CHAR, {1, 2, 3})); - rho.makeConstant(static_cast< char >('a')); + rho.makeConstant(static_cast<char>('a')); REQUIRE(rho.constant()); // mixed constant/non-constant auto E_x = s.iterations[1].meshes["E"]["x"]; E_x.resetDataset(Dataset(Datatype::FLOAT, {1, 2, 3})); - E_x.makeConstant(static_cast< float >(13.37)); + E_x.makeConstant(static_cast<float>(13.37)); auto E_y = s.iterations[1].meshes["E"]["y"]; E_y.resetDataset(Dataset(Datatype::UINT, {1, 2, 3})); - std::shared_ptr< unsigned int > E(new unsigned int[6], [](unsigned int const *p){ delete[] p; }); + std::shared_ptr<unsigned int> E( + new unsigned int[6], [](unsigned int const *p) { delete[] p; }); unsigned int e{0}; - std::generate(E.get(), E.get() + 6, [&e]{ return e++; }); + std::generate(E.get(), E.get() + 6, [&e] { return e++; }); E_y.storeChunk(E, {0, 0, 0}, {1, 2, 3}); // store a number of predefined attributes in E - Mesh & E_mesh = s.iterations[1].meshes["E"]; - E_mesh.setGeometry( geometry ); - E_mesh.setGeometryParameters( geometryParameters ); - E_mesh.setDataOrder( dataOrder ); - E_mesh.setGridSpacing( gridSpacing ); - E_mesh.setGridGlobalOffset( gridGlobalOffset ); - E_mesh.setGridUnitSI( gridUnitSI ); - E_mesh.setAxisLabels( axisLabels ); + Mesh &E_mesh = s.iterations[1].meshes["E"]; + E_mesh.setGeometry(geometry); + E_mesh.setGeometryParameters(geometryParameters); + E_mesh.setDataOrder(dataOrder); + E_mesh.setGridSpacing(gridSpacing); + E_mesh.setGridGlobalOffset(gridGlobalOffset); + E_mesh.setGridUnitSI(gridUnitSI); + E_mesh.setAxisLabels(axisLabels); E_mesh.setUnitDimension(unitDimensions); - E_mesh.setTimeOffset( timeOffset ); + E_mesh.setTimeOffset(timeOffset); // constant scalar - auto pos = s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR]; + auto pos = + s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR]; pos.resetDataset(Dataset(Datatype::DOUBLE, {3, 2, 1})); - pos.makeConstant(static_cast< double >(42.)); - auto posOff =
s.iterations[1].particles["e"]["positionOffset"][RecordComponent::SCALAR]; + pos.makeConstant(static_cast(42.)); + auto posOff = + s.iterations[1] + .particles["e"]["positionOffset"][RecordComponent::SCALAR]; posOff.resetDataset(Dataset(Datatype::INT, {3, 2, 1})); - posOff.makeConstant(static_cast< int >(-42)); + posOff.makeConstant(static_cast(-42)); // mixed constant/non-constant auto vel_x = s.iterations[1].particles["e"]["velocity"]["x"]; vel_x.resetDataset(Dataset(Datatype::SHORT, {3, 2, 1})); - vel_x.makeConstant(static_cast< short >(-1)); + vel_x.makeConstant(static_cast(-1)); auto vel_y = s.iterations[1].particles["e"]["velocity"]["y"]; vel_y.resetDataset(Dataset(Datatype::ULONGLONG, {3, 2, 1})); - std::shared_ptr< unsigned long long > vel(new unsigned long long[6], [](unsigned long long const *p){ delete[] p; }); + std::shared_ptr vel( + new unsigned long long[6], + [](unsigned long long const *p) { delete[] p; }); unsigned long long v{0}; - std::generate(vel.get(), vel.get() + 6, [&v]{ return v++; }); + std::generate(vel.get(), vel.get() + 6, [&v] { return v++; }); vel_y.storeChunk(vel, {0, 0, 0}, {3, 2, 1}); } { - Series s = Series("../samples/constant_scalar." + file_ending, Access::READ_ONLY); + Series s = Series( + "../samples/constant_scalar." + file_ending, Access::READ_ONLY); REQUIRE(s.iterations[1].meshes.count("rho") == 1); - REQUIRE(s.iterations[1].meshes["rho"].count(MeshRecordComponent::SCALAR) == 1); - REQUIRE(s.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR].containsAttribute("shape")); - REQUIRE(s.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR].getAttribute("shape").get< std::vector< uint64_t > >() == Extent{1, 2, 3}); - REQUIRE(s.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR].containsAttribute("value")); - REQUIRE(s.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR].getAttribute("value").get< char >() == 'a'); + REQUIRE( + s.iterations[1].meshes["rho"].count(MeshRecordComponent::SCALAR) == + 1); + REQUIRE(s.iterations[1] + .meshes["rho"][MeshRecordComponent::SCALAR] + .containsAttribute("shape")); + REQUIRE( + s.iterations[1] + .meshes["rho"][MeshRecordComponent::SCALAR] + .getAttribute("shape") + .get >() == Extent{1, 2, 3}); + REQUIRE(s.iterations[1] + .meshes["rho"][MeshRecordComponent::SCALAR] + .containsAttribute("value")); + REQUIRE( + s.iterations[1] + .meshes["rho"][MeshRecordComponent::SCALAR] + .getAttribute("value") + .get() == 'a'); REQUIRE(s.iterations[1].meshes["rho"].scalar()); - REQUIRE(s.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR].constant()); + REQUIRE(s.iterations[1] + .meshes["rho"][MeshRecordComponent::SCALAR] + .constant()); REQUIRE(s.iterations[1].meshes.count("E") == 1); REQUIRE(!s.iterations[1].meshes["E"].scalar()); REQUIRE(s.iterations[1].meshes["E"].count("x") == 1); REQUIRE(s.iterations[1].meshes["E"]["x"].constant()); REQUIRE(s.iterations[1].meshes["E"]["x"].containsAttribute("shape")); - REQUIRE(s.iterations[1].meshes["E"]["x"].getAttribute("shape").get< std::vector< uint64_t > >() == Extent{1, 2, 3}); + REQUIRE( + s.iterations[1] + .meshes["E"]["x"] + .getAttribute("shape") + .get >() == Extent{1, 2, 3}); REQUIRE(s.iterations[1].meshes["E"]["x"].containsAttribute("value")); - REQUIRE(s.iterations[1].meshes["E"]["x"].getAttribute("value").get< float >() == static_cast< float >(13.37)); + REQUIRE( + s.iterations[1] + .meshes["E"]["x"] + .getAttribute("value") + .get() == static_cast(13.37)); REQUIRE(!s.iterations[1].meshes["E"]["y"].constant()); - 
REQUIRE(s.iterations[1].meshes["E"]["y"].getExtent() == Extent{1, 2, 3}); + REQUIRE( + s.iterations[1].meshes["E"]["y"].getExtent() == Extent{1, 2, 3}); REQUIRE(s.iterations[1].meshes["E"].count("y") == 1); REQUIRE(!s.iterations[1].meshes["E"]["y"].containsAttribute("shape")); REQUIRE(!s.iterations[1].meshes["E"]["y"].containsAttribute("value")); - REQUIRE(s.iterations[1].meshes["E"]["y"].getExtent() == Extent{1, 2, 3}); + REQUIRE( + s.iterations[1].meshes["E"]["y"].getExtent() == Extent{1, 2, 3}); REQUIRE(s.iterations[1].particles.count("e") == 1); REQUIRE(s.iterations[1].particles["e"].count("position") == 1); - REQUIRE(s.iterations[1].particles["e"]["position"].count(RecordComponent::SCALAR) == 1); - REQUIRE(s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR].containsAttribute("shape")); - REQUIRE(s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR].getAttribute("shape").get< std::vector< uint64_t > >() == Extent{3, 2, 1}); - REQUIRE(s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR].containsAttribute("value")); - REQUIRE(s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR].getAttribute("value").get< double >() == 42.); - REQUIRE(s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR].getExtent() == Extent{3, 2, 1}); + REQUIRE( + s.iterations[1].particles["e"]["position"].count( + RecordComponent::SCALAR) == 1); + REQUIRE(s.iterations[1] + .particles["e"]["position"][RecordComponent::SCALAR] + .containsAttribute("shape")); + REQUIRE( + s.iterations[1] + .particles["e"]["position"][RecordComponent::SCALAR] + .getAttribute("shape") + .get >() == Extent{3, 2, 1}); + REQUIRE(s.iterations[1] + .particles["e"]["position"][RecordComponent::SCALAR] + .containsAttribute("value")); + REQUIRE( + s.iterations[1] + .particles["e"]["position"][RecordComponent::SCALAR] + .getAttribute("value") + .get() == 42.); + REQUIRE( + s.iterations[1] + .particles["e"]["position"][RecordComponent::SCALAR] + .getExtent() == Extent{3, 2, 1}); REQUIRE(s.iterations[1].particles["e"].count("positionOffset") == 1); - REQUIRE(s.iterations[1].particles["e"]["positionOffset"].count(RecordComponent::SCALAR) == 1); - REQUIRE(s.iterations[1].particles["e"]["positionOffset"][RecordComponent::SCALAR].containsAttribute("shape")); - REQUIRE(s.iterations[1].particles["e"]["positionOffset"][RecordComponent::SCALAR].getAttribute("shape").get< std::vector< uint64_t > >() == Extent{3, 2, 1}); - REQUIRE(s.iterations[1].particles["e"]["positionOffset"][RecordComponent::SCALAR].containsAttribute("value")); - REQUIRE(s.iterations[1].particles["e"]["positionOffset"][RecordComponent::SCALAR].getAttribute("value").get< int >() == -42); - REQUIRE(s.iterations[1].particles["e"]["positionOffset"][RecordComponent::SCALAR].getExtent() == Extent{3, 2, 1}); + REQUIRE( + s.iterations[1].particles["e"]["positionOffset"].count( + RecordComponent::SCALAR) == 1); + REQUIRE(s.iterations[1] + .particles["e"]["positionOffset"][RecordComponent::SCALAR] + .containsAttribute("shape")); + REQUIRE( + s.iterations[1] + .particles["e"]["positionOffset"][RecordComponent::SCALAR] + .getAttribute("shape") + .get >() == Extent{3, 2, 1}); + REQUIRE(s.iterations[1] + .particles["e"]["positionOffset"][RecordComponent::SCALAR] + .containsAttribute("value")); + REQUIRE( + s.iterations[1] + .particles["e"]["positionOffset"][RecordComponent::SCALAR] + .getAttribute("value") + .get() == -42); + REQUIRE( + s.iterations[1] + .particles["e"]["positionOffset"][RecordComponent::SCALAR] + .getExtent() == 
Extent{3, 2, 1}); REQUIRE(s.iterations[1].particles["e"].count("velocity") == 1); REQUIRE(s.iterations[1].particles["e"]["velocity"].count("x") == 1); - REQUIRE(s.iterations[1].particles["e"]["velocity"]["x"].containsAttribute("shape")); - REQUIRE(s.iterations[1].particles["e"]["velocity"]["x"].getAttribute("shape").get< std::vector< uint64_t > >() == Extent{3, 2, 1}); - REQUIRE(s.iterations[1].particles["e"]["velocity"]["x"].containsAttribute("value")); - REQUIRE(s.iterations[1].particles["e"]["velocity"]["x"].getAttribute("value").get< short >() == -1); - REQUIRE(s.iterations[1].particles["e"]["velocity"]["x"].getExtent() == Extent{3, 2, 1}); + REQUIRE( + s.iterations[1].particles["e"]["velocity"]["x"].containsAttribute( + "shape")); + REQUIRE( + s.iterations[1] + .particles["e"]["velocity"]["x"] + .getAttribute("shape") + .get >() == Extent{3, 2, 1}); + REQUIRE( + s.iterations[1].particles["e"]["velocity"]["x"].containsAttribute( + "value")); + REQUIRE( + s.iterations[1] + .particles["e"]["velocity"]["x"] + .getAttribute("value") + .get() == -1); + REQUIRE( + s.iterations[1].particles["e"]["velocity"]["x"].getExtent() == + Extent{3, 2, 1}); REQUIRE(s.iterations[1].particles["e"]["velocity"].count("y") == 1); - REQUIRE(!s.iterations[1].particles["e"]["velocity"]["y"].containsAttribute("shape")); - REQUIRE(!s.iterations[1].particles["e"]["velocity"]["y"].containsAttribute("value")); - REQUIRE(s.iterations[1].particles["e"]["velocity"]["y"].getExtent() == Extent{3, 2, 1}); - - Mesh & E_mesh = s.iterations[1].meshes["E"]; - REQUIRE( E_mesh.geometry() == geometry ); - REQUIRE( E_mesh.geometryParameters() == geometryParameters ); - REQUIRE( E_mesh.dataOrder() == dataOrder ); - REQUIRE( E_mesh.gridSpacing< double >() == gridSpacing ); - REQUIRE( E_mesh.gridGlobalOffset() == gridGlobalOffset ); - REQUIRE( E_mesh.gridUnitSI() == gridUnitSI ); - REQUIRE( E_mesh.axisLabels() == axisLabels ); + REQUIRE( + !s.iterations[1].particles["e"]["velocity"]["y"].containsAttribute( + "shape")); + REQUIRE( + !s.iterations[1].particles["e"]["velocity"]["y"].containsAttribute( + "value")); + REQUIRE( + s.iterations[1].particles["e"]["velocity"]["y"].getExtent() == + Extent{3, 2, 1}); + + Mesh &E_mesh = s.iterations[1].meshes["E"]; + REQUIRE(E_mesh.geometry() == geometry); + REQUIRE(E_mesh.geometryParameters() == geometryParameters); + REQUIRE(E_mesh.dataOrder() == dataOrder); + REQUIRE(E_mesh.gridSpacing() == gridSpacing); + REQUIRE(E_mesh.gridGlobalOffset() == gridGlobalOffset); + REQUIRE(E_mesh.gridUnitSI() == gridUnitSI); + REQUIRE(E_mesh.axisLabels() == axisLabels); // REQUIRE( E_mesh.unitDimension() == unitDimensions ); - REQUIRE( E_mesh.timeOffset< double >() == timeOffset ); + REQUIRE(E_mesh.timeOffset() == timeOffset); - auto E_x_value = s.iterations[1].meshes["E"]["x"].loadChunk< float >(); + auto E_x_value = s.iterations[1].meshes["E"]["x"].loadChunk(); s.flush(); - for( int idx = 0; idx < 1*2*3; ++idx ) - REQUIRE( E_x_value.get()[idx] == static_cast< float >(13.37) ); + for (int idx = 0; idx < 1 * 2 * 3; ++idx) + REQUIRE(E_x_value.get()[idx] == static_cast(13.37)); } { - Series list{ "../samples/constant_scalar." + file_ending, Access::READ_ONLY }; - helper::listSeries( list ); + Series list{ + "../samples/constant_scalar." 
+ file_ending, Access::READ_ONLY}; + helper::listSeries(list); } } -TEST_CASE( "constant_scalar", "[serial]" ) +TEST_CASE("constant_scalar", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - constant_scalar( t ); + constant_scalar(t); } } -TEST_CASE( "flush_without_position_positionOffset", "[serial]" ) +TEST_CASE("flush_without_position_positionOffset", "[serial]") { - for( auto const & t : testedFileExtensions() ) + for (auto const &t : testedFileExtensions()) { - const std::string & file_ending = t; + const std::string &file_ending = t; Series s = Series( "../samples/flush_without_position_positionOffset." + file_ending, - Access::CREATE ); - ParticleSpecies e = s.iterations[ 0 ].particles[ "e" ]; - RecordComponent weighting = e[ "weighting" ][ RecordComponent::SCALAR ]; - weighting.resetDataset( Dataset( Datatype::FLOAT, Extent{ 2, 2 } ) ); - weighting.storeChunk( std::shared_ptr< float >( - new float[ 4 ](), - []( float const * ptr ) { delete[] ptr; } ), - { 0, 0 }, - { 2, 2 } ); + Access::CREATE); + ParticleSpecies e = s.iterations[0].particles["e"]; + RecordComponent weighting = e["weighting"][RecordComponent::SCALAR]; + weighting.resetDataset(Dataset(Datatype::FLOAT, Extent{2, 2})); + weighting.storeChunk( + std::shared_ptr( + new float[4](), [](float const *ptr) { delete[] ptr; }), + {0, 0}, + {2, 2}); s.flush(); - for( auto const & key : { "position", "positionOffset" } ) + for (auto const &key : {"position", "positionOffset"}) { - for( auto const & dim : { "x", "y", "z" } ) + for (auto const &dim : {"x", "y", "z"}) { - RecordComponent rc = e[ key ][ dim ]; - rc.resetDataset( Dataset( Datatype::FLOAT , Extent{ 2, 2 } ) ); - rc.storeChunk( std::shared_ptr< float >( - new float[ 4 ](), - []( float const * ptr ) { delete[] ptr; } ), - { 0, 0 }, - { 2, 2 } ); - } + RecordComponent rc = e[key][dim]; + rc.resetDataset(Dataset(Datatype::FLOAT, Extent{2, 2})); + rc.storeChunk( + std::shared_ptr( + new float[4](), [](float const *ptr) { delete[] ptr; }), + {0, 0}, + {2, 2}); + } } } } - -inline -void particle_patches( std::string file_ending ) +inline void particle_patches(std::string file_ending) { constexpr auto SCALAR = openPMD::RecordComponent::SCALAR; @@ -995,30 +1103,39 @@ void particle_patches( std::string file_ending ) uint64_t const num_patches = 2u; { // constant scalar - Series s = Series("../samples/particle_patches%T." + file_ending, Access::CREATE); + Series s = Series( + "../samples/particle_patches%T." 
+ file_ending, Access::CREATE); auto e = s.iterations[42].particles["electrons"]; - for( auto r : {"x", "y"} ) + for (auto r : {"x", "y"}) { auto x = e["position"][r]; x.resetDataset(Dataset(determineDatatype(), {extent})); - std::vector xd( extent ); + std::vector xd(extent); std::iota(xd.begin(), xd.end(), 0); x.storeChunk(xd); - auto o = e["positionOffset"][r]; + auto o = e["positionOffset"][r]; o.resetDataset(Dataset(determineDatatype(), {extent})); - std::vector od( extent ); + std::vector od(extent); std::iota(od.begin(), od.end(), 0); o.storeChunk(od); s.flush(); } - auto const dset_n = Dataset(determineDatatype(), {num_patches, }); + auto const dset_n = Dataset( + determineDatatype(), + { + num_patches, + }); e.particlePatches["numParticles"][SCALAR].resetDataset(dset_n); e.particlePatches["numParticlesOffset"][SCALAR].resetDataset(dset_n); - auto const dset_f = Dataset(determineDatatype(), {num_patches, }); + auto const dset_f = Dataset( + determineDatatype(), + { + num_patches, + }); e.particlePatches["offset"]["x"].resetDataset(dset_f); e.particlePatches["offset"]["y"].resetDataset(dset_f); e.particlePatches["extent"]["x"].resetDataset(dset_f); @@ -1041,16 +1158,20 @@ void particle_patches( std::string file_ending ) e.particlePatches["extent"]["y"].store(1, float(123.)); } { - Series s = Series("../samples/particle_patches%T." + file_ending, Access::READ_ONLY); + Series s = Series( + "../samples/particle_patches%T." + file_ending, Access::READ_ONLY); auto e = s.iterations[42].particles["electrons"]; - auto numParticles = e.particlePatches["numParticles"][SCALAR].template load< uint64_t >(); - auto numParticlesOffset = e.particlePatches["numParticlesOffset"][SCALAR].template load< uint64_t >(); - auto extent_x = e.particlePatches["extent"]["x"].template load< float >(); - auto extent_y = e.particlePatches["extent"]["y"].template load< float >(); - auto offset_x = e.particlePatches["offset"]["x"].template load< float >(); - auto offset_y = e.particlePatches["offset"]["y"].template load< float >(); + auto numParticles = + e.particlePatches["numParticles"][SCALAR].template load(); + auto numParticlesOffset = + e.particlePatches["numParticlesOffset"][SCALAR] + .template load(); + auto extent_x = e.particlePatches["extent"]["x"].template load(); + auto extent_y = e.particlePatches["extent"]["y"].template load(); + auto offset_x = e.particlePatches["offset"]["x"].template load(); + auto offset_y = e.particlePatches["offset"]["y"].template load(); s.flush(); @@ -1069,19 +1190,18 @@ void particle_patches( std::string file_ending ) } } -TEST_CASE( "particle_patches", "[serial]" ) +TEST_CASE("particle_patches", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - particle_patches( t ); + particle_patches(t); } } -inline -void dtype_test( const std::string & backend ) +inline void dtype_test(const std::string &backend) { - bool test_long_double = (backend != "json") || sizeof (long double) <= 8; - bool test_long_long = (backend != "json") || sizeof (long long) <= 8; + bool test_long_double = (backend != "json") || sizeof(long double) <= 8; + bool test_long_long = (backend != "json") || sizeof(long long) <= 8; { Series s = Series("../samples/dtype_test." 
+ backend, Access::CREATE); @@ -1112,21 +1232,33 @@ void dtype_test( const std::string & backend ) } std::string str = "string"; s.setAttribute("string", str); - s.setAttribute("vecChar", std::vector< char >({'c', 'h', 'a', 'r'})); - s.setAttribute("vecInt16", std::vector< int16_t >({32766, 32767})); - s.setAttribute("vecInt32", std::vector< int32_t >({2147483646, 2147483647})); - s.setAttribute("vecInt64", std::vector< int64_t >({9223372036854775806, 9223372036854775807})); - s.setAttribute("vecUchar", std::vector< char >({'u', 'c', 'h', 'a', 'r'})); - s.setAttribute("vecUint16", std::vector< uint16_t >({65534u, 65535u})); - s.setAttribute("vecUint32", std::vector< uint32_t >({4294967294u, 4294967295u})); - s.setAttribute("vecUint64", std::vector< uint64_t >({18446744073709551614u, 18446744073709551615u})); - s.setAttribute("vecFloat", std::vector< float >({0.f, 3.40282e+38f})); - s.setAttribute("vecDouble", std::vector< double >({0., 1.79769e+308})); + s.setAttribute("vecChar", std::vector({'c', 'h', 'a', 'r'})); + s.setAttribute("vecInt16", std::vector({32766, 32767})); + s.setAttribute( + "vecInt32", std::vector({2147483646, 2147483647})); + s.setAttribute( + "vecInt64", + std::vector({9223372036854775806, 9223372036854775807})); + s.setAttribute( + "vecUchar", std::vector({'u', 'c', 'h', 'a', 'r'})); + s.setAttribute("vecUint16", std::vector({65534u, 65535u})); + s.setAttribute( + "vecUint32", std::vector({4294967294u, 4294967295u})); + s.setAttribute( + "vecUint64", + std::vector( + {18446744073709551614u, 18446744073709551615u})); + s.setAttribute("vecFloat", std::vector({0.f, 3.40282e+38f})); + s.setAttribute("vecDouble", std::vector({0., 1.79769e+308})); if (test_long_double) { - s.setAttribute("vecLongdouble", std::vector< long double >({0.L, std::numeric_limits::max()})); + s.setAttribute( + "vecLongdouble", + std::vector( + {0.L, std::numeric_limits::max()})); } - s.setAttribute("vecString", std::vector< std::string >({"vector", "of", "strings"})); + s.setAttribute( + "vecString", std::vector({"vector", "of", "strings"})); s.setAttribute("bool", true); s.setAttribute("boolF", false); @@ -1153,66 +1285,97 @@ void dtype_test( const std::string & backend ) unsigned long long ull = 128u; s.setAttribute("ulonglong", ull); } - s.setAttribute("vecShort", std::vector< short >({32766, 32767})); - s.setAttribute("vecInt", std::vector< int >({32766, 32767})); - s.setAttribute("vecLong", std::vector< long >({2147483646, 2147483647})); + s.setAttribute("vecShort", std::vector({32766, 32767})); + s.setAttribute("vecInt", std::vector({32766, 32767})); + s.setAttribute("vecLong", std::vector({2147483646, 2147483647})); if (test_long_long) { - s.setAttribute("vecLongLong", std::vector< long long >({2147483644, 2147483643})); + s.setAttribute( + "vecLongLong", + std::vector({2147483644, 2147483643})); } - s.setAttribute("vecUShort", std::vector< unsigned short >({65534u, 65535u})); - s.setAttribute("vecUInt", std::vector< unsigned int >({65533u, 65531u})); - s.setAttribute("vecULong", std::vector< unsigned long >({65532u, 65530u})); + s.setAttribute( + "vecUShort", std::vector({65534u, 65535u})); + s.setAttribute("vecUInt", std::vector({65533u, 65531u})); + s.setAttribute( + "vecULong", std::vector({65532u, 65530u})); if (test_long_long) { - s.setAttribute("vecULongLong", std::vector< unsigned long long >({65531u, 65529u})); + s.setAttribute( + "vecULongLong", + std::vector({65531u, 65529u})); } // long double grid spacing // should be possible to parse without error upon opening // the series 
for reading { - auto E = s.iterations[ 0 ].meshes[ "E" ]; - E.setGridSpacing( std::vector< long double >{ 1.0, 1.0 } ); - auto E_x = E[ "x" ]; - E_x.makeEmpty< double >( 1 ); + auto E = s.iterations[0].meshes["E"]; + E.setGridSpacing(std::vector{1.0, 1.0}); + auto E_x = E["x"]; + E_x.makeEmpty(1); } } Series s = Series("../samples/dtype_test." + backend, Access::READ_ONLY); - REQUIRE(s.getAttribute("char").get< char >() == 'c'); - REQUIRE(s.getAttribute("uchar").get< unsigned char >() == 'u'); - REQUIRE(s.getAttribute("int16").get< int16_t >() == 16); - REQUIRE(s.getAttribute("int32").get< int32_t >() == 32); - REQUIRE(s.getAttribute("int64").get< int64_t >() == 64); - REQUIRE(s.getAttribute("uint16").get< uint16_t >() == 16u); - REQUIRE(s.getAttribute("uint32").get< uint32_t >() == 32u); - REQUIRE(s.getAttribute("uint64").get< uint64_t >() == 64u); - REQUIRE(s.getAttribute("float").get< float >() == 16.e10f); - REQUIRE(s.getAttribute("double").get< double >() == 1.e64); + REQUIRE(s.getAttribute("char").get() == 'c'); + REQUIRE(s.getAttribute("uchar").get() == 'u'); + REQUIRE(s.getAttribute("int16").get() == 16); + REQUIRE(s.getAttribute("int32").get() == 32); + REQUIRE(s.getAttribute("int64").get() == 64); + REQUIRE(s.getAttribute("uint16").get() == 16u); + REQUIRE(s.getAttribute("uint32").get() == 32u); + REQUIRE(s.getAttribute("uint64").get() == 64u); + REQUIRE(s.getAttribute("float").get() == 16.e10f); + REQUIRE(s.getAttribute("double").get() == 1.e64); if (test_long_double) { - REQUIRE(s.getAttribute("longdouble").get< long double >() == 1.e80L); - } - REQUIRE(s.getAttribute("string").get< std::string >() == "string"); - REQUIRE(s.getAttribute("vecChar").get< std::vector< char > >() == std::vector< char >({'c', 'h', 'a', 'r'})); - REQUIRE(s.getAttribute("vecInt16").get< std::vector< int16_t > >() == std::vector< int16_t >({32766, 32767})); - REQUIRE(s.getAttribute("vecInt32").get< std::vector< int32_t > >() == std::vector< int32_t >({2147483646, 2147483647})); - REQUIRE(s.getAttribute("vecInt64").get< std::vector< int64_t > >() == std::vector< int64_t >({9223372036854775806, 9223372036854775807})); - REQUIRE(s.getAttribute("vecUchar").get< std::vector< char > >() == std::vector< char >({'u', 'c', 'h', 'a', 'r'})); - REQUIRE(s.getAttribute("vecUint16").get< std::vector< uint16_t > >() == std::vector< uint16_t >({65534u, 65535u})); - REQUIRE(s.getAttribute("vecUint32").get< std::vector< uint32_t > >() == std::vector< uint32_t >({4294967294u, 4294967295u})); - REQUIRE(s.getAttribute("vecUint64").get< std::vector< uint64_t > >() == std::vector< uint64_t >({18446744073709551614u, 18446744073709551615u})); - REQUIRE(s.getAttribute("vecFloat").get< std::vector< float > >() == std::vector< float >({0.f, 3.40282e+38f})); - REQUIRE(s.getAttribute("vecDouble").get< std::vector< double > >() == std::vector< double >({0., 1.79769e+308})); + REQUIRE(s.getAttribute("longdouble").get() == 1.e80L); + } + REQUIRE(s.getAttribute("string").get() == "string"); + REQUIRE( + s.getAttribute("vecChar").get >() == + std::vector({'c', 'h', 'a', 'r'})); + REQUIRE( + s.getAttribute("vecInt16").get >() == + std::vector({32766, 32767})); + REQUIRE( + s.getAttribute("vecInt32").get >() == + std::vector({2147483646, 2147483647})); + REQUIRE( + s.getAttribute("vecInt64").get >() == + std::vector({9223372036854775806, 9223372036854775807})); + REQUIRE( + s.getAttribute("vecUchar").get >() == + std::vector({'u', 'c', 'h', 'a', 'r'})); + REQUIRE( + s.getAttribute("vecUint16").get >() == + std::vector({65534u, 65535u})); 
+ REQUIRE( + s.getAttribute("vecUint32").get >() == + std::vector({4294967294u, 4294967295u})); + REQUIRE( + s.getAttribute("vecUint64").get >() == + std::vector({18446744073709551614u, 18446744073709551615u})); + REQUIRE( + s.getAttribute("vecFloat").get >() == + std::vector({0.f, 3.40282e+38f})); + REQUIRE( + s.getAttribute("vecDouble").get >() == + std::vector({0., 1.79769e+308})); if (test_long_double) { - REQUIRE(s.getAttribute("vecLongdouble").get< std::vector< long double > >() == std::vector< long double >({0.L, std::numeric_limits::max()})); + REQUIRE( + s.getAttribute("vecLongdouble").get >() == + std::vector( + {0.L, std::numeric_limits::max()})); } - REQUIRE(s.getAttribute("vecString").get< std::vector< std::string > >() == std::vector< std::string >({"vector", "of", "strings"})); - REQUIRE(s.getAttribute("bool").get< bool >() == true); - REQUIRE(s.getAttribute("boolF").get< bool >() == false); + REQUIRE( + s.getAttribute("vecString").get >() == + std::vector({"vector", "of", "strings"})); + REQUIRE(s.getAttribute("bool").get() == true); + REQUIRE(s.getAttribute("boolF").get() == false); // same implementation types (not necessary aliases) detection #if !defined(_MSC_VER) @@ -1237,7 +1400,8 @@ void dtype_test( const std::string & backend ) REQUIRE(s.getAttribute("vecULong").dtype == Datatype::VEC_ULONG); if (test_long_long) { - REQUIRE(s.getAttribute("vecULongLong").dtype == Datatype::VEC_ULONGLONG); + REQUIRE( + s.getAttribute("vecULongLong").dtype == Datatype::VEC_ULONGLONG); } #endif REQUIRE(isSame(s.getAttribute("short").dtype, Datatype::SHORT)); @@ -1260,71 +1424,86 @@ void dtype_test( const std::string & backend ) REQUIRE(isSame(s.getAttribute("vecLong").dtype, Datatype::VEC_LONG)); if (test_long_long) { - REQUIRE(isSame(s.getAttribute("vecLongLong").dtype, Datatype::VEC_LONGLONG)); + REQUIRE(isSame( + s.getAttribute("vecLongLong").dtype, Datatype::VEC_LONGLONG)); } REQUIRE(isSame(s.getAttribute("vecUShort").dtype, Datatype::VEC_USHORT)); REQUIRE(isSame(s.getAttribute("vecUInt").dtype, Datatype::VEC_UINT)); REQUIRE(isSame(s.getAttribute("vecULong").dtype, Datatype::VEC_ULONG)); if (test_long_long) { - REQUIRE(isSame(s.getAttribute("vecULongLong").dtype, Datatype::VEC_ULONGLONG)); + REQUIRE(isSame( + s.getAttribute("vecULongLong").dtype, Datatype::VEC_ULONGLONG)); } } -TEST_CASE( "dtype_test", "[serial]" ) +TEST_CASE("dtype_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) dtype_test(t); } -inline -void write_test(const std::string & backend) +inline void write_test(const std::string &backend) { Series o = Series("../samples/serial_write." 
+ backend, Access::CREATE); - ParticleSpecies& e_1 = o.iterations[1].particles["e"]; + ParticleSpecies &e_1 = o.iterations[1].particles["e"]; - std::vector< double > position_global(4); + std::vector position_global(4); double pos{0.}; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - std::shared_ptr< double > position_local_1(new double); - e_1["position"]["x"].resetDataset(Dataset(determineDatatype(position_local_1), {4})); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local_1(new double); + e_1["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local_1), {4})); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *position_local_1 = position_global[i]; e_1["position"]["x"].storeChunk(position_local_1, {i}, {1}); } - std::vector< uint64_t > positionOffset_global(4); + std::vector positionOffset_global(4); uint64_t posOff{0}; - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local_1(new uint64_t); - e_1["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local_1), {4})); + std::generate( + positionOffset_global.begin(), positionOffset_global.end(), [&posOff] { + return posOff++; + }); + std::shared_ptr positionOffset_local_1(new uint64_t); + e_1["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local_1), {4})); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *positionOffset_local_1 = positionOffset_global[i]; e_1["positionOffset"]["x"].storeChunk(positionOffset_local_1, {i}, {1}); } - ParticleSpecies& e_2 = o.iterations[2].particles["e"]; + ParticleSpecies &e_2 = o.iterations[2].particles["e"]; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - std::shared_ptr< double > position_local_2(new double); - e_2["position"]["x"].resetDataset(Dataset(determineDatatype(position_local_2), {4})); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local_2(new double); + e_2["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local_2), {4})); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *position_local_2 = position_global[i]; e_2["position"]["x"].storeChunk(position_local_2, {i}, {1}); } - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local_2(new uint64_t); - e_2["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local_2), {4})); + std::generate( + positionOffset_global.begin(), positionOffset_global.end(), [&posOff] { + return posOff++; + }); + std::shared_ptr positionOffset_local_2(new uint64_t); + e_2["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local_2), {4})); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *positionOffset_local_2 = positionOffset_global[i]; e_2["positionOffset"]["x"].storeChunk(positionOffset_local_2, {i}, {1}); @@ -1332,23 +1511,30 @@ void write_test(const std::string & backend) o.flush(); - ParticleSpecies& e_3 = o.iterations[3].particles["e"]; + ParticleSpecies &e_3 = o.iterations[3].particles["e"]; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - 
std::shared_ptr< double > position_local_3(new double); - e_3["position"]["x"].resetDataset(Dataset(determineDatatype(position_local_3), {4})); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local_3(new double); + e_3["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local_3), {4})); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *position_local_3 = position_global[i]; e_3["position"]["x"].storeChunk(position_local_3, {i}, {1}); } - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local_3(new uint64_t); - e_3["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local_3), {4})); + std::generate( + positionOffset_global.begin(), positionOffset_global.end(), [&posOff] { + return posOff++; + }); + std::shared_ptr positionOffset_local_3(new uint64_t); + e_3["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local_3), {4})); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *positionOffset_local_3 = positionOffset_global[i]; e_3["positionOffset"]["x"].storeChunk(positionOffset_local_3, {i}, {1}); @@ -1357,26 +1543,29 @@ void write_test(const std::string & backend) o.flush(); } -TEST_CASE( "write_test", "[serial]" ) +TEST_CASE("write_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - write_test( t ); - Series list{ "../samples/serial_write." + t, Access::READ_ONLY }; - helper::listSeries( list ); + write_test(t); + Series list{"../samples/serial_write." + t, Access::READ_ONLY}; + helper::listSeries(list); } } -void test_complex(const std::string & backend) { +void test_complex(const std::string &backend) +{ { - Series o = Series("../samples/serial_write_complex." + backend, Access::CREATE); + Series o = Series( + "../samples/serial_write_complex." 
+ backend, Access::CREATE); o.setAttribute("lifeIsComplex", std::complex(4.56, 7.89)); o.setAttribute("butComplexFloats", std::complex(42.3, -99.3)); - if( backend != "bp" ) - o.setAttribute("longDoublesYouSay", std::complex(5.5, -4.55)); + if (backend != "bp") + o.setAttribute( + "longDoublesYouSay", std::complex(5.5, -4.55)); auto Cflt = o.iterations[0].meshes["Cflt"][RecordComponent::SCALAR]; - std::vector< std::complex > cfloats(3); + std::vector > cfloats(3); cfloats.at(0) = {1., 2.}; cfloats.at(1) = {-3., 4.}; cfloats.at(2) = {5., -6.}; @@ -1384,21 +1573,23 @@ void test_complex(const std::string & backend) { Cflt.storeChunk(cfloats, {0}); auto Cdbl = o.iterations[0].meshes["Cdbl"][RecordComponent::SCALAR]; - std::vector< std::complex > cdoubles(3); + std::vector > cdoubles(3); cdoubles.at(0) = {2., 1.}; cdoubles.at(1) = {-4., 3.}; cdoubles.at(2) = {6., -5.}; Cdbl.resetDataset(Dataset(Datatype::CDOUBLE, {cdoubles.size()})); Cdbl.storeChunk(cdoubles, {0}); - std::vector< std::complex > cldoubles(3); - if( backend != "bp" ) + std::vector > cldoubles(3); + if (backend != "bp") { - auto Cldbl = o.iterations[0].meshes["Cldbl"][RecordComponent::SCALAR]; + auto Cldbl = + o.iterations[0].meshes["Cldbl"][RecordComponent::SCALAR]; cldoubles.at(0) = {3., 2.}; cldoubles.at(1) = {-5., 4.}; cldoubles.at(2) = {7., -6.}; - Cldbl.resetDataset(Dataset(Datatype::CLONG_DOUBLE, {cldoubles.size()})); + Cldbl.resetDataset( + Dataset(Datatype::CLONG_DOUBLE, {cldoubles.size()})); Cldbl.storeChunk(cldoubles, {0}); } @@ -1406,48 +1597,63 @@ void test_complex(const std::string & backend) { } { - Series i = Series("../samples/serial_write_complex." + backend, Access::READ_ONLY); - REQUIRE(i.getAttribute("lifeIsComplex").get< std::complex >() == std::complex(4.56, 7.89)); - REQUIRE(i.getAttribute("butComplexFloats").get< std::complex >() == std::complex(42.3, -99.3)); - if( backend != "bp" ) { - REQUIRE(i.getAttribute("longDoublesYouSay").get >() == - std::complex(5.5, -4.55)); + Series i = Series( + "../samples/serial_write_complex." + backend, Access::READ_ONLY); + REQUIRE( + i.getAttribute("lifeIsComplex").get >() == + std::complex(4.56, 7.89)); + REQUIRE( + i.getAttribute("butComplexFloats").get >() == + std::complex(42.3, -99.3)); + if (backend != "bp") + { + REQUIRE( + i.getAttribute("longDoublesYouSay") + .get >() == + std::complex(5.5, -4.55)); } - auto rcflt = i.iterations[0].meshes["Cflt"][RecordComponent::SCALAR].loadChunk< std::complex >(); - auto rcdbl = i.iterations[0].meshes["Cdbl"][RecordComponent::SCALAR].loadChunk< std::complex >(); + auto rcflt = i.iterations[0] + .meshes["Cflt"][RecordComponent::SCALAR] + .loadChunk >(); + auto rcdbl = i.iterations[0] + .meshes["Cdbl"][RecordComponent::SCALAR] + .loadChunk >(); i.flush(); REQUIRE(rcflt.get()[1] == std::complex(-3., 4.)); REQUIRE(rcdbl.get()[2] == std::complex(6, -5.)); - if( backend != "bp" ) + if (backend != "bp") { - auto rcldbl = i.iterations[0].meshes["Cldbl"][RecordComponent::SCALAR].loadChunk< std::complex >(); + auto rcldbl = i.iterations[0] + .meshes["Cldbl"][RecordComponent::SCALAR] + .loadChunk >(); i.flush(); REQUIRE(rcldbl.get()[2] == std::complex(7., -6.)); } } { - Series list{ "../samples/serial_write_complex." + backend, Access::READ_ONLY }; - helper::listSeries( list ); + Series list{ + "../samples/serial_write_complex." 
+ backend, Access::READ_ONLY}; + helper::listSeries(list); } } -TEST_CASE( "test_complex", "[serial]" ) +TEST_CASE("test_complex", "[serial]") { // Notes: // - ADIOS1 and ADIOS 2.7.0 have no complex long double // - JSON read-back not distinguishable yet from N+1 shaped data set - for (auto const & t : testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { test_complex(t); } } -inline -void fileBased_add_EDpic(ParticleSpecies& e, uint64_t const num_particles) +inline void +fileBased_add_EDpic(ParticleSpecies &e, uint64_t const num_particles) { // ED-PIC e["position"].setAttribute("weightingPower", 0.0); @@ -1461,7 +1667,6 @@ void fileBased_add_EDpic(ParticleSpecies& e, uint64_t const num_particles) e["momentum"].setAttribute("weightingPower", 1.0); e["momentum"].setAttribute("macroWeighted", uint32_t(0)); - e["charge"][RecordComponent::SCALAR].resetDataset(dsDbl); e["charge"][RecordComponent::SCALAR].makeConstant(2.3); e["charge"].setAttribute("weightingPower", 1.0); @@ -1484,97 +1689,135 @@ void fileBased_add_EDpic(ParticleSpecies& e, uint64_t const num_particles) e.setAttribute("particleSmoothing", "none"); } -inline -void fileBased_write_test(const std::string & backend) +inline void fileBased_write_test(const std::string &backend) { - if( auxiliary::directory_exists("../samples/subdir") ) + if (auxiliary::directory_exists("../samples/subdir")) auxiliary::remove_directory("../samples/subdir"); { - Series o = Series("../samples/subdir/serial_fileBased_write%03T." + backend, Access::CREATE); + Series o = Series( + "../samples/subdir/serial_fileBased_write%03T." + backend, + Access::CREATE); - ParticleSpecies& e_1 = o.iterations[1].particles["e"]; + ParticleSpecies &e_1 = o.iterations[1].particles["e"]; - std::vector< double > position_global(4); + std::vector position_global(4); double pos{0.}; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - std::shared_ptr< double > position_local_1(new double); - e_1["position"]["x"].resetDataset(Dataset(determineDatatype(position_local_1), {4})); - std::vector< uint64_t > positionOffset_global(4); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local_1(new double); + e_1["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local_1), {4})); + std::vector positionOffset_global(4); uint64_t posOff{0}; - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local_1(new uint64_t); - e_1["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local_1), {4})); + std::generate( + positionOffset_global.begin(), + positionOffset_global.end(), + [&posOff] { return posOff++; }); + std::shared_ptr positionOffset_local_1(new uint64_t); + e_1["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local_1), {4})); fileBased_add_EDpic(e_1, 4); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *position_local_1 = position_global[i]; e_1["position"]["x"].storeChunk(position_local_1, {i}, {1}); *positionOffset_local_1 = positionOffset_global[i]; - e_1["positionOffset"]["x"].storeChunk(positionOffset_local_1, {i}, {1}); + e_1["positionOffset"]["x"].storeChunk( + positionOffset_local_1, {i}, {1}); o.flush(); } - o.iterations[1].setTime(static_cast< double >(1)); + o.iterations[1].setTime(static_cast(1)); - ParticleSpecies& e_2 = o.iterations[2].particles["e"]; + 
ParticleSpecies &e_2 = o.iterations[2].particles["e"]; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - e_2["position"]["x"].resetDataset(Dataset(determineDatatype(), {4})); - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local_2(new uint64_t); - e_2["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local_2), {4})); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + e_2["position"]["x"].resetDataset( + Dataset(determineDatatype(), {4})); + std::generate( + positionOffset_global.begin(), + positionOffset_global.end(), + [&posOff] { return posOff++; }); + std::shared_ptr positionOffset_local_2(new uint64_t); + e_2["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local_2), {4})); fileBased_add_EDpic(e_2, 4); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { double const position_local_2 = position_global.at(i); - e_2["position"]["x"].storeChunk(shareRaw(&position_local_2), {i}, {1}); + e_2["position"]["x"].storeChunk( + shareRaw(&position_local_2), {i}, {1}); *positionOffset_local_2 = positionOffset_global[i]; - e_2["positionOffset"]["x"].storeChunk(positionOffset_local_2, {i}, {1}); + e_2["positionOffset"]["x"].storeChunk( + positionOffset_local_2, {i}, {1}); o.flush(); } - o.iterations[2].setTime(static_cast< double >(2)); + o.iterations[2].setTime(static_cast(2)); - ParticleSpecies& e_3 = o.iterations[3].particles["e"]; + ParticleSpecies &e_3 = o.iterations[3].particles["e"]; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - std::shared_ptr< double > position_local_3(new double); - e_3["position"]["x"].resetDataset(Dataset(determineDatatype(position_local_3), {4})); - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local_3(new uint64_t); - e_3["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local_3), {4})); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local_3(new double); + e_3["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local_3), {4})); + std::generate( + positionOffset_global.begin(), + positionOffset_global.end(), + [&posOff] { return posOff++; }); + std::shared_ptr positionOffset_local_3(new uint64_t); + e_3["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local_3), {4})); fileBased_add_EDpic(e_3, 4); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *position_local_3 = position_global[i]; e_3["position"]["x"].storeChunk(position_local_3, {i}, {1}); *positionOffset_local_3 = positionOffset_global[i]; - e_3["positionOffset"]["x"].storeChunk(positionOffset_local_3, {i}, {1}); + e_3["positionOffset"]["x"].storeChunk( + positionOffset_local_3, {i}, {1}); o.flush(); } - o.setOpenPMDextension(1); // this happens intentionally "late" in this test - o.iterations[3].setTime(static_cast< double >(3)); - o.iterations[4].setTime(static_cast< double >(4)); + o.setOpenPMDextension( + 1); // this happens intentionally "late" in this test + o.iterations[3].setTime(static_cast(3)); + o.iterations[4].setTime(static_cast(4)); o.flush(); - o.iterations[5].setTime(static_cast< double >(5)); + 
+        o.iterations[5].setTime(static_cast<double>(5));
     }
-    REQUIRE((auxiliary::file_exists("../samples/subdir/serial_fileBased_write001." + backend)
-          || auxiliary::directory_exists("../samples/subdir/serial_fileBased_write001." + backend)));
-    REQUIRE((auxiliary::file_exists("../samples/subdir/serial_fileBased_write002." + backend)
-          || auxiliary::directory_exists("../samples/subdir/serial_fileBased_write002." + backend)));
-    REQUIRE((auxiliary::file_exists("../samples/subdir/serial_fileBased_write003." + backend)
-          || auxiliary::directory_exists("../samples/subdir/serial_fileBased_write003." + backend)));
+    REQUIRE(
+        (auxiliary::file_exists(
+             "../samples/subdir/serial_fileBased_write001." + backend) ||
+         auxiliary::directory_exists(
+             "../samples/subdir/serial_fileBased_write001." + backend)));
+    REQUIRE(
+        (auxiliary::file_exists(
+             "../samples/subdir/serial_fileBased_write002." + backend) ||
+         auxiliary::directory_exists(
+             "../samples/subdir/serial_fileBased_write002." + backend)));
+    REQUIRE(
+        (auxiliary::file_exists(
+             "../samples/subdir/serial_fileBased_write003." + backend) ||
+         auxiliary::directory_exists(
+             "../samples/subdir/serial_fileBased_write003." + backend)));
 
     {
-        Series o = Series("../samples/subdir/serial_fileBased_write%T." + backend, Access::READ_ONLY);
+        Series o = Series(
+            "../samples/subdir/serial_fileBased_write%T." + backend,
+            Access::READ_ONLY);
 
         REQUIRE(o.iterations.size() == 5);
         REQUIRE(o.iterations.count(1) == 1);
@@ -1595,90 +1838,104 @@ void fileBased_write_test(const std::string & backend)
         REQUIRE(o.particlesPath() == "particles/");
         REQUIRE_FALSE(o.containsAttribute("meshesPath"));
         REQUIRE_THROWS_AS(o.meshesPath(), no_such_attribute_error);
-        std::array< double, 7 > udim{{1, 0, 0, 0, 0, 0, 0}};
+        std::array<double, 7> udim{{1, 0, 0, 0, 0, 0, 0}};
         Extent ext{4};
-        for( auto& entry : o.iterations )
+        for (auto &entry : o.iterations)
         {
-            auto& it = entry.second;
-            REQUIRE(it.dt< double >() == 1.);
-            REQUIRE(it.time< double >() == static_cast< double >(entry.first));
+            auto &it = entry.second;
+            REQUIRE(it.dt<double>() == 1.);
+            REQUIRE(it.time<double>() == static_cast<double>(entry.first));
             REQUIRE(it.timeUnitSI() == 1.);
 
-            if( entry.first > 3 )
+            if (entry.first > 3)
                 continue; // empty iterations
 
-            auto& pos = it.particles.at("e").at("position");
-            REQUIRE(pos.timeOffset< float >() == 0.f);
+            auto &pos = it.particles.at("e").at("position");
+            REQUIRE(pos.timeOffset<float>() == 0.f);
             REQUIRE(pos.unitDimension() == udim);
             REQUIRE(!pos.scalar());
-            auto& pos_x = pos.at("x");
+            auto &pos_x = pos.at("x");
             REQUIRE(pos_x.unitSI() == 1.);
             REQUIRE(pos_x.getExtent() == ext);
             REQUIRE(pos_x.getDatatype() == Datatype::DOUBLE);
             REQUIRE(!pos_x.constant());
-            auto& posOff = it.particles.at("e").at("positionOffset");
-            REQUIRE(posOff.timeOffset< float >() == 0.f);
+            auto &posOff = it.particles.at("e").at("positionOffset");
+            REQUIRE(posOff.timeOffset<float>() == 0.f);
             REQUIRE(posOff.unitDimension() == udim);
-            auto& posOff_x = posOff.at("x");
+            auto &posOff_x = posOff.at("x");
             REQUIRE(posOff_x.unitSI() == 1.);
             REQUIRE(posOff_x.getExtent() == ext);
 #if !defined(_MSC_VER)
-            REQUIRE(posOff_x.getDatatype() == determineDatatype< uint64_t >());
+            REQUIRE(posOff_x.getDatatype() == determineDatatype<uint64_t>());
 #endif
-            REQUIRE(isSame(posOff_x.getDatatype(), determineDatatype< uint64_t >()));
+            REQUIRE(
+                isSame(posOff_x.getDatatype(), determineDatatype<uint64_t>()));
 
-            auto position = pos_x.loadChunk< double >({0}, {4});
+            auto position = pos_x.loadChunk<double>({0}, {4});
             auto position_raw = position.get();
-            auto positionOffset = posOff_x.loadChunk< uint64_t >({0}, {4});
+            auto positionOffset = posOff_x.loadChunk<uint64_t>({0}, {4});
             auto positionOffset_raw = positionOffset.get();
             o.flush();
 
-            for( uint64_t j = 0; j < 4; ++j )
+            for (uint64_t j = 0; j < 4; ++j)
             {
-                REQUIRE(position_raw[j] == static_cast< double >(j + (entry.first-1)*4));
-                REQUIRE(positionOffset_raw[j] == j + (entry.first-1)*4);
+                REQUIRE(
+                    position_raw[j] ==
+                    static_cast<double>(j + (entry.first - 1) * 4));
+                REQUIRE(positionOffset_raw[j] == j + (entry.first - 1) * 4);
             }
         }
 
-        REQUIRE(o.iterations[3].time< double >() == 3.0);
-        REQUIRE(o.iterations[4].time< double >() == 4.0);
-        REQUIRE(o.iterations[5].time< double >() == 5.0);
+        REQUIRE(o.iterations[3].time<double>() == 3.0);
+        REQUIRE(o.iterations[4].time<double>() == 4.0);
+        REQUIRE(o.iterations[5].time<double>() == 5.0);
     }
 
-    // extend existing series with new step and auto-detection of iteration padding
+    // extend existing series with new step and auto-detection of iteration
+    // padding
     {
-        Series o = Series("../samples/subdir/serial_fileBased_write%T." + backend, Access::READ_WRITE);
+        Series o = Series(
+            "../samples/subdir/serial_fileBased_write%T." + backend,
+            Access::READ_WRITE);
 
         REQUIRE(o.iterations.size() == 5);
         o.iterations[6];
         REQUIRE(o.iterations.size() == 6);
         // write something to trigger opening of the file
-        o.iterations[ 6 ].particles[ "e" ][ "position" ][ "x" ].resetDataset(
-            { Datatype::DOUBLE, { 10 } } );
-        o.iterations[ 6 ]
-            .particles[ "e" ][ "position" ][ "x" ]
-            .makeConstant< double >( 1.0 );
+        o.iterations[6].particles["e"]["position"]["x"].resetDataset(
+            {Datatype::DOUBLE, {10}});
+        o.iterations[6].particles["e"]["position"]["x"].makeConstant<double>(
+            1.0);
 
-        // additional iteration with over-running iteration padding but similar content
+        // additional iteration with over-running iteration padding but similar
+        // content
         // padding: 000
         uint64_t const overlong_it = 123456;
-        o.iterations[ overlong_it ];
         // write something to trigger opening of the file
-        o.iterations[ overlong_it ].particles[ "e" ][ "position" ][ "x" ].resetDataset(
-            { Datatype::DOUBLE, { 12 } } );
-        o.iterations[ overlong_it ]
-            .particles[ "e" ][ "position" ][ "x" ]
-            .makeConstant< double >( 1.0 );
+        o.iterations[overlong_it];
+        // write something to trigger opening of the file
+        o.iterations[overlong_it].particles["e"]["position"]["x"].resetDataset(
+            {Datatype::DOUBLE, {12}});
+        o.iterations[overlong_it]
+            .particles["e"]["position"]["x"]
+            .makeConstant<double>(1.0);
 
-        o.iterations[ overlong_it ].setTime(static_cast< double >(overlong_it));
+        o.iterations[overlong_it].setTime(static_cast<double>(overlong_it));
         REQUIRE(o.iterations.size() == 7);
     }
-    REQUIRE((auxiliary::file_exists("../samples/subdir/serial_fileBased_write004." + backend)
-          || auxiliary::directory_exists("../samples/subdir/serial_fileBased_write004." + backend)));
-    REQUIRE((auxiliary::file_exists("../samples/subdir/serial_fileBased_write123456." + backend)
-          || auxiliary::directory_exists("../samples/subdir/serial_fileBased_write123456." + backend)));
+    REQUIRE(
+        (auxiliary::file_exists(
+             "../samples/subdir/serial_fileBased_write004." + backend) ||
+         auxiliary::directory_exists(
+             "../samples/subdir/serial_fileBased_write004." + backend)));
+    REQUIRE(
+        (auxiliary::file_exists(
+             "../samples/subdir/serial_fileBased_write123456." + backend) ||
+         auxiliary::directory_exists(
+             "../samples/subdir/serial_fileBased_write123456." + backend)));
 
     // additional iteration with shorter iteration padding but similar content
     {
-        Series o = Series("../samples/subdir/serial_fileBased_write%01T." + backend, Access::READ_WRITE);
+        Series o = Series(
+            "../samples/subdir/serial_fileBased_write%01T." + backend,
+            Access::READ_WRITE);
 
         REQUIRE(o.iterations.size() == 1);
         /*
@@ -1688,165 +1945,190 @@ void fileBased_write_test(const std::string & backend)
         */
         REQUIRE(o.iterations.count(123456) == 1);
 
-        auto& it = o.iterations[10];
-        ParticleSpecies& e = it.particles["e"];
+        auto &it = o.iterations[10];
+        ParticleSpecies &e = it.particles["e"];
         e["position"]["x"].resetDataset(Dataset(Datatype::DOUBLE, {42}));
         e["positionOffset"]["x"].resetDataset(Dataset(Datatype::DOUBLE, {42}));
         e["position"]["x"].makeConstant(1.23);
         e["positionOffset"]["x"].makeConstant(1.23);
         fileBased_add_EDpic(e, 42);
-        it.setTime(static_cast< double >(10));
+        it.setTime(static_cast<double>(10));
 
         REQUIRE(o.iterations.size() == 2);
     }
-    REQUIRE((auxiliary::file_exists("../samples/subdir/serial_fileBased_write10." + backend)
-          || auxiliary::directory_exists("../samples/subdir/serial_fileBased_write10." + backend)));
+    REQUIRE(
+        (auxiliary::file_exists(
+             "../samples/subdir/serial_fileBased_write10." + backend) ||
+         auxiliary::directory_exists(
+             "../samples/subdir/serial_fileBased_write10." + backend)));
 
     // read back with auto-detection and non-fixed padding
     {
-        Series s = Series("../samples/subdir/serial_fileBased_write%T." + backend, Access::READ_ONLY);
+        Series s = Series(
+            "../samples/subdir/serial_fileBased_write%T." + backend,
+            Access::READ_ONLY);
 
         REQUIRE(s.iterations.size() == 8);
         REQUIRE(s.iterations.contains(4));
         REQUIRE(s.iterations.contains(10));
         REQUIRE(s.iterations.contains(123456));
-        REQUIRE(s.iterations[3].time< double >() == 3.0);
-        REQUIRE(s.iterations[4].time< double >() == 4.0);
-        REQUIRE(s.iterations[5].time< double >() == 5.0);
-        REQUIRE(s.iterations[10].time< double >() == 10.0);
-        REQUIRE(s.iterations[123456].time< double >() == double(123456));
+        REQUIRE(s.iterations[3].time<double>() == 3.0);
+        REQUIRE(s.iterations[4].time<double>() == 4.0);
+        REQUIRE(s.iterations[5].time<double>() == 5.0);
+        REQUIRE(s.iterations[10].time<double>() == 10.0);
+        REQUIRE(s.iterations[123456].time<double>() == double(123456));
     }
 
     // write with auto-detection and in-consistent padding from step 10
     {
-        REQUIRE_THROWS_WITH(Series("../samples/subdir/serial_fileBased_write%T." + backend, Access::READ_WRITE),
-                            Catch::Equals("Cannot write to a series with inconsistent iteration padding. Please specify '%0T' or open as read-only."));
+        REQUIRE_THROWS_WITH(
+            Series(
+                "../samples/subdir/serial_fileBased_write%T." + backend,
+                Access::READ_WRITE),
+            Catch::Equals(
+                "Cannot write to a series with inconsistent iteration padding. "
+                "Please specify '%0T' or open as read-only."));
     }
 
     // read back with fixed padding
     {
-        Series s = Series("../samples/subdir/serial_fileBased_write%03T." + backend, Access::READ_ONLY);
+        Series s = Series(
+            "../samples/subdir/serial_fileBased_write%03T." + backend,
+            Access::READ_ONLY);
 
         REQUIRE(s.iterations.size() == 7);
         REQUIRE(s.iterations.contains(4));
         REQUIRE(!s.iterations.contains(10));
         REQUIRE(s.iterations.contains(123456));
-        REQUIRE(s.iterations[3].time< double >() == 3.0);
-        REQUIRE(s.iterations[4].time< double >() == 4.0);
-        REQUIRE(s.iterations[5].time< double >() == 5.0);
+        REQUIRE(s.iterations[3].time<double>() == 3.0);
+        REQUIRE(s.iterations[4].time<double>() == 4.0);
+        REQUIRE(s.iterations[5].time<double>() == 5.0);
     }
 
     // read back with auto-detection (allow relaxed/overflow padding)
     {
-        Series s = Series("../samples/subdir/serial_fileBased_write%T." + backend, Access::READ_ONLY);
+        Series s = Series(
+            "../samples/subdir/serial_fileBased_write%T." + backend,
+            Access::READ_ONLY);
 
         REQUIRE(s.iterations.size() == 8);
         REQUIRE(s.iterations.contains(4));
         REQUIRE(s.iterations.contains(10));
         REQUIRE(s.iterations.contains(123456));
-        REQUIRE(s.iterations[3].time< double >() == 3.0);
-        REQUIRE(s.iterations[4].time< double >() == 4.0);
-        REQUIRE(s.iterations[5].time< double >() == 5.0);
-        REQUIRE(s.iterations[10].time< double >() == 10.0);
-        REQUIRE(s.iterations[123456].time< double >() ==
-            static_cast< double >(123456));
+        REQUIRE(s.iterations[3].time<double>() == 3.0);
+        REQUIRE(s.iterations[4].time<double>() == 4.0);
+        REQUIRE(s.iterations[5].time<double>() == 5.0);
+        REQUIRE(s.iterations[10].time<double>() == 10.0);
+        REQUIRE(
+            s.iterations[123456].time<double>() ==
+            static_cast<double>(123456));
     }
 
     {
-        Series list{ "../samples/subdir/serial_fileBased_write%03T." + backend, Access::READ_ONLY };
-        helper::listSeries( list );
+        Series list{
+            "../samples/subdir/serial_fileBased_write%03T." + backend,
+            Access::READ_ONLY};
+        helper::listSeries(list);
     }
 }
 
-TEST_CASE( "fileBased_write_test", "[serial]" )
+TEST_CASE("fileBased_write_test", "[serial]")
 {
-    for (auto const & t: testedFileExtensions())
+    for (auto const &t : testedFileExtensions())
     {
-        fileBased_write_test( t );
+        fileBased_write_test(t);
     }
 }
 
-inline
-void sample_write_thetaMode(std::string file_ending)
+inline void sample_write_thetaMode(std::string file_ending)
 {
-    Series o = Series(std::string("../samples/thetaMode_%05T.").append(file_ending), Access::CREATE);
+    Series o = Series(
+        std::string("../samples/thetaMode_%05T.").append(file_ending),
+        Access::CREATE);
 
     unsigned int const num_modes = 4u;
-    unsigned int const num_fields = 1u + (num_modes-1u) * 2u; // the first mode is purely real
+    unsigned int const num_fields =
+        1u + (num_modes - 1u) * 2u; // the first mode is purely real
     unsigned int const N_r = 20;
     unsigned int const N_z = 64;
 
-    std::shared_ptr< float > E_r_data(new float[num_fields*N_r*N_z], [](float const *p){ delete[] p; });
-    std::shared_ptr< double > E_t_data(new double[num_fields*N_r*N_z], [](double const *p){ delete[] p; });
+    std::shared_ptr<float> E_r_data(
+        new float[num_fields * N_r * N_z], [](float const *p) { delete[] p; });
+    std::shared_ptr<double> E_t_data(
+        new double[num_fields * N_r * N_z],
+        [](double const *p) { delete[] p; });
     float e_r{0};
-    std::generate(E_r_data.get(), E_r_data.get() + num_fields*N_r*N_z, [&e_r]{ return e_r += 1.0f; });
+    std::generate(
+        E_r_data.get(), E_r_data.get() + num_fields * N_r * N_z, [&e_r] {
+            return e_r += 1.0f;
+        });
     double e_t{100};
-    std::generate(E_t_data.get(), E_t_data.get() + num_fields*N_r*N_z, [&e_t]{ return e_t += 2.0; });
+    std::generate(
+        E_t_data.get(), E_t_data.get() + num_fields * N_r * N_z, [&e_t] {
+            return e_t += 2.0;
+        });
 
     std::stringstream geos;
     geos << "m=" << num_modes << ";imag=+";
     std::string const geometryParameters = geos.str();
 
-    for(int i = 0; i <= 400; i+=100 )
+    for (int i = 0; i <= 400; i += 100)
    {
         auto it = o.iterations[i];
         Mesh E = it.meshes["E"];
-        E.setGeometry( Mesh::Geometry::thetaMode );
-        E.setGeometryParameters( geometryParameters );
-        E.setDataOrder( Mesh::DataOrder::C );
-        E.setGridSpacing( std::vector<double>{1.0, 1.0} );
-        E.setGridGlobalOffset( std::vector<double>{0.0, 0.0} );
-        E.setGridUnitSI( 1.0 );
-        E.setAxisLabels( std::vector< std::string >{"r", "z"} );
-        std::map< UnitDimension, double > const unitDimensions{
-            {UnitDimension::I, 1.0},
-            {UnitDimension::J, 2.0}
-        };
-        E.setUnitDimension( unitDimensions );
-        E.setTimeOffset( 1.e-12 * double(i) );
+        E.setGeometry(Mesh::Geometry::thetaMode);
+        E.setGeometryParameters(geometryParameters);
+        E.setDataOrder(Mesh::DataOrder::C);
+        E.setGridSpacing(std::vector<double>{1.0, 1.0});
+        E.setGridGlobalOffset(std::vector<double>{0.0, 0.0});
+        E.setGridUnitSI(1.0);
+        E.setAxisLabels(std::vector<std::string>{"r", "z"});
+        std::map<UnitDimension, double> const unitDimensions{
+            {UnitDimension::I, 1.0}, {UnitDimension::J, 2.0}};
+        E.setUnitDimension(unitDimensions);
+        E.setTimeOffset(1.e-12 * double(i));
 
         auto E_z = E["z"];
-        E_z.setUnitSI( 10. );
-        E_z.setPosition(std::vector< double >{0.0, 0.5});
-        E_z.resetDataset( Dataset(Datatype::FLOAT, {num_fields, N_r, N_z}) ); // (modes, r, z) see setGeometryParameters
-        E_z.makeConstant( static_cast< float >(42.54) );
-
-        // write all modes at once (otherwise iterate over modes and first index)
+        E_z.setUnitSI(10.);
+        E_z.setPosition(std::vector<double>{0.0, 0.5});
+        E_z.resetDataset(Dataset(
+            Datatype::FLOAT,
+            {num_fields, N_r, N_z})); // (modes, r, z) see setGeometryParameters
+        E_z.makeConstant(static_cast<float>(42.54));
+
+        // write all modes at once (otherwise iterate over modes and first
+        // index)
         auto E_r = E["r"];
-        E_r.setUnitSI( 10. );
-        E_r.setPosition(std::vector< double >{0.5, 0.0});
-        E_r.resetDataset(
-            Dataset(Datatype::FLOAT, {num_fields, N_r, N_z})
-        );
+        E_r.setUnitSI(10.);
+        E_r.setPosition(std::vector<double>{0.5, 0.0});
+        E_r.resetDataset(Dataset(Datatype::FLOAT, {num_fields, N_r, N_z}));
         E_r.storeChunk(E_r_data, Offset{0, 0, 0}, Extent{num_fields, N_r, N_z});
 
         auto E_t = E["t"];
-        E_t.setUnitSI( 10. );
-        E_t.setPosition(std::vector< double >{0.0, 0.0});
-        E_t.resetDataset(
-            Dataset(Datatype::DOUBLE, {num_fields, N_r, N_z})
-        );
+        E_t.setUnitSI(10.);
+        E_t.setPosition(std::vector<double>{0.0, 0.0});
+        E_t.resetDataset(Dataset(Datatype::DOUBLE, {num_fields, N_r, N_z}));
         E_t.storeChunk(E_t_data, Offset{0, 0, 0}, Extent{num_fields, N_r, N_z});
 
         o.flush();
     }
 }
 
-TEST_CASE( "sample_write_thetaMode", "[serial][thetaMode]" )
+TEST_CASE("sample_write_thetaMode", "[serial][thetaMode]")
 {
-    for (auto const & t: testedFileExtensions())
+    for (auto const &t : testedFileExtensions())
     {
-        sample_write_thetaMode( t );
+        sample_write_thetaMode(t);
 
-        Series list{ std::string("../samples/thetaMode_%05T.").append(t), Access::READ_ONLY };
-        helper::listSeries( list );
+        Series list{
+            std::string("../samples/thetaMode_%05T.").append(t),
+            Access::READ_ONLY};
+        helper::listSeries(list);
     }
 }
 
-inline
-void bool_test(const std::string & backend)
+inline void bool_test(const std::string &backend)
 {
     {
         Series o = Series("../samples/serial_bool." + backend, Access::CREATE);
@@ -1856,30 +2138,34 @@ void bool_test(const std::string & backend)
         o.setAttribute("Bool attribute (false)", false);
     }
     {
-        Series o = Series("../samples/serial_bool." + backend, Access::READ_ONLY);
+        Series o =
+            Series("../samples/serial_bool." + backend, Access::READ_ONLY);
 
         auto attrs = o.attributes();
-        REQUIRE(std::count(attrs.begin(), attrs.end(), "Bool attribute (true)") == 1);
-        REQUIRE(std::count(attrs.begin(), attrs.end(), "Bool attribute (false)") == 1);
-        REQUIRE(o.getAttribute("Bool attribute (true)").get< bool >() == true);
-        REQUIRE(o.getAttribute("Bool attribute (false)").get< bool >() == false);
+        REQUIRE(
+            std::count(attrs.begin(), attrs.end(), "Bool attribute (true)") ==
+            1);
+        REQUIRE(
+            std::count(attrs.begin(), attrs.end(), "Bool attribute (false)") ==
+            1);
+        REQUIRE(o.getAttribute("Bool attribute (true)").get<bool>() == true);
+        REQUIRE(o.getAttribute("Bool attribute (false)").get<bool>() == false);
     }
     {
-        Series list{ "../samples/serial_bool."
+ backend, Access::READ_ONLY }; - helper::listSeries( list ); + Series list{"../samples/serial_bool." + backend, Access::READ_ONLY}; + helper::listSeries(list); } } -TEST_CASE( "bool_test", "[serial]" ) +TEST_CASE("bool_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - bool_test( t ); + bool_test(t); } } -inline -void patch_test(const std::string & backend) +inline void patch_test(const std::string &backend) { Series o = Series("../samples/serial_patch." + backend, Access::CREATE); @@ -1894,44 +2180,55 @@ void patch_test(const std::string & backend) uint64_t const patch_idx = 0u; uint64_t const num_patches = 1u; - auto const dset_n = Dataset(determineDatatype(), {num_patches, }); - e.particlePatches["numParticles"][RecordComponent::SCALAR].resetDataset(dset_n); - e.particlePatches["numParticles"][RecordComponent::SCALAR].store(patch_idx, num_particles); - e.particlePatches["numParticlesOffset"][RecordComponent::SCALAR].resetDataset(dset_n); - e.particlePatches["numParticlesOffset"][RecordComponent::SCALAR].store(patch_idx, uint64_t(0u)); - - auto const dset_f = Dataset(determineDatatype(), {num_patches, }); + auto const dset_n = Dataset( + determineDatatype(), + { + num_patches, + }); + e.particlePatches["numParticles"][RecordComponent::SCALAR].resetDataset( + dset_n); + e.particlePatches["numParticles"][RecordComponent::SCALAR].store( + patch_idx, num_particles); + e.particlePatches["numParticlesOffset"][RecordComponent::SCALAR] + .resetDataset(dset_n); + e.particlePatches["numParticlesOffset"][RecordComponent::SCALAR].store( + patch_idx, uint64_t(0u)); + + auto const dset_f = Dataset( + determineDatatype(), + { + num_patches, + }); e.particlePatches["offset"]["x"].resetDataset(dset_f); e.particlePatches["offset"]["x"].store(patch_idx, 0.f); e.particlePatches["extent"]["x"].resetDataset(dset_f); e.particlePatches["extent"]["x"].store(patch_idx, 50.f); } -TEST_CASE( "patch_test", "[serial]" ) +TEST_CASE("patch_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - patch_test( t ); + patch_test(t); Series list{"../samples/serial_patch." + t, Access::READ_ONLY}; helper::listSeries(list); } } -inline -void deletion_test(const std::string & backend) +inline void deletion_test(const std::string &backend) { Series o = Series("../samples/serial_deletion." 
+ backend, Access::CREATE); - - o.setAttribute("removed", - "this attribute will be removed after being written to disk"); + o.setAttribute( + "removed", + "this attribute will be removed after being written to disk"); o.flush(); o.deleteAttribute("removed"); o.flush(); - ParticleSpecies& e = o.iterations[1].particles["e"]; + ParticleSpecies &e = o.iterations[1].particles["e"]; auto dset = Dataset(Datatype::DOUBLE, {1}); e["position"][RecordComponent::SCALAR].resetDataset(dset); e["position"][RecordComponent::SCALAR].makeConstant(20.0); @@ -1950,7 +2247,8 @@ void deletion_test(const std::string & backend) e["deletion_scalar_two"][RecordComponent::SCALAR].resetDataset(dset); o.flush(); - e["deletion_scalar_two"].erase(e["deletion_scalar_two"].find(RecordComponent::SCALAR)); + e["deletion_scalar_two"].erase( + e["deletion_scalar_two"].find(RecordComponent::SCALAR)); e.erase(e.find("deletion_scalar_two")); o.flush(); @@ -1964,66 +2262,74 @@ void deletion_test(const std::string & backend) o.flush(); } -TEST_CASE( "deletion_test", "[serial]" ) +TEST_CASE("deletion_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { if (t == "bp") { continue; // deletion not implemented in ADIOS1 backend } - deletion_test( t ); + deletion_test(t); } } -inline -void read_missing_throw_test(const std::string & backend) +inline void read_missing_throw_test(const std::string &backend) { try { - auto s = Series("this/does/definitely/not/exist." + backend, Access::READ_ONLY); + auto s = Series( + "this/does/definitely/not/exist." + backend, Access::READ_ONLY); } - catch( ... ) + catch (...) { - std::cout << "read missing: successfully caught! " << backend << std::endl; + std::cout << "read missing: successfully caught! " << backend + << std::endl; } } -TEST_CASE( "read_missing_throw_test", "[serial]" ) +TEST_CASE("read_missing_throw_test", "[serial]") { - for (auto const & t: testedFileExtensions()) - read_missing_throw_test( t ); + for (auto const &t : testedFileExtensions()) + read_missing_throw_test(t); } -inline -void optional_paths_110_test(const std::string & backend) +inline void optional_paths_110_test(const std::string &backend) { try { { - Series s = Series("../samples/issue-sample/no_fields/data%T." + backend, Access::READ_ONLY); + Series s = Series( + "../samples/issue-sample/no_fields/data%T." + backend, + Access::READ_ONLY); auto attrs = s.attributes(); REQUIRE(std::count(attrs.begin(), attrs.end(), "meshesPath") == 1); - REQUIRE(std::count(attrs.begin(), attrs.end(), "particlesPath") == 1); + REQUIRE( + std::count(attrs.begin(), attrs.end(), "particlesPath") == 1); REQUIRE(s.iterations[400].meshes.empty()); REQUIRE(s.iterations[400].particles.size() == 1); } { - Series s = Series("../samples/issue-sample/no_particles/data%T." + backend, Access::READ_ONLY); + Series s = Series( + "../samples/issue-sample/no_particles/data%T." + backend, + Access::READ_ONLY); auto attrs = s.attributes(); REQUIRE(std::count(attrs.begin(), attrs.end(), "meshesPath") == 1); - REQUIRE(std::count(attrs.begin(), attrs.end(), "particlesPath") == 1); + REQUIRE( + std::count(attrs.begin(), attrs.end(), "particlesPath") == 1); REQUIRE(s.iterations[400].meshes.size() == 2); REQUIRE(s.iterations[400].particles.empty()); } - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "issue sample not accessible. (" << e.what() << ")\n"; } { - Series s = Series("../samples/no_meshes_1.1.0_compliant." 
+ backend, Access::CREATE); + Series s = Series( + "../samples/no_meshes_1.1.0_compliant." + backend, Access::CREATE); auto foo = s.iterations[1].particles["foo"]; Dataset dset = Dataset(Datatype::DOUBLE, {1}); foo["position"][RecordComponent::SCALAR].resetDataset(dset); @@ -2033,14 +2339,18 @@ void optional_paths_110_test(const std::string & backend) } { - Series s = Series("../samples/no_particles_1.1.0_compliant." + backend, Access::CREATE); + Series s = Series( + "../samples/no_particles_1.1.0_compliant." + backend, + Access::CREATE); auto foo = s.iterations[1].meshes["foo"]; Dataset dset = Dataset(Datatype::DOUBLE, {1}); foo[RecordComponent::SCALAR].resetDataset(dset); } { - Series s = Series("../samples/no_meshes_1.1.0_compliant." + backend, Access::READ_ONLY); + Series s = Series( + "../samples/no_meshes_1.1.0_compliant." + backend, + Access::READ_ONLY); auto attrs = s.attributes(); REQUIRE(std::count(attrs.begin(), attrs.end(), "meshesPath") == 0); REQUIRE(std::count(attrs.begin(), attrs.end(), "particlesPath") == 1); @@ -2049,7 +2359,9 @@ void optional_paths_110_test(const std::string & backend) } { - Series s = Series("../samples/no_particles_1.1.0_compliant." + backend, Access::READ_ONLY); + Series s = Series( + "../samples/no_particles_1.1.0_compliant." + backend, + Access::READ_ONLY); auto attrs = s.attributes(); REQUIRE(std::count(attrs.begin(), attrs.end(), "meshesPath") == 1); REQUIRE(std::count(attrs.begin(), attrs.end(), "particlesPath") == 0); @@ -2059,32 +2371,26 @@ void optional_paths_110_test(const std::string & backend) } void git_early_chunk_query( - std::string const filename, - std::string const species, - int const step -) + std::string const filename, std::string const species, int const step) { try { - Series s = Series( - filename, - Access::READ_ONLY - ); + Series s = Series(filename, Access::READ_ONLY); auto electrons = s.iterations[step].particles[species]; - for( auto & r : electrons ) + for (auto &r : electrons) { std::cout << r.first << ": "; - for( auto & r_c : r.second ) + for (auto &r_c : r.second) { std::cout << r_c.first << "\n"; auto chunks = r_c.second.availableChunks(); std::cout << "no. of chunks: " << chunks.size() << std::endl; } } - - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; @@ -2092,35 +2398,45 @@ void git_early_chunk_query( } #if openPMD_HAVE_HDF5 -TEST_CASE( "empty_alternate_fbpic", "[serial][hdf5]" ) +TEST_CASE("empty_alternate_fbpic", "[serial][hdf5]") { // Ref.: https://github.com/openPMD/openPMD-viewer/issues/296 try { { - Series s = Series("../samples/issue-sample/empty_alternate_fbpic_%T.h5", Access::READ_ONLY); + Series s = Series( + "../samples/issue-sample/empty_alternate_fbpic_%T.h5", + Access::READ_ONLY); REQUIRE(s.iterations.contains(50)); REQUIRE(s.iterations[50].particles.contains("electrons")); - REQUIRE(s.iterations[50].particles["electrons"].contains("momentum")); - REQUIRE(s.iterations[50].particles["electrons"]["momentum"].contains("x")); - auto empty_rc = s.iterations[50].particles["electrons"]["momentum"]["x"]; + REQUIRE( + s.iterations[50].particles["electrons"].contains("momentum")); + REQUIRE( + s.iterations[50].particles["electrons"]["momentum"].contains( + "x")); + auto empty_rc = + s.iterations[50].particles["electrons"]["momentum"]["x"]; REQUIRE(empty_rc.empty()); REQUIRE(empty_rc.getDimensionality() == 1); REQUIRE(empty_rc.getExtent() == Extent{0}); - REQUIRE(isSame(empty_rc.getDatatype(), determineDatatype< double >())); + REQUIRE( + isSame(empty_rc.getDatatype(), determineDatatype())); } { - Series list{ "../samples/issue-sample/empty_alternate_fbpic_%T.h5", Access::READ_ONLY }; - helper::listSeries( list ); + Series list{ + "../samples/issue-sample/empty_alternate_fbpic_%T.h5", + Access::READ_ONLY}; + helper::listSeries(list); } - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "issue sample not accessible. (" << e.what() << ")\n"; } } -TEST_CASE( "available_chunks_test_hdf5", "[serial][json]" ) +TEST_CASE("available_chunks_test_hdf5", "[serial][json]") { /* * This test is HDF5 specific @@ -2133,45 +2449,44 @@ TEST_CASE( "available_chunks_test_hdf5", "[serial][json]" ) constexpr unsigned height = 10; std::string name = "../samples/available_chunks.h5"; - std::vector< int > data{ 2, 4, 6, 8 }; + std::vector data{2, 4, 6, 8}; { - Series write( name, Access::CREATE ); - Iteration it0 = write.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { height, 4 } } ); - for( unsigned line = 2; line < 7; ++line ) + Series write(name, Access::CREATE); + Iteration it0 = write.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {height, 4}}); + for (unsigned line = 2; line < 7; ++line) { - E_x.storeChunk( data, { line, 0 }, { 1, 4 } ); + E_x.storeChunk(data, {line, 0}, {1, 4}); } - for( unsigned line = 7; line < 9; ++line ) + for (unsigned line = 7; line < 9; ++line) { - E_x.storeChunk( data, { line, 0 }, { 1, 2 } ); + E_x.storeChunk(data, {line, 0}, {1, 2}); } - E_x.storeChunk( data, { 8, 3 }, {2, 1 } ); + E_x.storeChunk(data, {8, 3}, {2, 1}); it0.close(); } { - Series read( name, Access::READ_ONLY ); - Iteration it0 = read.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; + Series read(name, Access::READ_ONLY); + Iteration it0 = read.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; ChunkTable table = E_x.availableChunks(); - REQUIRE( table.size() == 1 ); + REQUIRE(table.size() == 1); /* * Explicitly convert things to bool, so Catch doesn't get the splendid * idea to print the Chunk struct. 
*/ - REQUIRE( - bool( table[ 0 ] == WrittenChunkInfo( { 0, 0 }, { height, 4 } ) ) ); + REQUIRE(bool(table[0] == WrittenChunkInfo({0, 0}, {height, 4}))); } } -TEST_CASE( "optional_paths_110_test", "[serial]" ) +TEST_CASE("optional_paths_110_test", "[serial]") { optional_paths_110_test("h5"); // samples only present for hdf5 } -TEST_CASE( "git_hdf5_sample_structure_test", "[serial][hdf5]" ) +TEST_CASE("git_hdf5_sample_structure_test", "[serial][hdf5]") { #if openPMD_USE_INVASIVE_TESTS try @@ -2182,40 +2497,129 @@ TEST_CASE( "git_hdf5_sample_structure_test", "[serial][hdf5]" ) REQUIRE(o.iterations.parent() == getWritable(&o)); REQUIRE_THROWS_AS(o.iterations[42], std::out_of_range); REQUIRE(o.iterations[100].parent() == getWritable(&o.iterations)); - REQUIRE(o.iterations[100].meshes.parent() == getWritable(&o.iterations[100])); - REQUIRE(o.iterations[100].meshes["E"].parent() == getWritable(&o.iterations[100].meshes)); - REQUIRE(o.iterations[100].meshes["E"]["x"].parent() == getWritable(&o.iterations[100].meshes["E"])); - REQUIRE(o.iterations[100].meshes["E"]["y"].parent() == getWritable(&o.iterations[100].meshes["E"])); - REQUIRE(o.iterations[100].meshes["E"]["z"].parent() == getWritable(&o.iterations[100].meshes["E"])); - REQUIRE(o.iterations[100].meshes["rho"].parent() == getWritable(&o.iterations[100].meshes)); - REQUIRE(o.iterations[100].meshes["rho"][MeshRecordComponent::SCALAR].parent() == getWritable(&o.iterations[100].meshes)); - REQUIRE_THROWS_AS(o.iterations[100].meshes["cherries"], std::out_of_range); - REQUIRE(o.iterations[100].particles.parent() == getWritable(&o.iterations[100])); - REQUIRE(o.iterations[100].particles["electrons"].parent() == getWritable(&o.iterations[100].particles)); - REQUIRE(o.iterations[100].particles["electrons"]["charge"].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["charge"][RecordComponent::SCALAR].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["mass"].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["mass"][RecordComponent::SCALAR].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["momentum"].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["momentum"]["x"].parent() == getWritable(&o.iterations[100].particles["electrons"]["momentum"])); - REQUIRE(o.iterations[100].particles["electrons"]["momentum"]["y"].parent() == getWritable(&o.iterations[100].particles["electrons"]["momentum"])); - REQUIRE(o.iterations[100].particles["electrons"]["momentum"]["z"].parent() == getWritable(&o.iterations[100].particles["electrons"]["momentum"])); - REQUIRE(o.iterations[100].particles["electrons"]["position"].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["position"]["x"].parent() == getWritable(&o.iterations[100].particles["electrons"]["position"])); - REQUIRE(o.iterations[100].particles["electrons"]["position"]["y"].parent() == getWritable(&o.iterations[100].particles["electrons"]["position"])); - REQUIRE(o.iterations[100].particles["electrons"]["position"]["z"].parent() == getWritable(&o.iterations[100].particles["electrons"]["position"])); - REQUIRE(o.iterations[100].particles["electrons"]["positionOffset"].parent() == 
getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["positionOffset"]["x"].parent() == getWritable(&o.iterations[100].particles["electrons"]["positionOffset"])); - REQUIRE(o.iterations[100].particles["electrons"]["positionOffset"]["y"].parent() == getWritable(&o.iterations[100].particles["electrons"]["positionOffset"])); - REQUIRE(o.iterations[100].particles["electrons"]["positionOffset"]["z"].parent() == getWritable(&o.iterations[100].particles["electrons"]["positionOffset"])); - REQUIRE(o.iterations[100].particles["electrons"]["weighting"].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["weighting"][RecordComponent::SCALAR].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE_THROWS_AS(o.iterations[100].particles["electrons"]["numberOfLegs"], std::out_of_range); - REQUIRE_THROWS_AS(o.iterations[100].particles["apples"], std::out_of_range); + REQUIRE( + o.iterations[100].meshes.parent() == + getWritable(&o.iterations[100])); + REQUIRE( + o.iterations[100].meshes["E"].parent() == + getWritable(&o.iterations[100].meshes)); + REQUIRE( + o.iterations[100].meshes["E"]["x"].parent() == + getWritable(&o.iterations[100].meshes["E"])); + REQUIRE( + o.iterations[100].meshes["E"]["y"].parent() == + getWritable(&o.iterations[100].meshes["E"])); + REQUIRE( + o.iterations[100].meshes["E"]["z"].parent() == + getWritable(&o.iterations[100].meshes["E"])); + REQUIRE( + o.iterations[100].meshes["rho"].parent() == + getWritable(&o.iterations[100].meshes)); + REQUIRE( + o.iterations[100] + .meshes["rho"][MeshRecordComponent::SCALAR] + .parent() == getWritable(&o.iterations[100].meshes)); + REQUIRE_THROWS_AS( + o.iterations[100].meshes["cherries"], std::out_of_range); + REQUIRE( + o.iterations[100].particles.parent() == + getWritable(&o.iterations[100])); + REQUIRE( + o.iterations[100].particles["electrons"].parent() == + getWritable(&o.iterations[100].particles)); + REQUIRE( + o.iterations[100].particles["electrons"]["charge"].parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["charge"][RecordComponent::SCALAR] + .parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100].particles["electrons"]["mass"].parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["mass"][RecordComponent::SCALAR] + .parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100].particles["electrons"]["momentum"].parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["momentum"]["x"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"]["momentum"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["momentum"]["y"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"]["momentum"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["momentum"]["z"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"]["momentum"])); + REQUIRE( + o.iterations[100].particles["electrons"]["position"].parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["position"]["x"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"]["position"])); + REQUIRE( 
+ o.iterations[100] + .particles["electrons"]["position"]["y"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"]["position"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["position"]["z"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"]["position"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["positionOffset"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["positionOffset"]["x"] + .parent() == + getWritable( + &o.iterations[100].particles["electrons"]["positionOffset"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["positionOffset"]["y"] + .parent() == + getWritable( + &o.iterations[100].particles["electrons"]["positionOffset"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["positionOffset"]["z"] + .parent() == + getWritable( + &o.iterations[100].particles["electrons"]["positionOffset"])); + REQUIRE( + o.iterations[100].particles["electrons"]["weighting"].parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["weighting"][RecordComponent::SCALAR] + .parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE_THROWS_AS( + o.iterations[100].particles["electrons"]["numberOfLegs"], + std::out_of_range); + REQUIRE_THROWS_AS( + o.iterations[100].particles["apples"], std::out_of_range); int32_t i32 = 32; REQUIRE_THROWS(o.setAttribute("setAttributeFail", i32)); - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. (" << e.what() << ")\n"; return; @@ -2225,7 +2629,7 @@ TEST_CASE( "git_hdf5_sample_structure_test", "[serial][hdf5]" ) #endif } -TEST_CASE( "git_hdf5_sample_attribute_test", "[serial][hdf5]" ) +TEST_CASE("git_hdf5_sample_attribute_test", "[serial][hdf5]") { try { @@ -2243,88 +2647,92 @@ TEST_CASE( "git_hdf5_sample_attribute_test", "[serial][hdf5]" ) REQUIRE(o.iterations.size() == 5); REQUIRE(o.iterations.count(100) == 1); - Iteration& iteration_100 = o.iterations[100]; - REQUIRE(iteration_100.time< double >() == 3.2847121452090077e-14); - REQUIRE(iteration_100.dt< double >() == 3.2847121452090093e-16); + Iteration &iteration_100 = o.iterations[100]; + REQUIRE(iteration_100.time() == 3.2847121452090077e-14); + REQUIRE(iteration_100.dt() == 3.2847121452090093e-16); REQUIRE(iteration_100.timeUnitSI() == 1.0); REQUIRE(iteration_100.meshes.size() == 2); REQUIRE(iteration_100.meshes.count("E") == 1); REQUIRE(iteration_100.meshes.count("rho") == 1); - std::vector< std::string > al{"x", "y", "z"}; - std::vector< double > gs{8.0000000000000007e-07, - 8.0000000000000007e-07, - 1.0000000000000001e-07}; - std::vector< double > ggo{-1.0000000000000001e-05, - -1.0000000000000001e-05, - -5.1999999999999993e-06}; - std::array< double, 7 > ud{{1., 1., -3., -1., 0., 0., 0.}}; - Mesh& E = iteration_100.meshes["E"]; + std::vector al{"x", "y", "z"}; + std::vector gs{ + 8.0000000000000007e-07, + 8.0000000000000007e-07, + 1.0000000000000001e-07}; + std::vector ggo{ + -1.0000000000000001e-05, + -1.0000000000000001e-05, + -5.1999999999999993e-06}; + std::array ud{{1., 1., -3., -1., 0., 0., 0.}}; + Mesh &E = iteration_100.meshes["E"]; REQUIRE(E.geometry() == Mesh::Geometry::cartesian); REQUIRE(E.dataOrder() == Mesh::DataOrder::C); REQUIRE(E.axisLabels() == al); - REQUIRE(E.gridSpacing< double >() == gs); + REQUIRE(E.gridSpacing() == gs); REQUIRE(E.gridGlobalOffset() == ggo); 
REQUIRE(E.gridUnitSI() == 1.0); REQUIRE(E.unitDimension() == ud); - REQUIRE(E.timeOffset< double >() == 0.0); + REQUIRE(E.timeOffset() == 0.0); REQUIRE(E.size() == 3); REQUIRE(E.count("x") == 1); REQUIRE(E.count("y") == 1); REQUIRE(E.count("z") == 1); - std::vector< double > p{0.5, 0., 0.}; + std::vector p{0.5, 0., 0.}; Extent e{26, 26, 201}; - MeshRecordComponent& E_x = E["x"]; + MeshRecordComponent &E_x = E["x"]; REQUIRE(E_x.unitSI() == 1.0); - REQUIRE(E_x.position< double >() == p); + REQUIRE(E_x.position() == p); REQUIRE(E_x.getDatatype() == Datatype::DOUBLE); REQUIRE(E_x.getExtent() == e); REQUIRE(E_x.getDimensionality() == 3); p = {0., 0.5, 0.}; - MeshRecordComponent& E_y = E["y"]; + MeshRecordComponent &E_y = E["y"]; REQUIRE(E_y.unitSI() == 1.0); - REQUIRE(E_y.position< double >() == p); + REQUIRE(E_y.position() == p); REQUIRE(E_y.getDatatype() == Datatype::DOUBLE); REQUIRE(E_y.getExtent() == e); REQUIRE(E_y.getDimensionality() == 3); p = {0., 0., 0.5}; - MeshRecordComponent& E_z = E["z"]; + MeshRecordComponent &E_z = E["z"]; REQUIRE(E_z.unitSI() == 1.0); - REQUIRE(E_z.position< double >() == p); + REQUIRE(E_z.position() == p); REQUIRE(E_z.getDatatype() == Datatype::DOUBLE); REQUIRE(E_z.getExtent() == e); REQUIRE(E_z.getDimensionality() == 3); - gs = {8.0000000000000007e-07, - 8.0000000000000007e-07, - 1.0000000000000001e-07}; - ggo = {-1.0000000000000001e-05, - -1.0000000000000001e-05, - -5.1999999999999993e-06}; - ud = {{-3., 0., 1., 1., 0., 0., 0.}}; - Mesh& rho = iteration_100.meshes["rho"]; + gs = { + 8.0000000000000007e-07, + 8.0000000000000007e-07, + 1.0000000000000001e-07}; + ggo = { + -1.0000000000000001e-05, + -1.0000000000000001e-05, + -5.1999999999999993e-06}; + ud = {{-3., 0., 1., 1., 0., 0., 0.}}; + Mesh &rho = iteration_100.meshes["rho"]; REQUIRE(rho.geometry() == Mesh::Geometry::cartesian); REQUIRE(rho.dataOrder() == Mesh::DataOrder::C); REQUIRE(rho.axisLabels() == al); - REQUIRE(rho.gridSpacing< double >() == gs); + REQUIRE(rho.gridSpacing() == gs); REQUIRE(rho.gridGlobalOffset() == ggo); REQUIRE(rho.gridUnitSI() == 1.0); REQUIRE(rho.unitDimension() == ud); - REQUIRE(rho.timeOffset< double >() == 0.0); + REQUIRE(rho.timeOffset() == 0.0); REQUIRE(rho.size() == 1); REQUIRE(rho.count(MeshRecordComponent::SCALAR) == 1); p = {0., 0., 0.}; e = {26, 26, 201}; - MeshRecordComponent& rho_scalar = rho[MeshRecordComponent::SCALAR]; + MeshRecordComponent &rho_scalar = rho[MeshRecordComponent::SCALAR]; REQUIRE(rho_scalar.unitSI() == 1.0); - REQUIRE(rho_scalar.position< double >() == p); + REQUIRE(rho_scalar.position() == p); REQUIRE(rho_scalar.getDatatype() == Datatype::DOUBLE); REQUIRE(rho_scalar.getExtent() == e); REQUIRE(rho_scalar.getDimensionality() == 3); @@ -2332,7 +2740,7 @@ TEST_CASE( "git_hdf5_sample_attribute_test", "[serial][hdf5]" ) REQUIRE(iteration_100.particles.size() == 1); REQUIRE(iteration_100.particles.count("electrons") == 1); - ParticleSpecies& electrons = iteration_100.particles["electrons"]; + ParticleSpecies &electrons = iteration_100.particles["electrons"]; REQUIRE(electrons.size() == 6); REQUIRE(electrons.count("charge") == 1); @@ -2342,190 +2750,214 @@ TEST_CASE( "git_hdf5_sample_attribute_test", "[serial][hdf5]" ) REQUIRE(electrons.count("positionOffset") == 1); REQUIRE(electrons.count("weighting") == 1); - ud = {{0., 0., 1., 1., 0., 0., 0.}}; - Record& charge = electrons["charge"]; + ud = {{0., 0., 1., 1., 0., 0., 0.}}; + Record &charge = electrons["charge"]; REQUIRE(charge.unitDimension() == ud); - REQUIRE(charge.timeOffset< double >() == 0.0); + 
REQUIRE(charge.timeOffset() == 0.0); REQUIRE(charge.size() == 1); REQUIRE(charge.count(RecordComponent::SCALAR) == 1); e = {85625}; - RecordComponent& charge_scalar = charge[RecordComponent::SCALAR]; + RecordComponent &charge_scalar = charge[RecordComponent::SCALAR]; REQUIRE(charge_scalar.unitSI() == 1.0); REQUIRE(charge_scalar.getDatatype() == Datatype::DOUBLE); REQUIRE(charge_scalar.getDimensionality() == 1); REQUIRE(charge_scalar.getExtent() == e); - ud = {{1., 0., 0., 0., 0., 0., 0.}}; - Record& mass = electrons["mass"]; + ud = {{1., 0., 0., 0., 0., 0., 0.}}; + Record &mass = electrons["mass"]; REQUIRE(mass.unitDimension() == ud); - REQUIRE(mass.timeOffset< double >() == 0.0); + REQUIRE(mass.timeOffset() == 0.0); REQUIRE(mass.size() == 1); REQUIRE(mass.count(RecordComponent::SCALAR) == 1); - RecordComponent& mass_scalar = mass[RecordComponent::SCALAR]; + RecordComponent &mass_scalar = mass[RecordComponent::SCALAR]; REQUIRE(mass_scalar.unitSI() == 1.0); REQUIRE(mass_scalar.getDatatype() == Datatype::DOUBLE); REQUIRE(mass_scalar.getDimensionality() == 1); REQUIRE(mass_scalar.getExtent() == e); - ud = {{1., 1., -1., 0., 0., 0., 0.}}; - Record& momentum = electrons["momentum"]; + ud = {{1., 1., -1., 0., 0., 0., 0.}}; + Record &momentum = electrons["momentum"]; REQUIRE(momentum.unitDimension() == ud); - REQUIRE(momentum.timeOffset< double >() == 0.0); + REQUIRE(momentum.timeOffset() == 0.0); REQUIRE(momentum.size() == 3); REQUIRE(momentum.count("x") == 1); REQUIRE(momentum.count("y") == 1); REQUIRE(momentum.count("z") == 1); - RecordComponent& momentum_x = momentum["x"]; + RecordComponent &momentum_x = momentum["x"]; REQUIRE(momentum_x.unitSI() == 1.0); REQUIRE(momentum_x.getDatatype() == Datatype::DOUBLE); REQUIRE(momentum_x.getDimensionality() == 1); REQUIRE(momentum_x.getExtent() == e); - RecordComponent& momentum_y = momentum["y"]; + RecordComponent &momentum_y = momentum["y"]; REQUIRE(momentum_y.unitSI() == 1.0); REQUIRE(momentum_y.getDatatype() == Datatype::DOUBLE); REQUIRE(momentum_y.getDimensionality() == 1); REQUIRE(momentum_y.getExtent() == e); - RecordComponent& momentum_z = momentum["z"]; + RecordComponent &momentum_z = momentum["z"]; REQUIRE(momentum_z.unitSI() == 1.0); REQUIRE(momentum_z.getDatatype() == Datatype::DOUBLE); REQUIRE(momentum_z.getDimensionality() == 1); REQUIRE(momentum_z.getExtent() == e); - ud = {{1., 0., 0., 0., 0., 0., 0.}}; - Record& position = electrons["position"]; + ud = {{1., 0., 0., 0., 0., 0., 0.}}; + Record &position = electrons["position"]; REQUIRE(position.unitDimension() == ud); - REQUIRE(position.timeOffset< double >() == 0.0); + REQUIRE(position.timeOffset() == 0.0); REQUIRE(position.size() == 3); REQUIRE(position.count("x") == 1); REQUIRE(position.count("y") == 1); REQUIRE(position.count("z") == 1); - RecordComponent& position_x = position["x"]; + RecordComponent &position_x = position["x"]; REQUIRE(position_x.unitSI() == 1.0); REQUIRE(position_x.getDatatype() == Datatype::DOUBLE); REQUIRE(position_x.getDimensionality() == 1); REQUIRE(position_x.getExtent() == e); - RecordComponent& position_y = position["y"]; + RecordComponent &position_y = position["y"]; REQUIRE(position_y.unitSI() == 1.0); REQUIRE(position_y.getDatatype() == Datatype::DOUBLE); REQUIRE(position_y.getDimensionality() == 1); REQUIRE(position_y.getExtent() == e); - RecordComponent& position_z = position["z"]; + RecordComponent &position_z = position["z"]; REQUIRE(position_z.unitSI() == 1.0); REQUIRE(position_z.getDatatype() == Datatype::DOUBLE); 
REQUIRE(position_z.getDimensionality() == 1); REQUIRE(position_z.getExtent() == e); - Record& positionOffset = electrons["positionOffset"]; + Record &positionOffset = electrons["positionOffset"]; REQUIRE(positionOffset.unitDimension() == ud); - REQUIRE(positionOffset.timeOffset< double >() == 0.0); + REQUIRE(positionOffset.timeOffset() == 0.0); REQUIRE(positionOffset.size() == 3); REQUIRE(positionOffset.count("x") == 1); REQUIRE(positionOffset.count("y") == 1); REQUIRE(positionOffset.count("z") == 1); - RecordComponent& positionOffset_x = positionOffset["x"]; + RecordComponent &positionOffset_x = positionOffset["x"]; REQUIRE(positionOffset_x.unitSI() == 1.0); REQUIRE(positionOffset_x.getDatatype() == Datatype::DOUBLE); REQUIRE(positionOffset_x.getDimensionality() == 1); REQUIRE(positionOffset_x.getExtent() == e); - RecordComponent& positionOffset_y = positionOffset["y"]; + RecordComponent &positionOffset_y = positionOffset["y"]; REQUIRE(positionOffset_y.unitSI() == 1.0); REQUIRE(positionOffset_y.getDatatype() == Datatype::DOUBLE); REQUIRE(positionOffset_y.getDimensionality() == 1); REQUIRE(positionOffset_y.getExtent() == e); - RecordComponent& positionOffset_z = positionOffset["z"]; + RecordComponent &positionOffset_z = positionOffset["z"]; REQUIRE(positionOffset_z.unitSI() == 1.0); REQUIRE(positionOffset_z.getDatatype() == Datatype::DOUBLE); REQUIRE(positionOffset_z.getDimensionality() == 1); REQUIRE(positionOffset_z.getExtent() == e); - ud = {{0., 0., 0., 0., 0., 0., 0.}}; - Record& weighting = electrons["weighting"]; + ud = {{0., 0., 0., 0., 0., 0., 0.}}; + Record &weighting = electrons["weighting"]; REQUIRE(weighting.unitDimension() == ud); - REQUIRE(weighting.timeOffset< double >() == 0.0); + REQUIRE(weighting.timeOffset() == 0.0); REQUIRE(weighting.size() == 1); REQUIRE(weighting.count(RecordComponent::SCALAR) == 1); - RecordComponent& weighting_scalar = weighting[RecordComponent::SCALAR]; + RecordComponent &weighting_scalar = weighting[RecordComponent::SCALAR]; REQUIRE(weighting_scalar.unitSI() == 1.0); REQUIRE(weighting_scalar.getDatatype() == Datatype::DOUBLE); REQUIRE(weighting_scalar.getDimensionality() == 1); REQUIRE(weighting_scalar.getExtent() == e); - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; } } -TEST_CASE( "git_hdf5_sample_content_test", "[serial][hdf5]" ) +TEST_CASE("git_hdf5_sample_content_test", "[serial][hdf5]") { try { Series o = Series("../samples/git-sample/data%T.h5", Access::READ_ONLY); { - double actual[3][3][3] = {{{-1.9080703683727052e-09, -1.5632650729457964e-10, 1.1497536256399599e-09}, - {-1.9979540244463578e-09, -2.5512036927466397e-10, 1.0402234629225404e-09}, - {-1.7353589676361025e-09, -8.0899198451334087e-10, -1.6443779671249104e-10}}, - - {{-2.0029988778702545e-09, -1.9543477947081556e-10, 1.0916454407094989e-09}, - {-2.3890367462087170e-09, -4.7158010829662089e-10, 9.0026075483251589e-10}, - {-1.9033881137886510e-09, -7.5192119197708962e-10, 5.0038861942880430e-10}}, - - {{-1.3271805876513554e-09, -5.9243276950837753e-10, -2.2445734160214670e-10}, - {-7.4578609954301101e-10, -1.1995737736469891e-10, 2.5611823772919706e-10}, - {-9.4806251738077663e-10, -1.5472800818372434e-10, -3.6461900165818406e-10}}}; + double actual[3][3][3] = { + {{-1.9080703683727052e-09, + -1.5632650729457964e-10, + 1.1497536256399599e-09}, + {-1.9979540244463578e-09, + -2.5512036927466397e-10, + 1.0402234629225404e-09}, + {-1.7353589676361025e-09, + -8.0899198451334087e-10, + -1.6443779671249104e-10}}, + + {{-2.0029988778702545e-09, + -1.9543477947081556e-10, + 1.0916454407094989e-09}, + {-2.3890367462087170e-09, + -4.7158010829662089e-10, + 9.0026075483251589e-10}, + {-1.9033881137886510e-09, + -7.5192119197708962e-10, + 5.0038861942880430e-10}}, + + {{-1.3271805876513554e-09, + -5.9243276950837753e-10, + -2.2445734160214670e-10}, + {-7.4578609954301101e-10, + -1.1995737736469891e-10, + 2.5611823772919706e-10}, + {-9.4806251738077663e-10, + -1.5472800818372434e-10, + -3.6461900165818406e-10}}}; Mesh rhoMesh = o.iterations[100].meshes["rho"]; MeshRecordComponent rho = rhoMesh[MeshRecordComponent::SCALAR]; Offset offset{20, 20, 190}; Extent extent{3, 3, 3}; auto data = rho.loadChunk(offset, extent); rhoMesh.seriesFlush(); - double* raw_ptr = data.get(); + double *raw_ptr = data.get(); - for( int i = 0; i < 3; ++i ) - for( int j = 0; j < 3; ++j ) - for( int k = 0; k < 3; ++k ) - REQUIRE(raw_ptr[((i*3) + j)*3 + k] == actual[i][j][k]); + for (int i = 0; i < 3; ++i) + for (int j = 0; j < 3; ++j) + for (int k = 0; k < 3; ++k) + REQUIRE( + raw_ptr[((i * 3) + j) * 3 + k] == actual[i][j][k]); } { double constant_value = 9.1093829099999999e-31; - RecordComponent& electrons_mass = o.iterations[100].particles["electrons"]["mass"][RecordComponent::SCALAR]; + RecordComponent &electrons_mass = + o.iterations[100] + .particles["electrons"]["mass"][RecordComponent::SCALAR]; Offset offset{15}; Extent extent{3}; auto data = electrons_mass.loadChunk(offset, extent); o.flush(); - double* raw_ptr = data.get(); + double *raw_ptr = data.get(); - for( int i = 0; i < 3; ++i ) + for (int i = 0; i < 3; ++i) REQUIRE(raw_ptr[i] == constant_value); } - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. (" << e.what() << ")\n"; return; } } -TEST_CASE( "git_hdf5_sample_fileBased_read_test", "[serial][hdf5]" ) +TEST_CASE("git_hdf5_sample_fileBased_read_test", "[serial][hdf5]") { try { @@ -2541,7 +2973,8 @@ TEST_CASE( "git_hdf5_sample_fileBased_read_test", "[serial][hdf5]" ) #if openPMD_USE_INVASIVE_TESTS REQUIRE(o.get().m_filenamePadding == 8); #endif - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; @@ -2549,7 +2982,8 @@ TEST_CASE( "git_hdf5_sample_fileBased_read_test", "[serial][hdf5]" ) try { - Series o = Series("../samples/git-sample/data%08T.h5", Access::READ_ONLY); + Series o = + Series("../samples/git-sample/data%08T.h5", Access::READ_ONLY); REQUIRE(o.iterations.size() == 5); REQUIRE(o.iterations.count(100) == 1); @@ -2561,29 +2995,33 @@ TEST_CASE( "git_hdf5_sample_fileBased_read_test", "[serial][hdf5]" ) #if openPMD_USE_INVASIVE_TESTS REQUIRE(o.get().m_filenamePadding == 8); #endif - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. (" << e.what() << ")\n"; return; } - REQUIRE_THROWS_WITH(Series("../samples/git-sample/data%07T.h5", Access::READ_ONLY), - Catch::Equals("No matching iterations found: data%07T")); + REQUIRE_THROWS_WITH( + Series("../samples/git-sample/data%07T.h5", Access::READ_ONLY), + Catch::Equals("No matching iterations found: data%07T")); try { - std::vector< std::string > newFiles{"../samples/git-sample/data00000001.h5", - "../samples/git-sample/data00000010.h5", - "../samples/git-sample/data00001000.h5", - "../samples/git-sample/data00010000.h5", - "../samples/git-sample/data00100000.h5"}; + std::vector newFiles{ + "../samples/git-sample/data00000001.h5", + "../samples/git-sample/data00000010.h5", + "../samples/git-sample/data00001000.h5", + "../samples/git-sample/data00010000.h5", + "../samples/git-sample/data00100000.h5"}; - for( auto const& file : newFiles ) - if( auxiliary::file_exists(file) ) + for (auto const &file : newFiles) + if (auxiliary::file_exists(file)) auxiliary::remove_file(file); { - Series o = Series("../samples/git-sample/data%T.h5", Access::READ_WRITE); + Series o = + Series("../samples/git-sample/data%T.h5", Access::READ_WRITE); #if openPMD_USE_INVASIVE_TESTS REQUIRE(o.get().m_filenamePadding == 8); @@ -2597,28 +3035,30 @@ TEST_CASE( "git_hdf5_sample_fileBased_read_test", "[serial][hdf5]" ) o.flush(); } - for( auto const& file : newFiles ) + for (auto const &file : newFiles) { REQUIRE(auxiliary::file_exists(file)); auxiliary::remove_file(file); } - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; } } -TEST_CASE( "git_hdf5_early_chunk_query", "[serial][hdf5]" ) +TEST_CASE("git_hdf5_early_chunk_query", "[serial][hdf5]") { git_early_chunk_query("../samples/git-sample/data%T.h5", "electrons", 400); } -TEST_CASE( "git_hdf5_sample_read_thetaMode", "[serial][hdf5][thetaMode]" ) +TEST_CASE("git_hdf5_sample_read_thetaMode", "[serial][hdf5][thetaMode]") { try { - Series o = Series("../samples/git-sample/thetaMode/data%T.h5", Access::READ_ONLY); + Series o = Series( + "../samples/git-sample/thetaMode/data%T.h5", Access::READ_ONLY); REQUIRE(o.iterations.size() == 5); REQUIRE(o.iterations.count(100) == 1); @@ -2636,23 +3076,31 @@ TEST_CASE( "git_hdf5_sample_read_thetaMode", "[serial][hdf5][thetaMode]" ) REQUIRE(i.meshes.count("rho") == 1); Mesh B = i.meshes["B"]; - std::vector< std::string > const al{"r", "z"}; - std::vector< double > const gs{3.e-7, 1.e-7}; - std::vector< double > const ggo{0., 3.02e-5}; - std::array< double, 7 > const ud{{0., 1., -2., -1., 0., 0., 0.}}; + std::vector const al{"r", "z"}; + std::vector const gs{3.e-7, 1.e-7}; + std::vector const ggo{0., 3.02e-5}; + std::array const ud{{0., 1., -2., -1., 0., 0., 0.}}; REQUIRE(B.geometry() == Mesh::Geometry::thetaMode); REQUIRE(B.geometryParameters() == "m=2;imag=+"); REQUIRE(B.dataOrder() == Mesh::DataOrder::C); REQUIRE(B.axisLabels() == al); - REQUIRE(B.gridSpacing< double >().size() == 2u); + REQUIRE(B.gridSpacing().size() == 2u); REQUIRE(B.gridGlobalOffset().size() == 2u); - REQUIRE(std::abs(B.gridSpacing< double >()[0] - gs[0]) <= std::numeric_limits::epsilon()); - REQUIRE(std::abs(B.gridSpacing< double >()[1] - gs[1]) <= std::numeric_limits::epsilon()); - REQUIRE(std::abs(B.gridGlobalOffset()[0] - ggo[0]) <= std::numeric_limits::epsilon()); - REQUIRE(std::abs(B.gridGlobalOffset()[1] - ggo[1]) <= std::numeric_limits::epsilon()); + REQUIRE( + std::abs(B.gridSpacing()[0] - gs[0]) <= + std::numeric_limits::epsilon()); + REQUIRE( + std::abs(B.gridSpacing()[1] - gs[1]) <= + std::numeric_limits::epsilon()); + REQUIRE( + std::abs(B.gridGlobalOffset()[0] - ggo[0]) <= + std::numeric_limits::epsilon()); + REQUIRE( + std::abs(B.gridGlobalOffset()[1] - ggo[1]) <= + std::numeric_limits::epsilon()); REQUIRE(B.gridUnitSI() == 1.0); REQUIRE(B.unitDimension() == ud); - REQUIRE(B.timeOffset< double >() == static_cast< double >(0.0f)); + REQUIRE(B.timeOffset() == static_cast(0.0f)); REQUIRE(B.size() == 3); REQUIRE(B.count("r") == 1); @@ -2660,33 +3108,35 @@ TEST_CASE( "git_hdf5_sample_read_thetaMode", "[serial][hdf5][thetaMode]" ) REQUIRE(B.count("z") == 1); MeshRecordComponent B_z = B["z"]; - std::vector< double > const pos{0.5, 0.0}; + std::vector const pos{0.5, 0.0}; Extent const ext{3, 51, 201}; REQUIRE(B_z.unitSI() == 1.0); - REQUIRE(B_z.position< double >() == pos); + REQUIRE(B_z.position() == pos); REQUIRE(B_z.getDatatype() == Datatype::DOUBLE); REQUIRE(B_z.getExtent() == ext); REQUIRE(B_z.getDimensionality() == 3); Offset const offset{1, 10, 90}; // skip mode_0 (one scalar field) Extent const extent{2, 30, 20}; // mode_1 (two scalar fields) - auto data = B_z.loadChunk< double >(offset, extent); + auto data = B_z.loadChunk(offset, extent); o.flush(); - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; } } -TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) +TEST_CASE("hzdr_hdf5_sample_content_test", "[serial][hdf5]") { // since this file might not be publicly available, gracefully handle errors try { /* HZDR: /bigdata/hplsim/development/huebl/lwfa-openPMD-062-smallLWFA-h5 * DOI:10.14278/rodare.57 */ - Series o = Series("../samples/hzdr-sample/h5/simData_%T.h5", Access::READ_ONLY); + Series o = Series( + "../samples/hzdr-sample/h5/simData_%T.h5", Access::READ_ONLY); REQUIRE(o.openPMD() == "1.0.0"); REQUIRE(o.openPMDextension() == 1); @@ -2704,9 +3154,9 @@ TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) REQUIRE(o.iterations.size() >= 1); REQUIRE(o.iterations.count(0) == 1); - Iteration& i = o.iterations[0]; - REQUIRE(i.time< float >() == static_cast< float >(0.0f)); - REQUIRE(i.dt< float >() == static_cast< float >(1.0f)); + Iteration &i = o.iterations[0]; + REQUIRE(i.time() == static_cast(0.0f)); + REQUIRE(i.dt() == static_cast(1.0f)); REQUIRE(i.timeUnitSI() == 1.3899999999999999e-16); REQUIRE(i.meshes.size() == 4); @@ -2715,146 +3165,158 @@ TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) REQUIRE(i.meshes.count("e_chargeDensity") == 1); REQUIRE(i.meshes.count("e_energyDensity") == 1); - std::vector< std::string > al{"z", "y", "x"}; - std::vector< float > gs{static_cast< float >(6.2393283843994141f), - static_cast< float >(1.0630855560302734f), - static_cast< float >(6.2393283843994141f)}; - std::vector< double > ggo{0., 0., 0.}; - std::array< double, 7 > ud{{0., 1., -2., -1., 0., 0., 0.}}; - Mesh& B = i.meshes["B"]; + std::vector al{"z", "y", "x"}; + std::vector gs{ + static_cast(6.2393283843994141f), + static_cast(1.0630855560302734f), + static_cast(6.2393283843994141f)}; + std::vector ggo{0., 0., 0.}; + std::array ud{{0., 1., -2., -1., 0., 0., 0.}}; + Mesh &B = i.meshes["B"]; REQUIRE(B.geometry() == Mesh::Geometry::cartesian); REQUIRE(B.dataOrder() == Mesh::DataOrder::C); REQUIRE(B.axisLabels() == al); - REQUIRE(B.gridSpacing< float >() == gs); + REQUIRE(B.gridSpacing() == gs); REQUIRE(B.gridGlobalOffset() == ggo); REQUIRE(B.gridUnitSI() == 4.1671151661999998e-08); REQUIRE(B.unitDimension() == ud); - REQUIRE(B.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(B.timeOffset() == static_cast(0.0f)); REQUIRE(B.size() == 3); REQUIRE(B.count("x") == 1); REQUIRE(B.count("y") == 1); REQUIRE(B.count("z") == 1); - std::vector< float > p{static_cast< float >(0.0f), - static_cast< float >(0.5f), - static_cast< float >(0.5f)}; + std::vector p{ + static_cast(0.0f), + static_cast(0.5f), + static_cast(0.5f)}; Extent e{80, 384, 80}; - MeshRecordComponent& B_x = B["x"]; + MeshRecordComponent &B_x = B["x"]; REQUIRE(B_x.unitSI() == 40903.822240601701); - REQUIRE(B_x.position< float >() == p); + REQUIRE(B_x.position() == p); REQUIRE(B_x.getDatatype() == Datatype::FLOAT); REQUIRE(B_x.getExtent() == e); REQUIRE(B_x.getDimensionality() == 3); - p = {static_cast< float >(0.5f), - static_cast< float >(0.0f), - static_cast< float >(0.5f)}; - MeshRecordComponent& B_y = B["y"]; + p = { + static_cast(0.5f), + static_cast(0.0f), + static_cast(0.5f)}; + MeshRecordComponent &B_y = B["y"]; REQUIRE(B_y.unitSI() == 40903.822240601701); - REQUIRE(B_y.position< float >() == p); + REQUIRE(B_y.position() == p); REQUIRE(B_y.getDatatype() == Datatype::FLOAT); REQUIRE(B_y.getExtent() == e); REQUIRE(B_y.getDimensionality() == 3); - p = {static_cast< float >(0.5f), - static_cast< float >(0.5f), - static_cast< float >(0.0f)}; - 
MeshRecordComponent& B_z = B["z"]; + p = { + static_cast(0.5f), + static_cast(0.5f), + static_cast(0.0f)}; + MeshRecordComponent &B_z = B["z"]; REQUIRE(B_z.unitSI() == 40903.822240601701); - REQUIRE(B_z.position< float >() == p); + REQUIRE(B_z.position() == p); REQUIRE(B_z.getDatatype() == Datatype::FLOAT); REQUIRE(B_z.getExtent() == e); REQUIRE(B_z.getDimensionality() == 3); - ud = {{1., 1., -3., -1., 0., 0., 0.}}; - Mesh& E = i.meshes["E"]; + ud = {{1., 1., -3., -1., 0., 0., 0.}}; + Mesh &E = i.meshes["E"]; REQUIRE(E.geometry() == Mesh::Geometry::cartesian); REQUIRE(E.dataOrder() == Mesh::DataOrder::C); REQUIRE(E.axisLabels() == al); - REQUIRE(E.gridSpacing< float >() == gs); + REQUIRE(E.gridSpacing() == gs); REQUIRE(E.gridGlobalOffset() == ggo); REQUIRE(E.gridUnitSI() == 4.1671151661999998e-08); REQUIRE(E.unitDimension() == ud); - REQUIRE(E.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(E.timeOffset() == static_cast(0.0f)); REQUIRE(E.size() == 3); REQUIRE(E.count("x") == 1); REQUIRE(E.count("y") == 1); REQUIRE(E.count("z") == 1); - p = {static_cast< float >(0.5f), - static_cast< float >(0.0f), - static_cast< float >(0.0f)}; + p = { + static_cast(0.5f), + static_cast(0.0f), + static_cast(0.0f)}; e = {80, 384, 80}; - MeshRecordComponent& E_x = E["x"]; + MeshRecordComponent &E_x = E["x"]; REQUIRE(E_x.unitSI() == 12262657411105.049); - REQUIRE(E_x.position< float >() == p); + REQUIRE(E_x.position() == p); REQUIRE(E_x.getDatatype() == Datatype::FLOAT); REQUIRE(E_x.getExtent() == e); REQUIRE(E_x.getDimensionality() == 3); - p = {static_cast< float >(0.0f), - static_cast< float >(0.5f), - static_cast< float >(0.0f)}; - MeshRecordComponent& E_y = E["y"]; + p = { + static_cast(0.0f), + static_cast(0.5f), + static_cast(0.0f)}; + MeshRecordComponent &E_y = E["y"]; REQUIRE(E_y.unitSI() == 12262657411105.049); - REQUIRE(E_y.position< float >() == p); + REQUIRE(E_y.position() == p); REQUIRE(E_y.getDatatype() == Datatype::FLOAT); REQUIRE(E_y.getExtent() == e); REQUIRE(E_y.getDimensionality() == 3); - p = {static_cast< float >(0.0f), - static_cast< float >(0.0f), - static_cast< float >(0.5f)}; - MeshRecordComponent& E_z = E["z"]; + p = { + static_cast(0.0f), + static_cast(0.0f), + static_cast(0.5f)}; + MeshRecordComponent &E_z = E["z"]; REQUIRE(E_z.unitSI() == 12262657411105.049); - REQUIRE(E_z.position< float >() == p); + REQUIRE(E_z.position() == p); REQUIRE(E_z.getDatatype() == Datatype::FLOAT); REQUIRE(E_z.getExtent() == e); REQUIRE(E_z.getDimensionality() == 3); - ud = {{-3., 0., 1., 1., 0., 0., 0.}}; - Mesh& e_chargeDensity = i.meshes["e_chargeDensity"]; + ud = {{-3., 0., 1., 1., 0., 0., 0.}}; + Mesh &e_chargeDensity = i.meshes["e_chargeDensity"]; REQUIRE(e_chargeDensity.geometry() == Mesh::Geometry::cartesian); REQUIRE(e_chargeDensity.dataOrder() == Mesh::DataOrder::C); REQUIRE(e_chargeDensity.axisLabels() == al); - REQUIRE(e_chargeDensity.gridSpacing< float >() == gs); + REQUIRE(e_chargeDensity.gridSpacing() == gs); REQUIRE(e_chargeDensity.gridGlobalOffset() == ggo); REQUIRE(e_chargeDensity.gridUnitSI() == 4.1671151661999998e-08); REQUIRE(e_chargeDensity.unitDimension() == ud); - REQUIRE(e_chargeDensity.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE( + e_chargeDensity.timeOffset() == static_cast(0.0f)); REQUIRE(e_chargeDensity.size() == 1); REQUIRE(e_chargeDensity.count(MeshRecordComponent::SCALAR) == 1); - p = {static_cast< float >(0.f), - static_cast< float >(0.f), - static_cast< float >(0.f)}; - MeshRecordComponent& e_chargeDensity_scalar = 
e_chargeDensity[MeshRecordComponent::SCALAR]; + p = { + static_cast(0.f), + static_cast(0.f), + static_cast(0.f)}; + MeshRecordComponent &e_chargeDensity_scalar = + e_chargeDensity[MeshRecordComponent::SCALAR]; REQUIRE(e_chargeDensity_scalar.unitSI() == 66306201.002331272); - REQUIRE(e_chargeDensity_scalar.position< float >() == p); + REQUIRE(e_chargeDensity_scalar.position() == p); REQUIRE(e_chargeDensity_scalar.getDatatype() == Datatype::FLOAT); REQUIRE(e_chargeDensity_scalar.getExtent() == e); REQUIRE(e_chargeDensity_scalar.getDimensionality() == 3); - ud = {{-1., 1., -2., 0., 0., 0., 0.}}; - Mesh& e_energyDensity = i.meshes["e_energyDensity"]; + ud = {{-1., 1., -2., 0., 0., 0., 0.}}; + Mesh &e_energyDensity = i.meshes["e_energyDensity"]; REQUIRE(e_energyDensity.geometry() == Mesh::Geometry::cartesian); REQUIRE(e_energyDensity.dataOrder() == Mesh::DataOrder::C); REQUIRE(e_energyDensity.axisLabels() == al); - REQUIRE(e_energyDensity.gridSpacing< float >() == gs); + REQUIRE(e_energyDensity.gridSpacing() == gs); REQUIRE(e_energyDensity.gridGlobalOffset() == ggo); REQUIRE(e_energyDensity.gridUnitSI() == 4.1671151661999998e-08); REQUIRE(e_energyDensity.unitDimension() == ud); - REQUIRE(e_energyDensity.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE( + e_energyDensity.timeOffset() == static_cast(0.0f)); REQUIRE(e_energyDensity.size() == 1); REQUIRE(e_energyDensity.count(MeshRecordComponent::SCALAR) == 1); - MeshRecordComponent& e_energyDensity_scalar = e_energyDensity[MeshRecordComponent::SCALAR]; + MeshRecordComponent &e_energyDensity_scalar = + e_energyDensity[MeshRecordComponent::SCALAR]; REQUIRE(e_energyDensity_scalar.unitSI() == 1.0146696675429705e+18); - REQUIRE(e_energyDensity_scalar.position< float >() == p); + REQUIRE(e_energyDensity_scalar.position() == p); REQUIRE(e_energyDensity_scalar.getDatatype() == Datatype::FLOAT); REQUIRE(e_energyDensity_scalar.getExtent() == e); REQUIRE(e_energyDensity_scalar.getDimensionality() == 3); @@ -2862,7 +3324,7 @@ TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) REQUIRE(i.particles.size() == 1); REQUIRE(i.particles.count("e") == 1); - ParticleSpecies& species_e = i.particles["e"]; + ParticleSpecies &species_e = i.particles["e"]; REQUIRE(species_e.size() == 6); REQUIRE(species_e.count("charge") == 1); @@ -2873,143 +3335,150 @@ TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) REQUIRE(species_e.count("positionOffset") == 1); REQUIRE(species_e.count("weighting") == 1); - ud = {{0., 0., 1., 1., 0., 0., 0.}}; - Record& e_charge = species_e["charge"]; + ud = {{0., 0., 1., 1., 0., 0., 0.}}; + Record &e_charge = species_e["charge"]; REQUIRE(e_charge.unitDimension() == ud); - REQUIRE(e_charge.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(e_charge.timeOffset() == static_cast(0.0f)); REQUIRE(e_charge.size() == 1); REQUIRE(e_charge.count(RecordComponent::SCALAR) == 1); e = {2150400}; - RecordComponent& e_charge_scalar = e_charge[RecordComponent::SCALAR]; + RecordComponent &e_charge_scalar = e_charge[RecordComponent::SCALAR]; REQUIRE(e_charge_scalar.unitSI() == 4.7980045488500004e-15); REQUIRE(e_charge_scalar.getDatatype() == Datatype::DOUBLE); REQUIRE(e_charge_scalar.getExtent() == e); REQUIRE(e_charge_scalar.getDimensionality() == 1); - ud = {{0., 1., 0., 0., 0., 0., 0.}}; - Record& e_mass = species_e["mass"]; + ud = {{0., 1., 0., 0., 0., 0., 0.}}; + Record &e_mass = species_e["mass"]; REQUIRE(e_mass.unitDimension() == ud); - REQUIRE(e_mass.timeOffset< float >() == static_cast< float >(0.0f)); + 
REQUIRE(e_mass.timeOffset() == static_cast(0.0f)); REQUIRE(e_mass.size() == 1); REQUIRE(e_mass.count(RecordComponent::SCALAR) == 1); - RecordComponent& e_mass_scalar = e_mass[RecordComponent::SCALAR]; + RecordComponent &e_mass_scalar = e_mass[RecordComponent::SCALAR]; REQUIRE(e_mass_scalar.unitSI() == 2.7279684799430467e-26); REQUIRE(e_mass_scalar.getDatatype() == Datatype::DOUBLE); REQUIRE(e_mass_scalar.getExtent() == e); REQUIRE(e_mass_scalar.getDimensionality() == 1); - ud = {{1., 1., -1., 0., 0., 0., 0.}}; - Record& e_momentum = species_e["momentum"]; + ud = {{1., 1., -1., 0., 0., 0., 0.}}; + Record &e_momentum = species_e["momentum"]; REQUIRE(e_momentum.unitDimension() == ud); - REQUIRE(e_momentum.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(e_momentum.timeOffset() == static_cast(0.0f)); REQUIRE(e_momentum.size() == 3); REQUIRE(e_momentum.count("x") == 1); REQUIRE(e_momentum.count("y") == 1); REQUIRE(e_momentum.count("z") == 1); - RecordComponent& e_momentum_x = e_momentum["x"]; + RecordComponent &e_momentum_x = e_momentum["x"]; REQUIRE(e_momentum_x.unitSI() == 8.1782437594864961e-18); REQUIRE(e_momentum_x.getDatatype() == Datatype::FLOAT); REQUIRE(e_momentum_x.getExtent() == e); REQUIRE(e_momentum_x.getDimensionality() == 1); - RecordComponent& e_momentum_y = e_momentum["y"]; + RecordComponent &e_momentum_y = e_momentum["y"]; REQUIRE(e_momentum_y.unitSI() == 8.1782437594864961e-18); REQUIRE(e_momentum_y.getDatatype() == Datatype::FLOAT); REQUIRE(e_momentum_y.getExtent() == e); REQUIRE(e_momentum_y.getDimensionality() == 1); - RecordComponent& e_momentum_z = e_momentum["z"]; + RecordComponent &e_momentum_z = e_momentum["z"]; REQUIRE(e_momentum_z.unitSI() == 8.1782437594864961e-18); REQUIRE(e_momentum_z.getDatatype() == Datatype::FLOAT); REQUIRE(e_momentum_z.getExtent() == e); REQUIRE(e_momentum_z.getDimensionality() == 1); - ud = {{1., 0., 0., 0., 0., 0., 0.}}; - Record& e_position = species_e["position"]; + ud = {{1., 0., 0., 0., 0., 0., 0.}}; + Record &e_position = species_e["position"]; REQUIRE(e_position.unitDimension() == ud); - REQUIRE(e_position.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(e_position.timeOffset() == static_cast(0.0f)); REQUIRE(e_position.size() == 3); REQUIRE(e_position.count("x") == 1); REQUIRE(e_position.count("y") == 1); REQUIRE(e_position.count("z") == 1); - RecordComponent& e_position_x = e_position["x"]; + RecordComponent &e_position_x = e_position["x"]; REQUIRE(e_position_x.unitSI() == 2.599999993753294e-07); REQUIRE(e_position_x.getDatatype() == Datatype::FLOAT); REQUIRE(e_position_x.getExtent() == e); REQUIRE(e_position_x.getDimensionality() == 1); - RecordComponent& e_position_y = e_position["y"]; + RecordComponent &e_position_y = e_position["y"]; REQUIRE(e_position_y.unitSI() == 4.4299999435019118e-08); REQUIRE(e_position_y.getDatatype() == Datatype::FLOAT); REQUIRE(e_position_y.getExtent() == e); REQUIRE(e_position_y.getDimensionality() == 1); - RecordComponent& e_position_z = e_position["z"]; + RecordComponent &e_position_z = e_position["z"]; REQUIRE(e_position_z.unitSI() == 2.599999993753294e-07); REQUIRE(e_position_z.getDatatype() == Datatype::FLOAT); REQUIRE(e_position_z.getExtent() == e); REQUIRE(e_position_z.getDimensionality() == 1); - ud = {{1., 0., 0., 0., 0., 0., 0.}}; - Record& e_positionOffset = species_e["positionOffset"]; + ud = {{1., 0., 0., 0., 0., 0., 0.}}; + Record &e_positionOffset = species_e["positionOffset"]; REQUIRE(e_positionOffset.unitDimension() == ud); - 
REQUIRE(e_positionOffset.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE( + e_positionOffset.timeOffset() == static_cast(0.0f)); REQUIRE(e_positionOffset.size() == 3); REQUIRE(e_positionOffset.count("x") == 1); REQUIRE(e_positionOffset.count("y") == 1); REQUIRE(e_positionOffset.count("z") == 1); - RecordComponent& e_positionOffset_x = e_positionOffset["x"]; + RecordComponent &e_positionOffset_x = e_positionOffset["x"]; REQUIRE(e_positionOffset_x.unitSI() == 2.599999993753294e-07); - REQUIRE(e_positionOffset_x.getDatatype() == determineDatatype< int32_t >()); + REQUIRE( + e_positionOffset_x.getDatatype() == determineDatatype()); REQUIRE(e_positionOffset_x.getExtent() == e); REQUIRE(e_positionOffset_x.getDimensionality() == 1); - RecordComponent& e_positionOffset_y = e_positionOffset["y"]; + RecordComponent &e_positionOffset_y = e_positionOffset["y"]; REQUIRE(e_positionOffset_y.unitSI() == 4.4299999435019118e-08); - REQUIRE(e_positionOffset_y.getDatatype() == determineDatatype< int32_t >()); + REQUIRE( + e_positionOffset_y.getDatatype() == determineDatatype()); REQUIRE(e_positionOffset_y.getExtent() == e); REQUIRE(e_positionOffset_y.getDimensionality() == 1); - RecordComponent& e_positionOffset_z = e_positionOffset["z"]; + RecordComponent &e_positionOffset_z = e_positionOffset["z"]; REQUIRE(e_positionOffset_z.unitSI() == 2.599999993753294e-07); - REQUIRE(e_positionOffset_z.getDatatype() == determineDatatype< int32_t >()); + REQUIRE( + e_positionOffset_z.getDatatype() == determineDatatype()); REQUIRE(e_positionOffset_z.getExtent() == e); REQUIRE(e_positionOffset_z.getDimensionality() == 1); - ud = {{0., 0., 0., 0., 0., 0., 0.}}; - Record& e_weighting = species_e["weighting"]; + ud = {{0., 0., 0., 0., 0., 0., 0.}}; + Record &e_weighting = species_e["weighting"]; REQUIRE(e_weighting.unitDimension() == ud); - REQUIRE(e_weighting.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(e_weighting.timeOffset() == static_cast(0.0f)); REQUIRE(e_weighting.size() == 1); REQUIRE(e_weighting.count(RecordComponent::SCALAR) == 1); - RecordComponent& e_weighting_scalar = e_weighting[RecordComponent::SCALAR]; + RecordComponent &e_weighting_scalar = + e_weighting[RecordComponent::SCALAR]; REQUIRE(e_weighting_scalar.unitSI() == 1.0); REQUIRE(e_weighting_scalar.getDatatype() == Datatype::FLOAT); REQUIRE(e_weighting_scalar.getExtent() == e); REQUIRE(e_weighting_scalar.getDimensionality() == 1); - ParticlePatches& e_patches = species_e.particlePatches; - REQUIRE(e_patches.size() == 4); /* extent, numParticles, numParticlesOffset, offset */ + ParticlePatches &e_patches = species_e.particlePatches; + REQUIRE( + e_patches.size() == + 4); /* extent, numParticles, numParticlesOffset, offset */ REQUIRE(e_patches.count("extent") == 1); REQUIRE(e_patches.count("numParticles") == 1); REQUIRE(e_patches.count("numParticlesOffset") == 1); REQUIRE(e_patches.count("offset") == 1); REQUIRE(e_patches.numPatches() == 4); - ud = {{1., 0., 0., 0., 0., 0., 0.}}; - PatchRecord& e_extent = e_patches["extent"]; + ud = {{1., 0., 0., 0., 0., 0., 0.}}; + PatchRecord &e_extent = e_patches["extent"]; REQUIRE(e_extent.unitDimension() == ud); REQUIRE(e_extent.size() == 3); @@ -3017,63 +3486,76 @@ TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) REQUIRE(e_extent.count("y") == 1); REQUIRE(e_extent.count("z") == 1); - PatchRecordComponent& e_extent_x = e_extent["x"]; + PatchRecordComponent &e_extent_x = e_extent["x"]; REQUIRE(e_extent_x.unitSI() == 2.599999993753294e-07); #if !defined(_MSC_VER) - 
REQUIRE(e_extent_x.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(e_extent_x.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(e_extent_x.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE( + isSame(e_extent_x.getDatatype(), determineDatatype())); - PatchRecordComponent& e_extent_y = e_extent["y"]; + PatchRecordComponent &e_extent_y = e_extent["y"]; REQUIRE(e_extent_y.unitSI() == 4.429999943501912e-08); #if !defined(_MSC_VER) - REQUIRE(e_extent_y.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(e_extent_y.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(e_extent_y.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE( + isSame(e_extent_y.getDatatype(), determineDatatype())); - PatchRecordComponent& e_extent_z = e_extent["z"]; + PatchRecordComponent &e_extent_z = e_extent["z"]; REQUIRE(e_extent_z.unitSI() == 2.599999993753294e-07); #if !defined(_MSC_VER) - REQUIRE(e_extent_z.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(e_extent_z.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(e_extent_z.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE( + isSame(e_extent_z.getDatatype(), determineDatatype())); - std::vector< uint64_t > data( e_patches.size() ); + std::vector data(e_patches.size()); e_extent_z.load(shareRaw(data.data())); species_e.seriesFlush(); - REQUIRE(data.at(0) == static_cast< uint64_t >(80)); - REQUIRE(data.at(1) == static_cast< uint64_t >(80)); - REQUIRE(data.at(2) == static_cast< uint64_t >(80)); - REQUIRE(data.at(3) == static_cast< uint64_t >(80)); + REQUIRE(data.at(0) == static_cast(80)); + REQUIRE(data.at(1) == static_cast(80)); + REQUIRE(data.at(2) == static_cast(80)); + REQUIRE(data.at(3) == static_cast(80)); - PatchRecord& e_numParticles = e_patches["numParticles"]; + PatchRecord &e_numParticles = e_patches["numParticles"]; REQUIRE(e_numParticles.size() == 1); REQUIRE(e_numParticles.count(RecordComponent::SCALAR) == 1); - PatchRecordComponent& e_numParticles_scalar = e_numParticles[RecordComponent::SCALAR]; + PatchRecordComponent &e_numParticles_scalar = + e_numParticles[RecordComponent::SCALAR]; #if !defined(_MSC_VER) - REQUIRE(e_numParticles_scalar.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE( + e_numParticles_scalar.getDatatype() == + determineDatatype()); #endif - REQUIRE(isSame(e_numParticles_scalar.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE(isSame( + e_numParticles_scalar.getDatatype(), + determineDatatype())); e_numParticles_scalar.load(shareRaw(data.data())); o.flush(); - REQUIRE(data.at(0) == static_cast< uint64_t >(512000)); - REQUIRE(data.at(1) == static_cast< uint64_t >(819200)); - REQUIRE(data.at(2) == static_cast< uint64_t >(819200)); - REQUIRE(data.at(3) == static_cast< uint64_t >(0)); + REQUIRE(data.at(0) == static_cast(512000)); + REQUIRE(data.at(1) == static_cast(819200)); + REQUIRE(data.at(2) == static_cast(819200)); + REQUIRE(data.at(3) == static_cast(0)); - PatchRecord& e_numParticlesOffset = e_patches["numParticlesOffset"]; + PatchRecord &e_numParticlesOffset = e_patches["numParticlesOffset"]; REQUIRE(e_numParticlesOffset.size() == 1); REQUIRE(e_numParticlesOffset.count(RecordComponent::SCALAR) == 1); - PatchRecordComponent& e_numParticlesOffset_scalar = e_numParticlesOffset[RecordComponent::SCALAR]; + PatchRecordComponent &e_numParticlesOffset_scalar = + e_numParticlesOffset[RecordComponent::SCALAR]; #if !defined(_MSC_VER) - REQUIRE(e_numParticlesOffset_scalar.getDatatype() == determineDatatype< uint64_t >()); + 
REQUIRE( + e_numParticlesOffset_scalar.getDatatype() == + determineDatatype()); #endif - REQUIRE(isSame(e_numParticlesOffset_scalar.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE(isSame( + e_numParticlesOffset_scalar.getDatatype(), + determineDatatype())); - PatchRecord& e_offset = e_patches["offset"]; + PatchRecord &e_offset = e_patches["offset"]; REQUIRE(e_offset.unitDimension() == ud); REQUIRE(e_offset.size() == 3); @@ -3081,71 +3563,77 @@ TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) REQUIRE(e_offset.count("y") == 1); REQUIRE(e_offset.count("z") == 1); - PatchRecordComponent& e_offset_x = e_offset["x"]; + PatchRecordComponent &e_offset_x = e_offset["x"]; REQUIRE(e_offset_x.unitSI() == 2.599999993753294e-07); #if !defined(_MSC_VER) - REQUIRE(e_offset_x.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(e_offset_x.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(e_offset_x.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE( + isSame(e_offset_x.getDatatype(), determineDatatype())); - PatchRecordComponent& e_offset_y = e_offset["y"]; + PatchRecordComponent &e_offset_y = e_offset["y"]; REQUIRE(e_offset_y.unitSI() == 4.429999943501912e-08); #if !defined(_MSC_VER) - REQUIRE(e_offset_y.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(e_offset_y.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(e_offset_y.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE( + isSame(e_offset_y.getDatatype(), determineDatatype())); e_offset_y.load(shareRaw(data.data())); o.flush(); - REQUIRE(data.at(0) == static_cast< uint64_t >(0)); - REQUIRE(data.at(1) == static_cast< uint64_t >(128)); - REQUIRE(data.at(2) == static_cast< uint64_t >(256)); - REQUIRE(data.at(3) == static_cast< uint64_t >(384)); + REQUIRE(data.at(0) == static_cast(0)); + REQUIRE(data.at(1) == static_cast(128)); + REQUIRE(data.at(2) == static_cast(256)); + REQUIRE(data.at(3) == static_cast(384)); - PatchRecordComponent& e_offset_z = e_offset["z"]; + PatchRecordComponent &e_offset_z = e_offset["z"]; REQUIRE(e_offset_z.unitSI() == 2.599999993753294e-07); #if !defined(_MSC_VER) - REQUIRE(e_offset_z.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(e_offset_z.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(e_offset_z.getDatatype(), determineDatatype< uint64_t >())); - } catch (no_such_file_error& e) + REQUIRE( + isSame(e_offset_z.getDatatype(), determineDatatype())); + } + catch (no_such_file_error &e) { std::cerr << "HZDR sample not accessible. 
(" << e.what() << ")\n"; return; } } -TEST_CASE( "hdf5_bool_test", "[serial][hdf5]" ) +TEST_CASE("hdf5_bool_test", "[serial][hdf5]") { bool_test("h5"); } -TEST_CASE( "hdf5_patch_test", "[serial][hdf5]" ) +TEST_CASE("hdf5_patch_test", "[serial][hdf5]") { patch_test("h5"); } -TEST_CASE( "hdf5_deletion_test", "[serial][hdf5]" ) +TEST_CASE("hdf5_deletion_test", "[serial][hdf5]") { deletion_test("h5"); } #else -TEST_CASE( "no_serial_hdf5", "[serial][hdf5]" ) +TEST_CASE("no_serial_hdf5", "[serial][hdf5]") { REQUIRE(true); } #endif #if openPMD_HAVE_ADIOS1 -TEST_CASE( "hzdr_adios1_sample_content_test", "[serial][adios1]" ) +TEST_CASE("hzdr_adios1_sample_content_test", "[serial][adios1]") { // since this file might not be publicly available, gracefully handle errors - /** @todo add bp example files to https://github.com/openPMD/openPMD-example-datasets */ + /** @todo add bp example files to + * https://github.com/openPMD/openPMD-example-datasets */ try { /* HZDR: /bigdata/hplsim/development/huebl/lwfa-bgfield-001 * DOI:10.14278/rodare.57 */ - Series o = Series("../samples/hzdr-sample/bp/checkpoint_%T.bp", Access::READ_ONLY); + Series o = Series( + "../samples/hzdr-sample/bp/checkpoint_%T.bp", Access::READ_ONLY); REQUIRE(o.openPMD() == "1.0.0"); REQUIRE(o.openPMDextension() == 1); @@ -3162,136 +3650,145 @@ TEST_CASE( "hzdr_adios1_sample_content_test", "[serial][adios1]" ) REQUIRE(o.iterations.size() >= 1); REQUIRE(o.iterations.count(0) == 1); - Iteration& i = o.iterations[0]; - REQUIRE(i.time< float >() == static_cast< float >(0.0f)); - REQUIRE(i.dt< float >() == static_cast< float >(1.0f)); + Iteration &i = o.iterations[0]; + REQUIRE(i.time() == static_cast(0.0f)); + REQUIRE(i.dt() == static_cast(1.0f)); REQUIRE(i.timeUnitSI() == 1.3899999999999999e-16); REQUIRE(i.meshes.count("B") == 1); REQUIRE(i.meshes.count("E") == 1); REQUIRE(i.meshes.size() == 2); - std::vector< std::string > al{"z", "y", "x"}; - std::vector< float > gs{static_cast< float >(4.252342224121094f), - static_cast< float >(1.0630855560302734f), - static_cast< float >(4.252342224121094f)}; - std::vector< double > ggo{0., 0., 0.}; - std::array< double, 7 > ud{{0., 1., -2., -1., 0., 0., 0.}}; - Mesh& B = i.meshes["B"]; + std::vector al{"z", "y", "x"}; + std::vector gs{ + static_cast(4.252342224121094f), + static_cast(1.0630855560302734f), + static_cast(4.252342224121094f)}; + std::vector ggo{0., 0., 0.}; + std::array ud{{0., 1., -2., -1., 0., 0., 0.}}; + Mesh &B = i.meshes["B"]; REQUIRE(B.geometry() == Mesh::Geometry::cartesian); REQUIRE(B.dataOrder() == Mesh::DataOrder::C); REQUIRE(B.axisLabels() == al); - REQUIRE(B.gridSpacing< float >() == gs); + REQUIRE(B.gridSpacing() == gs); REQUIRE(B.gridGlobalOffset() == ggo); REQUIRE(B.gridUnitSI() == 4.1671151661999998e-08); REQUIRE(B.unitDimension() == ud); - REQUIRE(B.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(B.timeOffset() == static_cast(0.0f)); REQUIRE(B.size() == 3); REQUIRE(B.count("x") == 1); REQUIRE(B.count("y") == 1); REQUIRE(B.count("z") == 1); - std::vector< float > p{static_cast< float >(0.0f), - static_cast< float >(0.5f), - static_cast< float >(0.5f)}; + std::vector p{ + static_cast(0.0f), + static_cast(0.5f), + static_cast(0.5f)}; Extent e{192, 512, 192}; - MeshRecordComponent& B_x = B["x"]; + MeshRecordComponent &B_x = B["x"]; REQUIRE(B_x.unitSI() == 40903.82224060171); - REQUIRE(B_x.position< float >() == p); + REQUIRE(B_x.position() == p); REQUIRE(B_x.getDatatype() == Datatype::FLOAT); REQUIRE(B_x.getExtent() == e); REQUIRE(B_x.getDimensionality() == 
3); - p = {static_cast< float >(0.5f), - static_cast< float >(0.0f), - static_cast< float >(0.5f)}; - MeshRecordComponent& B_y = B["y"]; + p = { + static_cast(0.5f), + static_cast(0.0f), + static_cast(0.5f)}; + MeshRecordComponent &B_y = B["y"]; REQUIRE(B_y.unitSI() == 40903.82224060171); - REQUIRE(B_y.position< float >() == p); + REQUIRE(B_y.position() == p); REQUIRE(B_y.getDatatype() == Datatype::FLOAT); REQUIRE(B_y.getExtent() == e); REQUIRE(B_y.getDimensionality() == 3); - p = {static_cast< float >(0.5f), - static_cast< float >(0.5f), - static_cast< float >(0.0f)}; - MeshRecordComponent& B_z = B["z"]; + p = { + static_cast(0.5f), + static_cast(0.5f), + static_cast(0.0f)}; + MeshRecordComponent &B_z = B["z"]; REQUIRE(B_z.unitSI() == 40903.82224060171); - REQUIRE(B_z.position< float >() == p); + REQUIRE(B_z.position() == p); REQUIRE(B_z.getDatatype() == Datatype::FLOAT); REQUIRE(B_z.getExtent() == e); REQUIRE(B_z.getDimensionality() == 3); - ud = {{1., 1., -3., -1., 0., 0., 0.}}; - Mesh& E = i.meshes["E"]; + ud = {{1., 1., -3., -1., 0., 0., 0.}}; + Mesh &E = i.meshes["E"]; REQUIRE(E.geometry() == Mesh::Geometry::cartesian); REQUIRE(E.dataOrder() == Mesh::DataOrder::C); REQUIRE(E.axisLabels() == al); - REQUIRE(E.gridSpacing< float >() == gs); + REQUIRE(E.gridSpacing() == gs); REQUIRE(E.gridGlobalOffset() == ggo); REQUIRE(E.gridUnitSI() == 4.1671151661999998e-08); REQUIRE(E.unitDimension() == ud); - REQUIRE(E.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(E.timeOffset() == static_cast(0.0f)); REQUIRE(E.size() == 3); REQUIRE(E.count("x") == 1); REQUIRE(E.count("y") == 1); REQUIRE(E.count("z") == 1); - p = {static_cast< float >(0.5f), - static_cast< float >(0.0f), - static_cast< float >(0.0f)}; + p = { + static_cast(0.5f), + static_cast(0.0f), + static_cast(0.0f)}; e = {192, 512, 192}; - MeshRecordComponent& E_x = E["x"]; + MeshRecordComponent &E_x = E["x"]; REQUIRE(E_x.unitSI() == 12262657411105.05); - REQUIRE(E_x.position< float >() == p); + REQUIRE(E_x.position() == p); REQUIRE(E_x.getDatatype() == Datatype::FLOAT); REQUIRE(E_x.getExtent() == e); REQUIRE(E_x.getDimensionality() == 3); - p = {static_cast< float >(0.0f), - static_cast< float >(0.5f), - static_cast< float >(0.0f)}; - MeshRecordComponent& E_y = E["y"]; + p = { + static_cast(0.0f), + static_cast(0.5f), + static_cast(0.0f)}; + MeshRecordComponent &E_y = E["y"]; REQUIRE(E_y.unitSI() == 12262657411105.05); - REQUIRE(E_y.position< float >() == p); + REQUIRE(E_y.position() == p); REQUIRE(E_y.getDatatype() == Datatype::FLOAT); REQUIRE(E_y.getExtent() == e); REQUIRE(E_y.getDimensionality() == 3); - p = {static_cast< float >(0.0f), - static_cast< float >(0.0f), - static_cast< float >(0.5f)}; - MeshRecordComponent& E_z = E["z"]; + p = { + static_cast(0.0f), + static_cast(0.0f), + static_cast(0.5f)}; + MeshRecordComponent &E_z = E["z"]; REQUIRE(E_z.unitSI() == 12262657411105.05); - REQUIRE(E_z.position< float >() == p); + REQUIRE(E_z.position() == p); REQUIRE(E_z.getDatatype() == Datatype::FLOAT); REQUIRE(E_z.getExtent() == e); REQUIRE(E_z.getDimensionality() == 3); REQUIRE(i.particles.empty()); - float actual[3][3][3] = {{{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, - {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, - {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, - {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, - {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, - {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, - {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, - {7.0438218e-06f, 7.0438218e-06f, 
7.0438218e-06f}, - {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}}; + float actual[3][3][3] = { + {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, + {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, + {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, + {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, + {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, + {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, + {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, + {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, + {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}}; Offset offset{20, 20, 150}; Extent extent{3, 3, 3}; auto data = B_z.loadChunk(offset, extent); o.flush(); - float* raw_ptr = data.get(); + float *raw_ptr = data.get(); - for( int a = 0; a < 3; ++a ) - for( int b = 0; b < 3; ++b ) - for( int c = 0; c < 3; ++c ) - REQUIRE(raw_ptr[((a*3) + b)*3 + c] == actual[a][b][c]); - } catch (no_such_file_error& e) + for (int a = 0; a < 3; ++a) + for (int b = 0; b < 3; ++b) + for (int c = 0; c < 3; ++c) + REQUIRE(raw_ptr[((a * 3) + b) * 3 + c] == actual[a][b][c]); + } + catch (no_such_file_error &e) { std::cerr << "HZDR sample not accessible. (" << e.what() << ")\n"; return; @@ -3299,21 +3796,22 @@ TEST_CASE( "hzdr_adios1_sample_content_test", "[serial][adios1]" ) } #else -TEST_CASE( "no_serial_adios1", "[serial][adios]") +TEST_CASE("no_serial_adios1", "[serial][adios]") { REQUIRE(true); } #endif #if openPMD_HAVE_ADIOS2 -TEST_CASE( "git_adios2_early_chunk_query", "[serial][adios2]" ) +TEST_CASE("git_adios2_early_chunk_query", "[serial][adios2]") { - git_early_chunk_query("../samples/git-sample/3d-bp4/example-3d-bp4_%T.bp", "e", 600); + git_early_chunk_query( + "../samples/git-sample/3d-bp4/example-3d-bp4_%T.bp", "e", 600); } -TEST_CASE( "serial_adios2_json_config", "[serial][adios2]" ) +TEST_CASE("serial_adios2_json_config", "[serial][adios2]") { - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) == "ADIOS1" ) + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1") { // run this test for ADIOS2 only return; @@ -3415,32 +3913,31 @@ TEST_CASE( "serial_adios2_json_config", "[serial][adios2]" ) } } )END"; - auto const write = [ &datasetConfig ]( - std::string const & filename, - std::string const & config ) { - openPMD::Series series( filename, openPMD::Access::CREATE, config ); - auto E_x = series.iterations[ 0 ].meshes[ "E" ][ "x" ]; - openPMD::Dataset ds( openPMD::Datatype::INT, { 1000 } ); - E_x.resetDataset( ds ); - std::vector< int > data( 1000, 0 ); - E_x.storeChunk( data, { 0 }, { 1000 } ); - - auto E_y = series.iterations[ 0 ].meshes[ "E" ][ "y" ]; + auto const write = [&datasetConfig]( + std::string const &filename, + std::string const &config) { + openPMD::Series series(filename, openPMD::Access::CREATE, config); + auto E_x = series.iterations[0].meshes["E"]["x"]; + openPMD::Dataset ds(openPMD::Datatype::INT, {1000}); + E_x.resetDataset(ds); + std::vector data(1000, 0); + E_x.storeChunk(data, {0}, {1000}); + + auto E_y = series.iterations[0].meshes["E"]["y"]; // let's override the global compression settings ds.options = datasetConfig; - E_y.resetDataset( ds ); - E_y.storeChunk( data, { 0 }, { 1000 } ); + E_y.resetDataset(ds); + E_y.storeChunk(data, {0}, {1000}); series.flush(); }; - write( "../samples/jsonConfiguredBP4.bp", writeConfigBP4 ); - write( "../samples/jsonConfiguredBP3.bp", writeConfigBP3 ); - write( "../samples/jsonConfiguredNull.bp", writeConfigNull ); + write("../samples/jsonConfiguredBP4.bp", writeConfigBP4); + 
write("../samples/jsonConfiguredBP3.bp", writeConfigBP3); + write("../samples/jsonConfiguredNull.bp", writeConfigNull); // BP3 engine writes files, BP4 writes directories - REQUIRE( - openPMD::auxiliary::file_exists( "../samples/jsonConfiguredBP3.bp" ) ); - REQUIRE( openPMD::auxiliary::directory_exists( - "../samples/jsonConfiguredBP4.bp" ) ); + REQUIRE(openPMD::auxiliary::file_exists("../samples/jsonConfiguredBP3.bp")); + REQUIRE(openPMD::auxiliary::directory_exists( + "../samples/jsonConfiguredBP4.bp")); std::string readConfigBP3 = R"END( { @@ -3462,92 +3959,94 @@ TEST_CASE( "serial_adios2_json_config", "[serial][adios2]" ) } } )END"; - auto const read = []( std::string const & filename, - std::string const & config ) { + auto const read = [](std::string const &filename, + std::string const &config) { // let's write the config to a file and read it from there std::fstream file; - file.open( "../samples/read_config.json", std::ios_base::out ); + file.open("../samples/read_config.json", std::ios_base::out); file << config; file.flush(); openPMD::Series series( - filename, openPMD::Access::READ_ONLY, - " @ ../samples/read_config.json " ); - auto E_x = series.iterations[ 0 ].meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 1 ); - REQUIRE( E_x.getExtent()[ 0 ] == 1000 ); - auto chunk = E_x.loadChunk< int >( { 0 }, { 1000 } ); + filename, + openPMD::Access::READ_ONLY, + " @ ../samples/read_config.json "); + auto E_x = series.iterations[0].meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 1); + REQUIRE(E_x.getExtent()[0] == 1000); + auto chunk = E_x.loadChunk({0}, {1000}); series.flush(); - for( size_t i = 0; i < 1000; ++i ) + for (size_t i = 0; i < 1000; ++i) { - REQUIRE( chunk.get()[ i ] == 0 ); + REQUIRE(chunk.get()[i] == 0); } - auto E_y = series.iterations[ 0 ].meshes[ "E" ][ "x" ]; - REQUIRE( E_y.getDimensionality() == 1 ); - REQUIRE( E_y.getExtent()[ 0 ] == 1000 ); - chunk = E_y.loadChunk< int >( { 0 }, { 1000 } ); + auto E_y = series.iterations[0].meshes["E"]["x"]; + REQUIRE(E_y.getDimensionality() == 1); + REQUIRE(E_y.getExtent()[0] == 1000); + chunk = E_y.loadChunk({0}, {1000}); series.flush(); - for( size_t i = 0; i < 1000; ++i ) + for (size_t i = 0; i < 1000; ++i) { - REQUIRE( chunk.get()[ i ] == 0 ); + REQUIRE(chunk.get()[i] == 0); } }; - read( "../samples/jsonConfiguredBP3.bp", readConfigBP3 ); - read( "../samples/jsonConfiguredBP4.bp", readConfigBP4 ); + read("../samples/jsonConfiguredBP3.bp", readConfigBP3); + read("../samples/jsonConfiguredBP4.bp", readConfigBP4); } -void -bp4_steps( std::string const & file, std::string const & options_write, std::string const & options_read ) +void bp4_steps( + std::string const &file, + std::string const &options_write, + std::string const &options_read) { { - Series writeSeries( file, Access::CREATE, options_write ); + Series writeSeries(file, Access::CREATE, options_write); auto iterations = writeSeries.writeIterations(); - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - auto iteration = iterations[ i ]; - auto E = iteration.meshes[ "E" ]; - auto E_x = E[ "x" ]; + auto iteration = iterations[i]; + auto E = iteration.meshes["E"]; + auto E_x = E["x"]; E.setAttribute( "vector_of_string", - std::vector< std::string >{ "vector", "of", "string" } ); - E_x.resetDataset( - openPMD::Dataset( openPMD::Datatype::INT, { 10 } ) ); - std::vector< int > data( 10, i ); - E_x.storeChunk( data, { 0 }, { 10 } ); + std::vector{"vector", "of", "string"}); + E_x.resetDataset(openPMD::Dataset(openPMD::Datatype::INT, {10})); 
+            std::vector<int> data(10, i);
+            E_x.storeChunk(data, {0}, {10});
             iteration.close();
         }
     }
-    if( options_read.empty() )
+    if (options_read.empty())
     {
         return;
     }
-    Series readSeries( file, Access::READ_ONLY, options_read );
+    Series readSeries(file, Access::READ_ONLY, options_read);
     size_t last_iteration_index = 0;
-    for( auto iteration : readSeries.readIterations() )
+    for (auto iteration : readSeries.readIterations())
     {
-        auto E = iteration.meshes[ "E" ];
-        auto E_x = E[ "x" ];
+        auto E = iteration.meshes["E"];
+        auto E_x = E["x"];
         REQUIRE(
-            E.getAttribute( "vector_of_string" )
-                .get< std::vector< std::string > >() ==
-            std::vector< std::string >{ "vector", "of", "string" } );
-        REQUIRE( E_x.getDimensionality() == 1 );
-        REQUIRE( E_x.getExtent()[ 0 ] == 10 );
-        auto chunk = E_x.loadChunk< int >( { 0 }, { 10 } );
+            E.getAttribute("vector_of_string")
+                .get<std::vector<std::string> >() ==
+            std::vector<std::string>{"vector", "of", "string"});
+        REQUIRE(E_x.getDimensionality() == 1);
+        REQUIRE(E_x.getExtent()[0] == 10);
+        auto chunk = E_x.loadChunk<int>({0}, {10});
         iteration.close(); // @todo replace with ::close()
-        for( size_t i = 0; i < 10; ++i )
+        for (size_t i = 0; i < 10; ++i)
         {
-            REQUIRE( chunk.get()[ i ] == int(iteration.iterationIndex) );
+            REQUIRE(chunk.get()[i] == int(iteration.iterationIndex));
         }
         last_iteration_index = iteration.iterationIndex;
     }
-    REQUIRE( last_iteration_index == 9 );
+    REQUIRE(last_iteration_index == 9);
 }

-TEST_CASE( "bp4_steps", "[serial][adios2]" )
+TEST_CASE("bp4_steps", "[serial][adios2]")
 {
     std::string useSteps = R"(
 {
@@ -3581,12 +4080,12 @@ TEST_CASE( "bp4_steps", "[serial][adios2]" )
 }
 )";
     // sing the yes no song
-    bp4_steps( "../samples/bp4steps_yes_yes.bp", useSteps, useSteps );
-    bp4_steps( "../samples/bp4steps_no_yes.bp", dontUseSteps, useSteps );
-    bp4_steps( "../samples/bp4steps_yes_no.bp", useSteps, dontUseSteps );
-    bp4_steps( "../samples/bp4steps_no_no.bp", dontUseSteps, dontUseSteps );
-    bp4_steps( "../samples/nullcore.bp", nullcore, "" );
-    bp4_steps( "../samples/bp4steps_default.bp", "{}", "{}" );
+    bp4_steps("../samples/bp4steps_yes_yes.bp", useSteps, useSteps);
+    bp4_steps("../samples/bp4steps_no_yes.bp", dontUseSteps, useSteps);
+    bp4_steps("../samples/bp4steps_yes_no.bp", useSteps, dontUseSteps);
+    bp4_steps("../samples/bp4steps_no_no.bp", dontUseSteps, dontUseSteps);
+    bp4_steps("../samples/nullcore.bp", nullcore, "");
+    bp4_steps("../samples/bp4steps_default.bp", "{}", "{}");

     /*
      * Do this whole thing once more, but this time use the new attribute
@@ -3615,110 +4114,107 @@ TEST_CASE( "bp4_steps", "[serial][adios2]" )
 }
 )";
     // sing the yes no song
-    bp4_steps( "../samples/newlayout_bp4steps_yes_yes.bp", useSteps, useSteps );
+    bp4_steps("../samples/newlayout_bp4steps_yes_yes.bp", useSteps, useSteps);
     bp4_steps(
-        "../samples/newlayout_bp4steps_yes_no.bp", useSteps, dontUseSteps );
+        "../samples/newlayout_bp4steps_yes_no.bp", useSteps, dontUseSteps);
     bp4_steps(
-        "../samples/newlayout_bp4steps_no_yes.bp", dontUseSteps, useSteps );
+        "../samples/newlayout_bp4steps_no_yes.bp", dontUseSteps, useSteps);
     bp4_steps(
-        "../samples/newlayout_bp4steps_no_no.bp", dontUseSteps, dontUseSteps );
+        "../samples/newlayout_bp4steps_no_no.bp", dontUseSteps, dontUseSteps);
 }
 #endif

-void
-serial_iterator( std::string const & file )
+void serial_iterator(std::string const &file)
 {
     constexpr Extent::value_type extent = 1000;
     {
-        Series writeSeries( file, Access::CREATE );
+        Series writeSeries(file, Access::CREATE);
         auto iterations = writeSeries.writeIterations();
-        for( size_t i = 0; i < 10; ++i )
+        for 
(size_t i = 0; i < 10; ++i) { - auto iteration = iterations[ i ]; - auto E_x = iteration.meshes[ "E" ][ "x" ]; - E_x.resetDataset( - openPMD::Dataset( openPMD::Datatype::INT, { 1000 } ) ); - std::vector< int > data( 1000, i ); - E_x.storeChunk( data, { 0 }, { 1000 } ); + auto iteration = iterations[i]; + auto E_x = iteration.meshes["E"]["x"]; + E_x.resetDataset(openPMD::Dataset(openPMD::Datatype::INT, {1000})); + std::vector data(1000, i); + E_x.storeChunk(data, {0}, {1000}); iteration.close(); } } - Series readSeries( file, Access::READ_ONLY ); + Series readSeries(file, Access::READ_ONLY); size_t last_iteration_index = 0; - for( auto iteration : readSeries.readIterations() ) + for (auto iteration : readSeries.readIterations()) { - auto E_x = iteration.meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 1 ); - REQUIRE( E_x.getExtent()[ 0 ] == extent ); - auto chunk = E_x.loadChunk< int >( { 0 }, { extent } ); + auto E_x = iteration.meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 1); + REQUIRE(E_x.getExtent()[0] == extent); + auto chunk = E_x.loadChunk({0}, {extent}); iteration.close(); - for( size_t i = 0; i < extent; ++i ) + for (size_t i = 0; i < extent; ++i) { - REQUIRE( chunk.get()[ i ] == int(iteration.iterationIndex) ); + REQUIRE(chunk.get()[i] == int(iteration.iterationIndex)); } last_iteration_index = iteration.iterationIndex; } - REQUIRE( last_iteration_index == 9 ); + REQUIRE(last_iteration_index == 9); } -TEST_CASE( "serial_iterator", "[serial][adios2]" ) +TEST_CASE("serial_iterator", "[serial][adios2]") { - for( auto const & t : testedFileExtensions() ) + for (auto const &t : testedFileExtensions()) { - serial_iterator( "../samples/serial_iterator_filebased_%T." + t ); - serial_iterator( "../samples/serial_iterator_groupbased." + t ); + serial_iterator("../samples/serial_iterator_filebased_%T." + t); + serial_iterator("../samples/serial_iterator_groupbased." 
+ t); } } -void -variableBasedSingleIteration( std::string const & file ) +void variableBasedSingleIteration(std::string const &file) { constexpr Extent::value_type extent = 1000; { - Series writeSeries( file, Access::CREATE ); - writeSeries.setIterationEncoding( IterationEncoding::variableBased ); + Series writeSeries(file, Access::CREATE); + writeSeries.setIterationEncoding(IterationEncoding::variableBased); auto iterations = writeSeries.writeIterations(); - auto iteration = writeSeries.iterations[ 0 ]; - auto E_x = iteration.meshes[ "E" ][ "x" ]; - E_x.resetDataset( - openPMD::Dataset( openPMD::Datatype::INT, { 1000 } ) ); - std::vector< int > data( 1000, 0 ); - std::iota( data.begin(), data.end(), 0 ); - E_x.storeChunk( data, { 0 }, { 1000 } ); + auto iteration = writeSeries.iterations[0]; + auto E_x = iteration.meshes["E"]["x"]; + E_x.resetDataset(openPMD::Dataset(openPMD::Datatype::INT, {1000})); + std::vector data(1000, 0); + std::iota(data.begin(), data.end(), 0); + E_x.storeChunk(data, {0}, {1000}); writeSeries.flush(); } { - Series readSeries( file, Access::READ_ONLY ); + Series readSeries(file, Access::READ_ONLY); - auto E_x = readSeries.iterations[ 0 ].meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 1 ); - REQUIRE( E_x.getExtent()[ 0 ] == extent ); - auto chunk = E_x.loadChunk< int >( { 0 }, { extent } ); + auto E_x = readSeries.iterations[0].meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 1); + REQUIRE(E_x.getExtent()[0] == extent); + auto chunk = E_x.loadChunk({0}, {extent}); readSeries.flush(); - for( size_t i = 0; i < extent; ++i ) + for (size_t i = 0; i < extent; ++i) { - REQUIRE( chunk.get()[ i ] == int( i ) ); + REQUIRE(chunk.get()[i] == int(i)); } } } -TEST_CASE( "variableBasedSingleIteration", "[serial][adios2]" ) +TEST_CASE("variableBasedSingleIteration", "[serial][adios2]") { - for( auto const & t : testedFileExtensions() ) + for (auto const &t : testedFileExtensions()) { - variableBasedSingleIteration( "../samples/variableBasedSingleIteration." + t ); + variableBasedSingleIteration( + "../samples/variableBasedSingleIteration." 
+                t);
     }
 }

 namespace epsilon
 {
-template< typename T >
+template <typename T>
 struct AreEqual
 {
-    static bool areEqual( T float1, T float2 )
+    static bool areEqual(T float1, T float2)
     {
 #if 0
         printf(
@@ -3729,232 +4225,229 @@ struct AreEqual
             std::abs( float1 - float2 ) <=
             std::numeric_limits< T >::epsilon() );
 #endif
-        return std::abs( float1 - float2 ) <=
-            std::numeric_limits< T >::epsilon();
+        return std::abs(float1 - float2) <= std::numeric_limits<T>::epsilon();
     }
 };
-template< typename T >
-struct AreEqual< std::vector< T > >
+template <typename T>
+struct AreEqual<std::vector<T> >
 {
-    static bool areEqual( std::vector< T > v1, std::vector< T > v2 )
+    static bool areEqual(std::vector<T> v1, std::vector<T> v2)
     {
         return v1.size() == v2.size() &&
-            std::equal(
-                v1.begin(), v1.end(), v2.begin(), AreEqual< T >::areEqual );
+            std::equal(v1.begin(), v1.end(), v2.begin(), AreEqual<T>::areEqual);
     }
 };
-template< typename T >
-bool areEqual( T a, T b )
+template <typename T>
+bool areEqual(T a, T b)
 {
-    return AreEqual< T >::areEqual( std::move( a ), std::move( b ) );
-}
+    return AreEqual<T>::areEqual(std::move(a), std::move(b));
 }
+} // namespace epsilon

 #if openPMD_HAVE_ADIOS2
-TEST_CASE( "git_adios2_sample_test", "[serial][adios2]" )
+TEST_CASE("git_adios2_sample_test", "[serial][adios2]")
 {
     using namespace epsilon;
-    using vecstring = std::vector< std::string >;
-    using vecdouble = std::vector< double >;
-    using arr7 = std::array< double, 7 >;
+    using vecstring = std::vector<std::string>;
+    using vecdouble = std::vector<double>;
+    using arr7 = std::array<double, 7>;
     std::string const samplePath =
         "../samples/git-sample/3d-bp4/example-3d-bp4.bp";
-    if( !auxiliary::directory_exists( samplePath ) )
+    if (!auxiliary::directory_exists(samplePath))
     {
-        std::cerr << "git sample '"
-                  << samplePath << "' not accessible \n";
+        std::cerr << "git sample '" << samplePath << "' not accessible \n";
         return;
     }
-    Series o( samplePath, Access::READ_ONLY );
-    REQUIRE( o.openPMD() == "1.1.0" );
-    REQUIRE( o.openPMDextension() == 0 );
-    REQUIRE( o.basePath() == "/data/%T/" );
-    REQUIRE( o.meshesPath() == "fields/" );
-    REQUIRE( o.particlesPath() == "particles/" );
-    REQUIRE( o.iterationEncoding() == IterationEncoding::groupBased );
-    REQUIRE( o.iterationFormat() == "/data/%T/" );
-    REQUIRE( o.name() == "example-3d-bp4" );
+    Series o(samplePath, Access::READ_ONLY);
+    REQUIRE(o.openPMD() == "1.1.0");
+    REQUIRE(o.openPMDextension() == 0);
+    REQUIRE(o.basePath() == "/data/%T/");
+    REQUIRE(o.meshesPath() == "fields/");
+    REQUIRE(o.particlesPath() == "particles/");
+    REQUIRE(o.iterationEncoding() == IterationEncoding::groupBased);
+    REQUIRE(o.iterationFormat() == "/data/%T/");
+    REQUIRE(o.name() == "example-3d-bp4");

-    REQUIRE( o.iterations.size() == 1 );
-    REQUIRE( o.iterations.count( 550 ) == 1 );
+    REQUIRE(o.iterations.size() == 1);
+    REQUIRE(o.iterations.count(550) == 1);

-    Iteration it = o.iterations[ 550 ];
+    Iteration it = o.iterations[550];

-    REQUIRE( areEqual( it.time< double >(), 5.5e+02 ) );
-    REQUIRE( areEqual( it.timeUnitSI(), 1.39e-16 ) );
+    REQUIRE(areEqual(it.time<double>(), 5.5e+02));
+    REQUIRE(areEqual(it.timeUnitSI(), 1.39e-16));
     REQUIRE(
-        it.getAttribute( "particleBoundary" ).get< vecstring >() ==
-        vecstring(6, "absorbing" ));
+        it.getAttribute("particleBoundary").get<vecstring>() ==
+        vecstring(6, "absorbing"));
     REQUIRE(
-        it.getAttribute( "particleBoundaryParameters" ).get< vecstring >() ==
-        vecstring(6, "without field correction" ));
-    REQUIRE( areEqual(
-        it.getAttribute( "mue0" ).get< float >(), 2.1550322708208114e-04f ) );
-    REQUIRE( areEqual(
-        it.getAttribute( "eps0" ).get< float >(), 
4.6403017578125000e+03f ) ); - REQUIRE( areEqual( it.dt< double >(), 1. ) ); - - REQUIRE( it.meshes.size() == 9 ); - REQUIRE( it.meshes.count( "E" ) == 1 ); - REQUIRE( it.meshes.count( "B" ) == 1 ); - REQUIRE( it.meshes.count( "e_all_chargeDensity" ) == 1 ); - REQUIRE( it.meshes.count( "e_all_energyDensity" ) == 1 ); - REQUIRE( it.meshes.count( "e_all_particleMomentumComponent" ) == 1 ); - REQUIRE( it.meshes.count( "i_all_chargeDensity" ) == 1 ); - REQUIRE( it.meshes.count( "i_all_energyDensity" ) == 1 ); - REQUIRE( it.meshes.count( "i_all_particleMomentumComponent" ) == 1 ); + it.getAttribute("particleBoundaryParameters").get() == + vecstring(6, "without field correction")); + REQUIRE(areEqual( + it.getAttribute("mue0").get(), 2.1550322708208114e-04f)); + REQUIRE(areEqual( + it.getAttribute("eps0").get(), 4.6403017578125000e+03f)); + REQUIRE(areEqual(it.dt(), 1.)); + + REQUIRE(it.meshes.size() == 9); + REQUIRE(it.meshes.count("E") == 1); + REQUIRE(it.meshes.count("B") == 1); + REQUIRE(it.meshes.count("e_all_chargeDensity") == 1); + REQUIRE(it.meshes.count("e_all_energyDensity") == 1); + REQUIRE(it.meshes.count("e_all_particleMomentumComponent") == 1); + REQUIRE(it.meshes.count("i_all_chargeDensity") == 1); + REQUIRE(it.meshes.count("i_all_energyDensity") == 1); + REQUIRE(it.meshes.count("i_all_particleMomentumComponent") == 1); // internal PIConGPU restarting information: - REQUIRE( it.meshes.count( "picongpu_idProvider" ) == 1 ); - - Mesh E = it.meshes[ "E" ]; - REQUIRE( E.geometry() == Mesh::Geometry::cartesian ); - REQUIRE( E.dataOrder() == Mesh::DataOrder::C ); - REQUIRE( E.axisLabels() == vecstring{ "z", "y", "x" } ); - REQUIRE( areEqual( - E.gridSpacing< double >(), + REQUIRE(it.meshes.count("picongpu_idProvider") == 1); + + Mesh E = it.meshes["E"]; + REQUIRE(E.geometry() == Mesh::Geometry::cartesian); + REQUIRE(E.dataOrder() == Mesh::DataOrder::C); + REQUIRE(E.axisLabels() == vecstring{"z", "y", "x"}); + REQUIRE(areEqual( + E.gridSpacing(), vecdouble{ 1.7416797876358032e+00, 1.7416797876358032e+00, - 1.7416797876358032e+00 } ) ); - REQUIRE( areEqual( E.gridGlobalOffset(), vecdouble{ 0., 0., 0. } ) ); - REQUIRE( areEqual( E.gridUnitSI(), 5.3662849982000001e-08 ) ); - REQUIRE( E.unitDimension() == arr7{ { 1, 1, -3, -1, 0, 0, 0 } } ); - REQUIRE( areEqual( E.timeOffset< double >(), 0. ) ); - - REQUIRE( E.size() == 3 ); - REQUIRE( E.count( "x" ) == 1 ); - REQUIRE( E.count( "y" ) == 1 ); - REQUIRE( E.count( "z" ) == 1 ); - - MeshRecordComponent E_x = E[ "x" ]; - REQUIRE( E_x.unitSI() == 9.5223987717519668e+12 ); - REQUIRE( E_x.position< double >() == vecdouble{ 0.5, 0., 0. 
} ); - REQUIRE( E_x.getDatatype() == Datatype::FLOAT ); - REQUIRE( E_x.getExtent() == Extent{ 32, 96, 64 } ); - REQUIRE( E_x.getDimensionality() == 3 ); - - float E_x_data[] = { -5.4223355837166309e-03, -5.5848993360996246e-03, - -5.7896804064512253e-03, -5.5147800594568253e-03, - -5.6304289028048515e-03, -5.8255749754607677e-03, - -5.5910930968821049e-03, -5.7385643012821674e-03, - -5.8903801254928112e-03, -5.3768581710755825e-03, - -5.5543538182973862e-03, -5.7734064757823944e-03, - -5.4399720393121243e-03, -5.5731507018208504e-03, - -5.7369144633412361e-03, -5.5461097508668900e-03, - -5.6645260192453861e-03, -5.8231339789927006e-03, - -5.4240114986896515e-03, -5.5798939429223537e-03, - -5.7610240764915943e-03, -5.4240110330283642e-03, - -5.5275037884712219e-03, -5.7047260925173759e-03, - -5.5050505325198174e-03, -5.6199040263891220e-03, - -5.7577718980610371e-03 }; - auto E_x_loaded = E_x.loadChunk< float >( { 16, 32, 32 }, { 3, 3, 3 } ); + 1.7416797876358032e+00})); + REQUIRE(areEqual(E.gridGlobalOffset(), vecdouble{0., 0., 0.})); + REQUIRE(areEqual(E.gridUnitSI(), 5.3662849982000001e-08)); + REQUIRE(E.unitDimension() == arr7{{1, 1, -3, -1, 0, 0, 0}}); + REQUIRE(areEqual(E.timeOffset(), 0.)); + + REQUIRE(E.size() == 3); + REQUIRE(E.count("x") == 1); + REQUIRE(E.count("y") == 1); + REQUIRE(E.count("z") == 1); + + MeshRecordComponent E_x = E["x"]; + REQUIRE(E_x.unitSI() == 9.5223987717519668e+12); + REQUIRE(E_x.position() == vecdouble{0.5, 0., 0.}); + REQUIRE(E_x.getDatatype() == Datatype::FLOAT); + REQUIRE(E_x.getExtent() == Extent{32, 96, 64}); + REQUIRE(E_x.getDimensionality() == 3); + + float E_x_data[] = {-5.4223355837166309e-03, -5.5848993360996246e-03, + -5.7896804064512253e-03, -5.5147800594568253e-03, + -5.6304289028048515e-03, -5.8255749754607677e-03, + -5.5910930968821049e-03, -5.7385643012821674e-03, + -5.8903801254928112e-03, -5.3768581710755825e-03, + -5.5543538182973862e-03, -5.7734064757823944e-03, + -5.4399720393121243e-03, -5.5731507018208504e-03, + -5.7369144633412361e-03, -5.5461097508668900e-03, + -5.6645260192453861e-03, -5.8231339789927006e-03, + -5.4240114986896515e-03, -5.5798939429223537e-03, + -5.7610240764915943e-03, -5.4240110330283642e-03, + -5.5275037884712219e-03, -5.7047260925173759e-03, + -5.5050505325198174e-03, -5.6199040263891220e-03, + -5.7577718980610371e-03}; + auto E_x_loaded = E_x.loadChunk({16, 32, 32}, {3, 3, 3}); E_x.seriesFlush(); - for( size_t i = 0; i < 27; ++i ) - { - REQUIRE( areEqual( E_x_data[ i ], E_x_loaded.get()[ i ] ) ); - } - - MeshRecordComponent E_y = E[ "y" ]; - REQUIRE( E_y.unitSI() == 9.5223987717519668e+12 ); - REQUIRE( E_y.position< double >() == vecdouble{ 0., 0.5, 0. 
} ); - REQUIRE( E_y.getDatatype() == Datatype::FLOAT ); - REQUIRE( E_y.getExtent() == Extent{ 32, 96, 64 } ); - REQUIRE( E_y.getDimensionality() == 3 ); - float E_y_data[] = { 1.9600236555561423e-04, 1.9210868049412966e-04, - 1.1112097854493186e-04, 9.0100722445640713e-05, - 1.2735779455397278e-04, 1.2597699242178351e-04, - -4.5422813855111599e-05, 2.8805377951357514e-05, - 8.3214777987450361e-05, 1.3271786156110466e-04, - 1.0011527047026902e-04, 5.8875859394902363e-05, - 2.5147232008748688e-05, 7.1912618295755237e-05, - 6.2157545471563935e-05, -8.6973857833072543e-05, - -8.1858233897946775e-06, -2.2509128029923886e-05, - 6.0511985793709755e-05, 4.9726430006558076e-05, - -1.7196462067659013e-05, -3.0460794732789509e-05, - 5.9892886383750010e-06, -1.4382616200236953e-06, - -1.3747414050158113e-04, -8.0163808888755739e-05, - -3.5486038541421294e-05 }; - auto E_y_loaded = E_y.loadChunk< float >( { 16, 32, 32 }, { 3, 3, 3 } ); + for (size_t i = 0; i < 27; ++i) + { + REQUIRE(areEqual(E_x_data[i], E_x_loaded.get()[i])); + } + + MeshRecordComponent E_y = E["y"]; + REQUIRE(E_y.unitSI() == 9.5223987717519668e+12); + REQUIRE(E_y.position() == vecdouble{0., 0.5, 0.}); + REQUIRE(E_y.getDatatype() == Datatype::FLOAT); + REQUIRE(E_y.getExtent() == Extent{32, 96, 64}); + REQUIRE(E_y.getDimensionality() == 3); + float E_y_data[] = {1.9600236555561423e-04, 1.9210868049412966e-04, + 1.1112097854493186e-04, 9.0100722445640713e-05, + 1.2735779455397278e-04, 1.2597699242178351e-04, + -4.5422813855111599e-05, 2.8805377951357514e-05, + 8.3214777987450361e-05, 1.3271786156110466e-04, + 1.0011527047026902e-04, 5.8875859394902363e-05, + 2.5147232008748688e-05, 7.1912618295755237e-05, + 6.2157545471563935e-05, -8.6973857833072543e-05, + -8.1858233897946775e-06, -2.2509128029923886e-05, + 6.0511985793709755e-05, 4.9726430006558076e-05, + -1.7196462067659013e-05, -3.0460794732789509e-05, + 5.9892886383750010e-06, -1.4382616200236953e-06, + -1.3747414050158113e-04, -8.0163808888755739e-05, + -3.5486038541421294e-05}; + auto E_y_loaded = E_y.loadChunk({16, 32, 32}, {3, 3, 3}); E_y.seriesFlush(); - for( size_t i = 0; i < 27; ++i ) - { - REQUIRE( areEqual( E_y_data[ i ], E_y_loaded.get()[ i ] ) ); - } - - MeshRecordComponent E_z = E[ "z" ]; - REQUIRE( E_z.unitSI() == 9.5223987717519668e+12 ); - REQUIRE( E_z.position< double >() == vecdouble{ 0., 0., 0.5 } ); - REQUIRE( E_z.getDatatype() == Datatype::FLOAT ); - REQUIRE( E_z.getExtent() == Extent{ 32, 96, 64 } ); - REQUIRE( E_z.getDimensionality() == 3 ); - float E_z_data[] = { -1.3665637234225869e-03, -1.3941071229055524e-03, - -1.4618652639910579e-03, -1.4528072206303477e-03, - -1.4355779858306050e-03, -1.4925430295988917e-03, - -1.6604729462414980e-03, -1.5911811497062445e-03, - -1.6420837491750717e-03, -1.1975304223597050e-03, - -1.2183464132249355e-03, -1.3470118865370750e-03, - -1.2645993847399950e-03, -1.2775690993294120e-03, - -1.3621025718748569e-03, -1.4198675053194165e-03, - -1.3927087420597672e-03, -1.3995743356645107e-03, - -9.9509279243648052e-04, -1.0950352298095822e-03, - -1.2131386902183294e-03, -1.0829739039763808e-03, - -1.1384176323190331e-03, -1.2189601548016071e-03, - -1.2028686469420791e-03, -1.1917919619008899e-03, - -1.2309787562116981e-03 }; - auto E_z_loaded = E_z.loadChunk< float >( { 16, 32, 32 }, { 3, 3, 3 } ); + for (size_t i = 0; i < 27; ++i) + { + REQUIRE(areEqual(E_y_data[i], E_y_loaded.get()[i])); + } + + MeshRecordComponent E_z = E["z"]; + REQUIRE(E_z.unitSI() == 9.5223987717519668e+12); + REQUIRE(E_z.position() == vecdouble{0., 0., 
+    REQUIRE(E_z.getDatatype() == Datatype::FLOAT);
+    REQUIRE(E_z.getExtent() == Extent{32, 96, 64});
+    REQUIRE(E_z.getDimensionality() == 3);
+    float E_z_data[] = {-1.3665637234225869e-03, -1.3941071229055524e-03,
+                        -1.4618652639910579e-03, -1.4528072206303477e-03,
+                        -1.4355779858306050e-03, -1.4925430295988917e-03,
+                        -1.6604729462414980e-03, -1.5911811497062445e-03,
+                        -1.6420837491750717e-03, -1.1975304223597050e-03,
+                        -1.2183464132249355e-03, -1.3470118865370750e-03,
+                        -1.2645993847399950e-03, -1.2775690993294120e-03,
+                        -1.3621025718748569e-03, -1.4198675053194165e-03,
+                        -1.3927087420597672e-03, -1.3995743356645107e-03,
+                        -9.9509279243648052e-04, -1.0950352298095822e-03,
+                        -1.2131386902183294e-03, -1.0829739039763808e-03,
+                        -1.1384176323190331e-03, -1.2189601548016071e-03,
+                        -1.2028686469420791e-03, -1.1917919619008899e-03,
+                        -1.2309787562116981e-03};
+    auto E_z_loaded = E_z.loadChunk<float>({16, 32, 32}, {3, 3, 3});
     E_z.seriesFlush();
-    for( size_t i = 0; i < 27; ++i )
+    for (size_t i = 0; i < 27; ++i)
     {
-        REQUIRE( areEqual( E_z_data[ i ], E_z_loaded.get()[ i ] ) );
+        REQUIRE(areEqual(E_z_data[i], E_z_loaded.get()[i]));
     }

-    REQUIRE( it.particles.size() == 2 );
+    REQUIRE(it.particles.size() == 2);

-    REQUIRE( it.particles.count( "e" ) == 1 );
-    REQUIRE( it.particles.count( "i" ) == 1 );
+    REQUIRE(it.particles.count("e") == 1);
+    REQUIRE(it.particles.count("i") == 1);

-    ParticleSpecies electrons = it.particles[ "e" ];
+    ParticleSpecies electrons = it.particles["e"];

-    REQUIRE( electrons.size() == 6 );
-    REQUIRE( electrons.count( "charge" ) == 1 );
-    REQUIRE( electrons.count( "mass" ) == 1 );
-    REQUIRE( electrons.count( "momentum" ) == 1 );
-    REQUIRE( electrons.count( "position" ) == 1 );
-    REQUIRE( electrons.count( "positionOffset" ) == 1 );
-    REQUIRE( electrons.count( "weighting" ) == 1 );
+    REQUIRE(electrons.size() == 6);
+    REQUIRE(electrons.count("charge") == 1);
+    REQUIRE(electrons.count("mass") == 1);
+    REQUIRE(electrons.count("momentum") == 1);
+    REQUIRE(electrons.count("position") == 1);
+    REQUIRE(electrons.count("positionOffset") == 1);
+    REQUIRE(electrons.count("weighting") == 1);

-    Record charge = electrons[ "charge" ];
-    REQUIRE( charge.unitDimension() == arr7{ { 0., 0., 1., 1., 0., 0., 0. } } );
-    REQUIRE( charge.timeOffset< double >() == 0.0 );
+    Record charge = electrons["charge"];
+    REQUIRE(charge.unitDimension() == arr7{{0., 0., 1., 1., 0., 0., 0.}});
+    REQUIRE(charge.timeOffset<double>() == 0.0);

-    REQUIRE( charge.size() == 1 );
-    REQUIRE( charge.count( RecordComponent::SCALAR ) == 1 );
+    REQUIRE(charge.size() == 1);
+    REQUIRE(charge.count(RecordComponent::SCALAR) == 1);

-    RecordComponent & charge_scalar = charge[ RecordComponent::SCALAR ];
-    REQUIRE( areEqual( charge_scalar.unitSI(), 5.2323446053125002e-17 ) );
-    REQUIRE( charge_scalar.getDatatype() == Datatype::DOUBLE );
-    REQUIRE( charge_scalar.getDimensionality() == 1 );
-    REQUIRE( charge_scalar.getExtent() == Extent{ 96781 } );
+    RecordComponent &charge_scalar = charge[RecordComponent::SCALAR];
+    REQUIRE(areEqual(charge_scalar.unitSI(), 5.2323446053125002e-17));
+    REQUIRE(charge_scalar.getDatatype() == Datatype::DOUBLE);
+    REQUIRE(charge_scalar.getDimensionality() == 1);
+    REQUIRE(charge_scalar.getExtent() == Extent{96781});
     double const charge_value = -3.0620612669736147e-3;
-    REQUIRE( charge_scalar.getAttribute("value").get< double >() == charge_value );
+    REQUIRE(charge_scalar.getAttribute("value").get<double>() == charge_value);

-    Record & mass = electrons[ "mass" ];
-    REQUIRE( mass.unitDimension() == arr7{ { 0., 1., 0., 0., 0., 0., 0. } } );
-    REQUIRE( mass.timeOffset< double >() == 0.0 );
+    Record &mass = electrons["mass"];
+    REQUIRE(mass.unitDimension() == arr7{{0., 1., 0., 0., 0., 0., 0.}});
+    REQUIRE(mass.timeOffset<double>() == 0.0);

-    REQUIRE( mass.size() == 1 );
-    REQUIRE( mass.count( RecordComponent::SCALAR ) == 1 );
+    REQUIRE(mass.size() == 1);
+    REQUIRE(mass.count(RecordComponent::SCALAR) == 1);

-    RecordComponent & mass_scalar = mass[ RecordComponent::SCALAR ];
-    REQUIRE( areEqual( mass_scalar.unitSI(), 2.9749182215581054e-28 ) );
-    REQUIRE( mass_scalar.getDatatype() == Datatype::DOUBLE );
-    REQUIRE( mass_scalar.getDimensionality() == 1 );
-    REQUIRE( mass_scalar.getExtent() == Extent{ 96781 } );
+    RecordComponent &mass_scalar = mass[RecordComponent::SCALAR];
+    REQUIRE(areEqual(mass_scalar.unitSI(), 2.9749182215581054e-28));
+    REQUIRE(mass_scalar.getDatatype() == Datatype::DOUBLE);
+    REQUIRE(mass_scalar.getDimensionality() == 1);
+    REQUIRE(mass_scalar.getExtent() == Extent{96781});
     double const mass_value = 3.0620612669736147e-3;
-    REQUIRE( mass_scalar.getAttribute("value").get< double >() == mass_value );
+    REQUIRE(mass_scalar.getAttribute("value").get<double>() == mass_value);

     float position_x_data[] = {
@@ -3965,20 +4458,17 @@ TEST_CASE( "git_adios2_sample_test", "[serial][adios2]" )
         3.0412188172340393e-01,
         5.9818041324615479e-01,
         8.8785779476165771e-01,
-        4.2273962497711182e-01 };
+        4.2273962497711182e-01};
     auto position_x_loaded =
-        electrons[ "position" ][ "x" ].loadChunk< float >( { 32 }, { 9 } );
-    auto charge_loaded =
-        charge_scalar.loadChunk< double >( { 32 }, { 9 } );
-    auto mass_loaded =
-        mass_scalar.loadChunk< double >( { 32 }, { 9 } );
+        electrons["position"]["x"].loadChunk<float>({32}, {9});
+    auto charge_loaded = charge_scalar.loadChunk<double>({32}, {9});
+    auto mass_loaded = mass_scalar.loadChunk<double>({32}, {9});
     electrons.seriesFlush();
-    for( size_t i = 0; i < 9; ++i )
+    for (size_t i = 0; i < 9; ++i)
     {
-        REQUIRE(
-            areEqual( position_x_data[ i ], position_x_loaded.get()[ i ] ) );
-        REQUIRE( areEqual( charge_value, charge_loaded.get()[ i ] ) );
-        REQUIRE( areEqual( mass_value, mass_loaded.get()[ i ] ) );
+        REQUIRE(areEqual(position_x_data[i], position_x_loaded.get()[i]));
+        REQUIRE(areEqual(charge_value, charge_loaded.get()[i]));
+        REQUIRE(areEqual(mass_value, mass_loaded.get()[i]));
     }

     float position_y_data[] = {
@@ -3990,14 +4480,13 @@ TEST_CASE( "git_adios2_sample_test", "[serial][adios2]" )
         2.6738378405570984e-01,
         8.2502347230911255e-01,
         9.2121642827987671e-01,
-        9.0402549505233765e-01 };
+        9.0402549505233765e-01};
     auto position_y_loaded =
-        electrons[ "position" ][ "y" ].loadChunk< float >( { 32 }, { 9 } );
+        electrons["position"]["y"].loadChunk<float>({32}, {9});
     electrons.seriesFlush();
-    for( size_t i = 0; i < 9; ++i )
+    for (size_t i = 0; i < 9; ++i)
     {
-        REQUIRE(
-            areEqual( position_y_data[ i ], position_y_loaded.get()[ i ] ) );
+        REQUIRE(areEqual(position_y_data[i], position_y_loaded.get()[i]));
     }

     float position_z_data[] = {
@@ -4009,127 +4498,142 @@ TEST_CASE( "git_adios2_sample_test", "[serial][adios2]" )
         7.4185878038406372e-01,
         4.5986607670783997e-01,
         2.2350004315376282e-01,
-        5.4723143577575684e-01 };
+        5.4723143577575684e-01};
     auto position_z_loaded =
-        electrons[ "position" ][ "z" ].loadChunk< float >( { 32 }, { 9 } );
+        electrons["position"]["z"].loadChunk<float>({32}, {9});
     electrons.seriesFlush();
-    for( size_t i = 0; i < 9; ++i )
+    for (size_t i = 0; i < 9; ++i)
     {
-        REQUIRE(
-            areEqual( position_z_data[ i ], position_z_loaded.get()[ i ] ) );
+        REQUIRE(areEqual(position_z_data[i], position_z_loaded.get()[i]));
     }
 }

-void variableBasedSeries( std::string const & file )
+void variableBasedSeries(std::string const &file)
 {
     constexpr Extent::value_type extent = 1000;
     {
-        Series writeSeries( file, Access::CREATE );
-        writeSeries.setIterationEncoding( IterationEncoding::variableBased );
+        Series writeSeries(file, Access::CREATE);
+        writeSeries.setIterationEncoding(IterationEncoding::variableBased);
         REQUIRE(
-            writeSeries.iterationEncoding() == IterationEncoding::variableBased );
-        if( writeSeries.backend() == "ADIOS1" )
+            writeSeries.iterationEncoding() ==
+            IterationEncoding::variableBased);
+        if (writeSeries.backend() == "ADIOS1")
         {
             return;
         }

         auto iterations = writeSeries.writeIterations();
-        for( size_t i = 0; i < 10; ++i )
+        for (size_t i = 0; i < 10; ++i)
         {
-            auto iteration = iterations[ i ];
-            auto E_x = iteration.meshes[ "E" ][ "x" ];
-            E_x.resetDataset( { openPMD::Datatype::INT, { 1000 } } );
-            std::vector< int > data( 1000, i );
-            E_x.storeChunk( data, { 0 }, { 1000 } );
+            auto iteration = iterations[i];
+            auto E_x = iteration.meshes["E"]["x"];
+            E_x.resetDataset({openPMD::Datatype::INT, {1000}});
+            std::vector<int> data(1000, i);
+            E_x.storeChunk(data, {0}, {1000});
+
+            if (i > 2)
+            {
+                iteration.setAttribute(
+                    "iteration_is_larger_than_two", "it truly is");
+            }

             // this tests changing extents and dimensionalities
             // across iterations
-            auto E_y = iteration.meshes[ "E" ][ "y" ];
+            auto E_y = iteration.meshes["E"]["y"];
             unsigned dimensionality = i % 3 + 1;
             unsigned len = i + 1;
-            Extent changingExtent( dimensionality, len );
-            E_y.resetDataset( { openPMD::Datatype::INT, changingExtent } );
-            std::vector< int > changingData(
-                std::pow( len, dimensionality ), dimensionality );
+            Extent changingExtent(dimensionality, len);
+            E_y.resetDataset({openPMD::Datatype::INT, changingExtent});
+            std::vector<int> changingData(
+                std::pow(len, dimensionality), dimensionality);
             E_y.storeChunk(
-                changingData, Offset( dimensionality, 0 ), changingExtent );
+                changingData, Offset(dimensionality, 0), changingExtent);

             // this tests datasets that are present in one iteration, but not
             // in others
-            auto E_z = iteration.meshes[ "E" ][ std::to_string( i ) ];
-            E_z.resetDataset( { Datatype::INT, { 1 } } );
-            E_z.makeConstant( i );
+            auto E_z = iteration.meshes["E"][std::to_string(i)];
+            E_z.resetDataset({Datatype::INT, {1}});
+            E_z.makeConstant(i);

             // this tests attributes that are present in one iteration, but not
             // in others
-            iteration.meshes[ "E" ].setAttribute(
-                "attr_" + std::to_string( i ), i );
+            iteration.meshes["E"].setAttribute("attr_" + std::to_string(i), i);

             iteration.close();
         }
     }

-    REQUIRE( auxiliary::directory_exists( file ) );
+    REQUIRE(auxiliary::directory_exists(file));

-    auto testRead = [ &file, &extent ]( std::string const & jsonConfig )
-    {
-        Series readSeries( file, Access::READ_ONLY, jsonConfig );
+    auto testRead = [&file, &extent](std::string const &jsonConfig) {
+        Series readSeries(file, Access::READ_ONLY, jsonConfig);

         size_t last_iteration_index = 0;
-        for( auto iteration : readSeries.readIterations() )
+        for (auto iteration : readSeries.readIterations())
         {
-            auto E_x = iteration.meshes[ "E" ][ "x" ];
-            REQUIRE( E_x.getDimensionality() == 1 );
-            REQUIRE( E_x.getExtent()[ 0 ] == extent );
-            auto chunk = E_x.loadChunk< int >( { 0 }, { extent } );
+            if (iteration.iterationIndex > 2)
+            {
+                REQUIRE(
+                    iteration.getAttribute("iteration_is_larger_than_two")
+                        .get<std::string>() == "it truly is");
+            }
+            else
+            {
+                REQUIRE_FALSE(iteration.containsAttribute(
+                    "iteration_is_larger_than_two"));
+            }
+
+            auto E_x = iteration.meshes["E"]["x"];
+            REQUIRE(E_x.getDimensionality() == 1);
+            REQUIRE(E_x.getExtent()[0] == extent);
+            auto chunk = E_x.loadChunk<int>({0}, {extent});

             iteration.close();
-            for( size_t i = 0; i < extent; ++i )
+            for (size_t i = 0; i < extent; ++i)
             {
-                REQUIRE( chunk.get()[ i ] == int( iteration.iterationIndex ) );
+                REQUIRE(chunk.get()[i] == int(iteration.iterationIndex));
             }

-            auto E_y = iteration.meshes[ "E" ][ "y" ];
+            auto E_y = iteration.meshes["E"]["y"];
             unsigned dimensionality = iteration.iterationIndex % 3 + 1;
             unsigned len = iteration.iterationIndex + 1;
-            Extent changingExtent( dimensionality, len );
-            REQUIRE( E_y.getExtent() == changingExtent );
+            Extent changingExtent(dimensionality, len);
+            REQUIRE(E_y.getExtent() == changingExtent);

             // this loop ensures that only the recordcomponent ["E"]["i"] is
             // present where i == iteration.iterationIndex
-            for( uint64_t otherIteration = 0; otherIteration < 10;
-                 ++otherIteration )
+            for (uint64_t otherIteration = 0; otherIteration < 10;
+                 ++otherIteration)
             {
                 // component is present <=> (otherIteration == i)
                 REQUIRE(
-                    iteration.meshes[ "E" ].contains(
-                        std::to_string( otherIteration ) ) ==
-                    ( otherIteration == iteration.iterationIndex ) );
+                    iteration.meshes["E"].contains(
+                        std::to_string(otherIteration)) ==
+                    (otherIteration == iteration.iterationIndex));
                 REQUIRE(
-                    iteration.meshes[ "E" ].containsAttribute(
-                        "attr_" + std::to_string( otherIteration ) ) ==
-                    ( otherIteration == iteration.iterationIndex ) );
+                    iteration.meshes["E"].containsAttribute(
+                        "attr_" + std::to_string(otherIteration)) ==
+                    (otherIteration == iteration.iterationIndex));
             }

             REQUIRE(
-                iteration
-                    .meshes[ "E" ][ std::to_string( iteration.iterationIndex ) ]
-                    .getAttribute( "value" )
-                    .get< int >() == int( iteration.iterationIndex ) );
+                iteration.meshes["E"][std::to_string(iteration.iterationIndex)]
+                    .getAttribute("value")
+                    .get<int>() == int(iteration.iterationIndex));
             REQUIRE(
-                iteration.meshes[ "E" ]
+                iteration.meshes["E"]
                     .getAttribute(
-                        "attr_" + std::to_string( iteration.iterationIndex ) )
-                    .get< int >() == int( iteration.iterationIndex ) );
+                        "attr_" + std::to_string(iteration.iterationIndex))
+                    .get<int>() == int(iteration.iterationIndex));

             last_iteration_index = iteration.iterationIndex;
         }
-        REQUIRE( last_iteration_index == 9 );
+        REQUIRE(last_iteration_index == 9);
     };

-    testRead( "{\"defer_iteration_parsing\": true}" );
-    testRead( "{\"defer_iteration_parsing\": false}" );
+    testRead("{\"defer_iteration_parsing\": true}");
+    testRead("{\"defer_iteration_parsing\": false}");
 }

-TEST_CASE( "variableBasedSeries", "[serial][adios2]" )
+TEST_CASE("variableBasedSeries", "[serial][adios2]")
 {
-    variableBasedSeries( "../samples/variableBasedSeries.bp" );
+    variableBasedSeries("../samples/variableBasedSeries.bp");
 }

 void variableBasedParticleData()
@@ -4140,29 +4644,28 @@ void variableBasedParticleData()
     {
         // open file for writing
         Series series =
-            Series( "../samples/variableBasedParticles.bp", Access::CREATE );
-        series.setIterationEncoding( IterationEncoding::variableBased );
+            Series("../samples/variableBasedParticles.bp", Access::CREATE);
+        series.setIterationEncoding(IterationEncoding::variableBased);

-        Datatype datatype = determineDatatype< position_t >();
-        Extent global_extent = { length };
-        Dataset dataset = Dataset( datatype, global_extent );
-        std::shared_ptr< position_t > local_data(
-            new position_t[ length ],
-            []( position_t const * ptr ) { delete[] ptr; } );
+        Datatype datatype = determineDatatype<position_t>();
+        Extent global_extent = {length};
+        Dataset dataset = Dataset(datatype, global_extent);
+        std::shared_ptr<position_t> local_data(
+            new position_t[length],
+            [](position_t const *ptr) { delete[] ptr; });

         WriteIterations iterations = series.writeIterations();
-        for( size_t i = 0; i < 10; ++i )
+        for (size_t i = 0; i < 10; ++i)
         {
-            Iteration iteration = iterations[ i ];
-            Record electronPositions = iteration.particles[ "e" ][ "position" ];
+            Iteration iteration = iterations[i];
+            Record electronPositions = iteration.particles["e"]["position"];

-            std::iota(
-                local_data.get(), local_data.get() + length, i * length );
-            for( auto const & dim : { "x", "y", "z" } )
+            std::iota(local_data.get(), local_data.get() + length, i * length);
+            for (auto const &dim : {"x", "y", "z"})
             {
-                RecordComponent pos = electronPositions[ dim ];
-                pos.resetDataset( dataset );
-                pos.storeChunk( local_data, Offset{ 0 }, global_extent );
+                RecordComponent pos = electronPositions[dim];
+                pos.resetDataset(dataset);
+                pos.storeChunk(local_data, Offset{0}, global_extent);
             }
             iteration.close();
         }
@@ -4171,43 +4674,43 @@ void variableBasedParticleData()
     {
         // open file for reading
         Series series =
-            Series( "../samples/variableBasedParticles.bp", Access::READ_ONLY );
+            Series("../samples/variableBasedParticles.bp", Access::READ_ONLY);

-        for( IndexedIteration iteration : series.readIterations() )
+        for (IndexedIteration iteration : series.readIterations())
         {
-            Record electronPositions = iteration.particles[ "e" ][ "position" ];
-            std::array< std::shared_ptr< position_t >, 3 > loadedChunks;
-            std::array< Extent, 3 > extents;
-            std::array< std::string, 3 > const dimensions{ { "x", "y", "z" } };
+            Record electronPositions = iteration.particles["e"]["position"];
+            std::array<std::shared_ptr<position_t>, 3> loadedChunks;
+            std::array<Extent, 3> extents;
+            std::array<std::string, 3> const dimensions{{"x", "y", "z"}};

-            for( size_t i = 0; i < 3; ++i )
+            for (size_t i = 0; i < 3; ++i)
             {
-                std::string dim = dimensions[ i ];
-                RecordComponent rc = electronPositions[ dim ];
-                loadedChunks[ i ] = rc.loadChunk< position_t >(
-                    Offset( rc.getDimensionality(), 0 ), rc.getExtent() );
-                extents[ i ] = rc.getExtent();
+                std::string dim = dimensions[i];
+                RecordComponent rc = electronPositions[dim];
+                loadedChunks[i] = rc.loadChunk<position_t>(
+                    Offset(rc.getDimensionality(), 0), rc.getExtent());
+                extents[i] = rc.getExtent();
             }

             iteration.close();

-            for( size_t i = 0; i < 3; ++i )
+            for (size_t i = 0; i < 3; ++i)
             {
-                std::string dim = dimensions[ i ];
-                Extent const & extent = extents[ i ];
-                auto chunk = loadedChunks[ i ];
-                for( size_t j = 0; j < extent[ 0 ]; ++j )
+                std::string dim = dimensions[i];
+                Extent const &extent = extents[i];
+                auto chunk = loadedChunks[i];
+                for (size_t j = 0; j < extent[0]; ++j)
                 {
                     REQUIRE(
-                        chunk.get()[ j ] ==
-                        iteration.iterationIndex * length + j );
+                        chunk.get()[j] ==
+                        iteration.iterationIndex * length + j);
                 }
             }
         }
     }
 }

-TEST_CASE( "variableBasedParticleData", "[serial][adios2]" )
+TEST_CASE("variableBasedParticleData", "[serial][adios2]")
 {
     variableBasedParticleData();
 }
@@ -4215,17 +4718,17 @@ TEST_CASE( "variableBasedParticleData", "[serial][adios2]" )

 #if openPMD_HAVE_ADIOS2
 #ifdef ADIOS2_HAVE_BZIP2
-TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" )
+TEST_CASE("automatically_deactivate_span", "[serial][adios2]")
 {
     // automatically (de)activate span-based storeChunking
     {
-        Series write( "../samples/span_based.bp", Access::CREATE );
-        auto E_uncompressed = write.iterations[ 0 ].meshes[ "E" ][ "x" ];
-        auto E_compressed = write.iterations[ 0 ].meshes[ "E" ][ "y" ];
+        Series write("../samples/span_based.bp", Access::CREATE);
+        auto E_uncompressed = write.iterations[0].meshes["E"]["x"];
+        auto E_compressed = write.iterations[0].meshes["E"]["y"];

-        Dataset ds{ Datatype::INT, { 10 } };
+        Dataset ds{Datatype::INT, {10}};

-        E_uncompressed.resetDataset( ds );
+        E_uncompressed.resetDataset(ds);

         std::string compression = R"END(
{
@@ -4241,26 +4744,25 @@ TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" )
})END";

         ds.options = compression;
-        E_compressed.resetDataset( ds );
+        E_compressed.resetDataset(ds);

         bool spanWorkaround = false;
-        E_uncompressed.storeChunk< int >(
-            { 0 }, { 10 }, [ &spanWorkaround ]( size_t size ) {
+        E_uncompressed.storeChunk<int>(
+            {0}, {10}, [&spanWorkaround](size_t size) {
                 spanWorkaround = true;
-                return std::shared_ptr< int >(
-                    new int[ size ]{}, []( auto * ptr ) { delete[] ptr; } );
-            } );
+                return std::shared_ptr<int>(
+                    new int[size]{}, [](auto *ptr) { delete[] ptr; });
+            });

-        REQUIRE( !spanWorkaround );
+        REQUIRE(!spanWorkaround);

-        E_compressed.storeChunk< int >(
-            { 0 }, { 10 }, [ &spanWorkaround ]( size_t size ) {
-                spanWorkaround = true;
-                return std::shared_ptr< int >(
-                    new int[ size ]{}, []( auto * ptr ) { delete[] ptr; } );
-            } );
+        E_compressed.storeChunk<int>({0}, {10}, [&spanWorkaround](size_t size) {
+            spanWorkaround = true;
+            return std::shared_ptr<int>(
+                new int[size]{}, [](auto *ptr) { delete[] ptr; });
+        });

-        REQUIRE( spanWorkaround );
+        REQUIRE(spanWorkaround);
     }

     // enable span-based API indiscriminately
@@ -4271,13 +4773,13 @@ TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" )
             "use_span_based_put": true
         }
     })END";
-        Series write( "../samples/span_based.bp", Access::CREATE, enable );
-        auto E_uncompressed = write.iterations[ 0 ].meshes[ "E" ][ "x" ];
-        auto E_compressed = write.iterations[ 0 ].meshes[ "E" ][ "y" ];
+        Series write("../samples/span_based.bp", Access::CREATE, enable);
+        auto E_uncompressed = write.iterations[0].meshes["E"]["x"];
+        auto E_compressed = write.iterations[0].meshes["E"]["y"];

-        Dataset ds{ Datatype::INT, { 10 } };
+        Dataset ds{Datatype::INT, {10}};

-        E_uncompressed.resetDataset( ds );
+        E_uncompressed.resetDataset(ds);

         std::string compression = R"END(
{
@@ -4293,38 +4795,38 @@ TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" )
"automatically_deactivate_span", "[serial][adios2]" ) })END"; ds.options = compression; - E_compressed.resetDataset( ds ); + E_compressed.resetDataset(ds); bool spanWorkaround = false; - E_uncompressed.storeChunk< int >( - { 0 }, { 10 }, [ &spanWorkaround ]( size_t size ) { + E_uncompressed.storeChunk( + {0}, {10}, [&spanWorkaround](size_t size) { spanWorkaround = true; - return std::shared_ptr< int >( - new int[ size ]{}, []( auto * ptr ) { delete[] ptr; } ); - } ); + return std::shared_ptr( + new int[size]{}, [](auto *ptr) { delete[] ptr; }); + }); - REQUIRE( !spanWorkaround ); + REQUIRE(!spanWorkaround); try { - E_compressed.storeChunk< int >( - { 0 }, { 10 }, [ &spanWorkaround ]( size_t size ) { + E_compressed.storeChunk( + {0}, {10}, [&spanWorkaround](size_t size) { spanWorkaround = true; - return std::shared_ptr< int >( - new int[ size ]{}, []( auto * ptr ) { delete[] ptr; } ); - } ); + return std::shared_ptr( + new int[size]{}, [](auto *ptr) { delete[] ptr; }); + }); } - catch( std::invalid_argument const & e ) + catch (std::invalid_argument const &e) { /* - * Using the span-based API in combination with compression is - * unsupported in ADIOS2. - * In newer versions of ADIOS2, an error is thrown. - */ + * Using the span-based API in combination with compression is + * unsupported in ADIOS2. + * In newer versions of ADIOS2, an error is thrown. + */ std::cerr << "Ignoring expected error: " << e.what() << std::endl; } - REQUIRE( !spanWorkaround ); + REQUIRE(!spanWorkaround); } // disable span-based API indiscriminately @@ -4335,13 +4837,13 @@ TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" ) "use_span_based_put": false } })END"; - Series write( "../samples/span_based.bp", Access::CREATE, disable ); - auto E_uncompressed = write.iterations[ 0 ].meshes[ "E" ][ "x" ]; - auto E_compressed = write.iterations[ 0 ].meshes[ "E" ][ "y" ]; + Series write("../samples/span_based.bp", Access::CREATE, disable); + auto E_uncompressed = write.iterations[0].meshes["E"]["x"]; + auto E_compressed = write.iterations[0].meshes["E"]["y"]; - Dataset ds{ Datatype::INT, { 10 } }; + Dataset ds{Datatype::INT, {10}}; - E_uncompressed.resetDataset( ds ); + E_uncompressed.resetDataset(ds); std::string compression = R"END( { @@ -4357,84 +4859,80 @@ TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" ) })END"; ds.options = compression; - E_compressed.resetDataset( ds ); + E_compressed.resetDataset(ds); bool spanWorkaround = false; - E_uncompressed.storeChunk< int >( - { 0 }, { 10 }, [ &spanWorkaround ]( size_t size ) { + E_uncompressed.storeChunk( + {0}, {10}, [&spanWorkaround](size_t size) { spanWorkaround = true; - return std::shared_ptr< int >( - new int[ size ]{}, []( auto * ptr ) { delete[] ptr; } ); - } ); + return std::shared_ptr( + new int[size]{}, [](auto *ptr) { delete[] ptr; }); + }); - REQUIRE( spanWorkaround ); + REQUIRE(spanWorkaround); spanWorkaround = false; - E_compressed.storeChunk< int >( - { 0 }, { 10 }, [ &spanWorkaround ]( size_t size ) { - spanWorkaround = true; - return std::shared_ptr< int >( - new int[ size ]{}, []( auto * ptr ) { delete[] ptr; } ); - } ); + E_compressed.storeChunk({0}, {10}, [&spanWorkaround](size_t size) { + spanWorkaround = true; + return std::shared_ptr( + new int[size]{}, [](auto *ptr) { delete[] ptr; }); + }); - REQUIRE( spanWorkaround ); + REQUIRE(spanWorkaround); } } #endif #endif // @todo Upon switching to ADIOS2 2.7.0, test this the other way around also -void -iterate_nonstreaming_series( - std::string const & file, bool 
+void iterate_nonstreaming_series(
+    std::string const &file, bool variableBasedLayout)
 {
     constexpr size_t extent = 100;
     {
-        Series writeSeries( file, Access::CREATE );
-        if( variableBasedLayout )
+        Series writeSeries(file, Access::CREATE);
+        if (variableBasedLayout)
         {
-            if( writeSeries.backend() != "ADIOS2" )
+            if (writeSeries.backend() != "ADIOS2")
             {
                 return;
             }
-            writeSeries.setIterationEncoding(
-                IterationEncoding::variableBased );
+            writeSeries.setIterationEncoding(IterationEncoding::variableBased);
         }
         // use conventional API to write iterations
         auto iterations = writeSeries.iterations;
-        for( size_t i = 0; i < 10; ++i )
+        for (size_t i = 0; i < 10; ++i)
         {
-            auto iteration = iterations[ i ];
-            auto E_x = iteration.meshes[ "E" ][ "x" ];
+            auto iteration = iterations[i];
+            auto E_x = iteration.meshes["E"]["x"];
             E_x.resetDataset(
-                openPMD::Dataset( openPMD::Datatype::INT, { 2, extent } ) );
-            std::vector< int > data( extent, i );
-            E_x.storeChunk( data, { 0, 0 }, { 1, extent } );
+                openPMD::Dataset(openPMD::Datatype::INT, {2, extent}));
+            std::vector<int> data(extent, i);
+            E_x.storeChunk(data, {0, 0}, {1, extent});
             bool taskSupportedByBackend = true;
-            DynamicMemoryView< int > memoryView = E_x.storeChunk< int >(
-                { 1, 0 },
-                { 1, extent },
+            DynamicMemoryView<int> memoryView = E_x.storeChunk<int>(
+                {1, 0},
+                {1, extent},
                 /*
                  * Hijack the functor that is called for buffer creation.
                  * This allows us to check if the backend has explicit support
                  * for buffer creation or if the fallback implementation is
                  * used.
                  */
-                [ &taskSupportedByBackend ]( size_t size )
-                {
+                [&taskSupportedByBackend](size_t size) {
                     taskSupportedByBackend = false;
-                    return std::shared_ptr< int >{
-                        new int[ size ], []( auto * ptr ) { delete[] ptr; } };
-                } );
-            if( writeSeries.backend() == "ADIOS2" )
+                    return std::shared_ptr<int>{
+                        new int[size], [](auto *ptr) { delete[] ptr; }};
+                });
+            if (writeSeries.backend() == "ADIOS2")
             {
                 // that backend must support span creation
-                REQUIRE( taskSupportedByBackend );
+                REQUIRE(taskSupportedByBackend);
             }
             auto span = memoryView.currentBuffer();
-            for( size_t j = 0; j < span.size(); ++j )
+            for (size_t j = 0; j < span.size(); ++j)
             {
-                span[ j ] = j;
+                span[j] = j;
             }

             /*
@@ -4443,17 +4941,17 @@ iterate_nonstreaming_series(
              */
             auto scalarMesh =
                 iteration
-                    .meshes[ "i_energyDensity" ][ MeshRecordComponent::SCALAR ];
-            scalarMesh.resetDataset( Dataset( Datatype::INT, { 5 } ) );
+                    .meshes["i_energyDensity"][MeshRecordComponent::SCALAR];
+            scalarMesh.resetDataset(Dataset(Datatype::INT, {5}));
             auto scalarSpan =
-                scalarMesh.storeChunk< int >( { 0 }, { 5 } ).currentBuffer();
-            for( size_t j = 0; j < scalarSpan.size(); ++j )
+                scalarMesh.storeChunk<int>({0}, {5}).currentBuffer();
+            for (size_t j = 0; j < scalarSpan.size(); ++j)
             {
-                scalarSpan[ j ] = j;
+                scalarSpan[j] = j;
             }
             // we encourage manually closing iterations, but it should not
             // matter so let's do the switcharoo for this test
-            if( i % 2 == 0 )
+            if (i % 2 == 0)
             {
                 writeSeries.flush();
             }
@@ -4464,22 +4962,23 @@ iterate_nonstreaming_series(
         }
     }

-    Series readSeries( file, Access::READ_ONLY, "{\"defer_iteration_parsing\": true}" );
+    Series readSeries(
+        file, Access::READ_ONLY, "{\"defer_iteration_parsing\": true}");

     size_t last_iteration_index = 0;
     // conventionally written Series must be readable with streaming-aware API!
-    for( auto iteration : readSeries.readIterations() )
+    for (auto iteration : readSeries.readIterations())
     {
         // ReadIterations takes care of Iteration::open()ing iterations
-        auto E_x = iteration.meshes[ "E" ][ "x" ];
-        REQUIRE( E_x.getDimensionality() == 2 );
-        REQUIRE( E_x.getExtent()[ 0 ] == 2 );
-        REQUIRE( E_x.getExtent()[ 1 ] == extent );
-        auto chunk = E_x.loadChunk< int >( { 0, 0 }, { 1, extent } );
-        auto chunk2 = E_x.loadChunk< int >( { 1, 0 }, { 1, extent } );
+        auto E_x = iteration.meshes["E"]["x"];
+        REQUIRE(E_x.getDimensionality() == 2);
+        REQUIRE(E_x.getExtent()[0] == 2);
+        REQUIRE(E_x.getExtent()[1] == extent);
+        auto chunk = E_x.loadChunk<int>({0, 0}, {1, extent});
+        auto chunk2 = E_x.loadChunk<int>({1, 0}, {1, extent});

         // we encourage manually closing iterations, but it should not matter
         // so let's do the switcharoo for this test
-        if( last_iteration_index % 2 == 0 )
+        if (last_iteration_index % 2 == 0)
         {
             readSeries.flush();
         }
@@ -4488,203 +4987,201 @@ iterate_nonstreaming_series(
             iteration.close();
         }

-        for( size_t i = 0; i < extent; ++i )
+        for (size_t i = 0; i < extent; ++i)
         {
-            REQUIRE( chunk.get()[ i ] == int(iteration.iterationIndex) );
-            REQUIRE( chunk2.get()[ i ] == int(i) );
+            REQUIRE(chunk.get()[i] == int(iteration.iterationIndex));
+            REQUIRE(chunk2.get()[i] == int(i));
         }

         last_iteration_index = iteration.iterationIndex;
     }
-    REQUIRE( last_iteration_index == 9 );
+    REQUIRE(last_iteration_index == 9);
 }

-TEST_CASE( "iterate_nonstreaming_series", "[serial][adios2]" )
+TEST_CASE("iterate_nonstreaming_series", "[serial][adios2]")
 {
-    for( auto const & t : testedFileExtensions() )
+    for (auto const &t : testedFileExtensions())
     {
         iterate_nonstreaming_series(
-            "../samples/iterate_nonstreaming_series_filebased_%T." + t, false );
+            "../samples/iterate_nonstreaming_series_filebased_%T." + t, false);
         iterate_nonstreaming_series(
-            "../samples/iterate_nonstreaming_series_groupbased." + t, false );
+            "../samples/iterate_nonstreaming_series_groupbased." + t, false);
         iterate_nonstreaming_series(
-            "../samples/iterate_nonstreaming_series_variablebased." + t, true );
+            "../samples/iterate_nonstreaming_series_variablebased." + t, true);
     }
 }

-void
-extendDataset( std::string const & ext )
+void extendDataset(std::string const &ext)
 {
     std::string filename = "../samples/extendDataset." + ext;
-    std::vector< int > data1( 25 );
-    std::vector< int > data2( 25 );
-    std::iota( data1.begin(), data1.end(), 0 );
-    std::iota( data2.begin(), data2.end(), 25 );
+    std::vector<int> data1(25);
+    std::vector<int> data2(25);
+    std::iota(data1.begin(), data1.end(), 0);
+    std::iota(data2.begin(), data2.end(), 25);

     {
-        Series write( filename, Access::CREATE );
-        if( ext == "bp" && write.backend() != "ADIOS2" )
+        Series write(filename, Access::CREATE);
+        if (ext == "bp" && write.backend() != "ADIOS2")
         {
             // dataset resizing unsupported in ADIOS1
             return;
         }
         // only one iteration written anyway
-        write.setIterationEncoding( IterationEncoding::variableBased );
+        write.setIterationEncoding(IterationEncoding::variableBased);

-        Dataset ds1{ Datatype::INT, { 5, 5 }, "{ \"resizable\": true }" };
-        Dataset ds2{ Datatype::INT, { 10, 5 } };
+        Dataset ds1{Datatype::INT, {5, 5}, "{ \"resizable\": true }"};
+        Dataset ds2{Datatype::INT, {10, 5}};

         // array record component -> array record component
         // should work
-        auto E_x = write.iterations[ 0 ].meshes[ "E" ][ "x" ];
-        E_x.resetDataset( ds1 );
-        E_x.storeChunk( data1, { 0, 0 }, { 5, 5 } );
+        auto E_x = write.iterations[0].meshes["E"]["x"];
+        E_x.resetDataset(ds1);
+        E_x.storeChunk(data1, {0, 0}, {5, 5});
         write.flush();
-        E_x.resetDataset( ds2 );
-        E_x.storeChunk( data2, { 5, 0 }, { 5, 5 } );
+        E_x.resetDataset(ds2);
+        E_x.storeChunk(data2, {5, 0}, {5, 5});

         // constant record component -> constant record component
         // should work
-        auto E_y = write.iterations[ 0 ].meshes[ "E" ][ "y" ];
-        E_y.resetDataset( ds1 );
-        E_y.makeConstant( 10 );
+        auto E_y = write.iterations[0].meshes["E"]["y"];
+        E_y.resetDataset(ds1);
+        E_y.makeConstant(10);
         write.flush();
-        E_y.resetDataset( ds2 );
+        E_y.resetDataset(ds2);
         write.flush();

         // empty record component -> empty record component
         // should work
         // this does not make a lot of sense since we don't allow shrinking,
         // but let's just reset it to itself
-        auto E_z = write.iterations[ 0 ].meshes[ "E" ][ "z" ];
-        E_z.makeEmpty< int >( 3 );
+        auto E_z = write.iterations[0].meshes["E"]["z"];
+        E_z.makeEmpty<int>(3);
         write.flush();
-        E_z.makeEmpty< int >( 3 );
+        E_z.makeEmpty<int>(3);
         write.flush();

         // empty record component -> empty record component
         // (created by resetDataset)
         // should work
-        auto E_a = write.iterations[ 0 ].meshes[ "E" ][ "a" ];
-        E_a.makeEmpty< int >( 3 );
+        auto E_a = write.iterations[0].meshes["E"]["a"];
+        E_a.makeEmpty<int>(3);
         write.flush();
-        E_a.resetDataset( Dataset( Datatype::UNDEFINED, { 0, 1, 2 } ) );
+        E_a.resetDataset(Dataset(Datatype::UNDEFINED, {0, 1, 2}));
         write.flush();

         // constant record component -> empty record component
         // should fail, since this implies shrinking
-        auto E_b = write.iterations[ 0 ].meshes[ "E" ][ "b" ];
-        E_b.resetDataset( ds1 );
-        E_b.makeConstant( 10 );
+        auto E_b = write.iterations[0].meshes["E"]["b"];
+        E_b.resetDataset(ds1);
+        E_b.makeConstant(10);
         write.flush();
-        REQUIRE_THROWS( E_b.makeEmpty< int >( 2 ) );
+        REQUIRE_THROWS(E_b.makeEmpty<int>(2));

         // empty record component -> constant record component
         // should work
-        auto E_c = write.iterations[ 0 ].meshes[ "E" ][ "c" ];
-        E_c.makeEmpty< int >( 3 );
+        auto E_c = write.iterations[0].meshes["E"]["c"];
+        E_c.makeEmpty<int>(3);
         write.flush();
-        E_c.resetDataset( Dataset( { 1, 1, 2 } ) );
+        E_c.resetDataset(Dataset({1, 1, 2}));
         write.flush();

         // array record component -> constant record component
         // should fail
-        auto E_d = write.iterations[ 0 ].meshes[ "E" ][ "d" ];
-        E_d.resetDataset( ds1 );
-        E_d.storeChunk( data1, { 0, 0 }, { 5, 5 } );
+        auto E_d = write.iterations[0].meshes["E"]["d"];
write.iterations[0].meshes["E"]["d"]; + E_d.resetDataset(ds1); + E_d.storeChunk(data1, {0, 0}, {5, 5}); write.flush(); - REQUIRE_THROWS( E_d.makeConstant( 5 ) ); + REQUIRE_THROWS(E_d.makeConstant(5)); // array record component -> empty record component // should fail - auto E_e = write.iterations[ 0 ].meshes[ "E" ][ "e" ]; - E_e.resetDataset( ds1 ); - E_e.storeChunk( data1, { 0, 0 }, { 5, 5 } ); + auto E_e = write.iterations[0].meshes["E"]["e"]; + E_e.resetDataset(ds1); + E_e.storeChunk(data1, {0, 0}, {5, 5}); write.flush(); - REQUIRE_THROWS( E_e.makeEmpty< int >( 5 ) ); + REQUIRE_THROWS(E_e.makeEmpty(5)); } { - Series read( filename, Access::READ_ONLY ); - auto E_x = read.iterations[ 0 ].meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getExtent() == Extent{ 10, 5 } ); - auto chunk = E_x.loadChunk< int >( { 0, 0 }, { 10, 5 } ); + Series read(filename, Access::READ_ONLY); + auto E_x = read.iterations[0].meshes["E"]["x"]; + REQUIRE(E_x.getExtent() == Extent{10, 5}); + auto chunk = E_x.loadChunk({0, 0}, {10, 5}); read.flush(); - for( size_t i = 0; i < 50; ++i ) + for (size_t i = 0; i < 50; ++i) { - REQUIRE( chunk.get()[ i ] == int( i ) ); + REQUIRE(chunk.get()[i] == int(i)); } - auto E_y = read.iterations[ 0 ].meshes[ "E" ][ "y" ]; - REQUIRE( E_y.getExtent() == Extent{ 10, 5 } ); + auto E_y = read.iterations[0].meshes["E"]["y"]; + REQUIRE(E_y.getExtent() == Extent{10, 5}); - auto E_z = read.iterations[ 0 ].meshes[ "E" ][ "z" ]; - REQUIRE( E_z.getExtent() == Extent{ 0, 0, 0 } ); + auto E_z = read.iterations[0].meshes["E"]["z"]; + REQUIRE(E_z.getExtent() == Extent{0, 0, 0}); - auto E_a = read.iterations[ 0 ].meshes[ "E" ][ "a" ]; - REQUIRE( E_a.getExtent() == Extent{ 0, 1, 2 } ); + auto E_a = read.iterations[0].meshes["E"]["a"]; + REQUIRE(E_a.getExtent() == Extent{0, 1, 2}); // E_b could not be changed - auto E_c = read.iterations[ 0 ].meshes[ "E" ][ "c" ]; - REQUIRE( E_c.getExtent() == Extent{ 1, 1, 2 } ); - REQUIRE( !E_c.empty() ); + auto E_c = read.iterations[0].meshes["E"]["c"]; + REQUIRE(E_c.getExtent() == Extent{1, 1, 2}); + REQUIRE(!E_c.empty()); } } -TEST_CASE( "extend_dataset", "[serial]" ) +TEST_CASE("extend_dataset", "[serial]") { - extendDataset( "json" ); + extendDataset("json"); #if openPMD_HAVE_ADIOS2 - extendDataset( "bp" ); + extendDataset("bp"); #endif #if openPMD_HAVE_HDF5 // extensible datasets require chunking // skip this test for if chunking is disabled - if( auxiliary::getEnvString( "OPENPMD_HDF5_CHUNKS", "auto" ) != "none" ) + if (auxiliary::getEnvString("OPENPMD_HDF5_CHUNKS", "auto") != "none") { extendDataset("h5"); } #endif } - -void deferred_parsing( std::string const & extension ) +void deferred_parsing(std::string const &extension) { - if( auxiliary::directory_exists( "../samples/lazy_parsing" ) ) - auxiliary::remove_directory( "../samples/lazy_parsing" ); + if (auxiliary::directory_exists("../samples/lazy_parsing")) + auxiliary::remove_directory("../samples/lazy_parsing"); std::string const basename = "../samples/lazy_parsing/lazy_parsing_"; // create a single iteration { - Series series( basename + "%06T." + extension, Access::CREATE ); - std::vector< float > buffer( 20 ); - std::iota( buffer.begin(), buffer.end(), 0.f ); - auto dataset = series.iterations[ 1000 ].meshes[ "E" ][ "x" ]; - dataset.resetDataset( { Datatype::FLOAT, { 20 } } ); - dataset.storeChunk( buffer, { 0 }, { 20 } ); + Series series(basename + "%06T." 
+        std::vector<float> buffer(20);
+        std::iota(buffer.begin(), buffer.end(), 0.f);
+        auto dataset = series.iterations[1000].meshes["E"]["x"];
+        dataset.resetDataset({Datatype::FLOAT, {20}});
+        dataset.storeChunk(buffer, {0}, {20});
         series.flush();
     }
     // create some empty pseudo files
     // if the reader tries accessing them it's game over
     {
-        for( size_t i = 0; i < 1000; i += 100 )
+        for (size_t i = 0; i < 1000; i += 100)
         {
-            std::string infix = std::to_string( i );
+            std::string infix = std::to_string(i);
             std::string padding;
-            for( size_t j = 0; j < 6 - infix.size(); ++j )
+            for (size_t j = 0; j < 6 - infix.size(); ++j)
             {
                 padding += "0";
             }
             infix = padding + infix;
             std::ofstream file;
-            file.open( basename + infix + "." + extension );
+            file.open(basename + infix + "." + extension);
             file.close();
         }
     }
@@ -4692,76 +5189,73 @@ void deferred_parsing( std::string const & extension )
         Series series(
             basename + "%06T." + extension,
             Access::READ_ONLY,
-            "{\"defer_iteration_parsing\": true}" );
-        auto dataset = series.iterations[ 1000 ]
-                           .open()
-                           .meshes[ "E" ][ "x" ]
-                           .loadChunk< float >( { 0 }, { 20 } );
+            "{\"defer_iteration_parsing\": true}");
+        auto dataset =
+            series.iterations[1000].open().meshes["E"]["x"].loadChunk<float>(
+                {0}, {20});
         series.flush();
-        for( size_t i = 0; i < 20; ++i )
+        for (size_t i = 0; i < 20; ++i)
        {
             REQUIRE(
-                std::abs( dataset.get()[ i ] - float( i ) ) <=
-                std::numeric_limits< float >::epsilon() );
+                std::abs(dataset.get()[i] - float(i)) <=
+                std::numeric_limits<float>::epsilon());
         }
     }
     {
         Series series(
             basename + "%06T." + extension,
             Access::READ_WRITE,
-            "{\"defer_iteration_parsing\": true}" );
-        auto dataset = series.iterations[ 1000 ]
-                           .open()
-                           .meshes[ "E" ][ "x" ]
-                           .loadChunk< float >( { 0 }, { 20 } );
+            "{\"defer_iteration_parsing\": true}");
+        auto dataset =
+            series.iterations[1000].open().meshes["E"]["x"].loadChunk<float>(
+                {0}, {20});
         series.flush();
-        for( size_t i = 0; i < 20; ++i )
+        for (size_t i = 0; i < 20; ++i)
         {
             REQUIRE(
-                std::abs( dataset.get()[ i ] - float( i ) ) <=
-                std::numeric_limits< float >::epsilon() );
+                std::abs(dataset.get()[i] - float(i)) <=
+                std::numeric_limits<float>::epsilon());
         }

         // create a new iteration
-        std::vector< float > buffer( 20 );
-        std::iota( buffer.begin(), buffer.end(), 0.f );
-        auto writeDataset = series.iterations[ 1001 ].meshes[ "E" ][ "x" ];
-        writeDataset.resetDataset( { Datatype::FLOAT, { 20 } } );
-        writeDataset.storeChunk( buffer, { 0 }, { 20 } );
+        std::vector<float> buffer(20);
+        std::iota(buffer.begin(), buffer.end(), 0.f);
+        auto writeDataset = series.iterations[1001].meshes["E"]["x"];
+        writeDataset.resetDataset({Datatype::FLOAT, {20}});
+        writeDataset.storeChunk(buffer, {0}, {20});
         series.flush();
     }
     {
         Series series(
             basename + "%06T." + extension,
             Access::READ_ONLY,
-            "{\"defer_iteration_parsing\": true}" );
-        auto dataset = series.iterations[ 1001 ]
-                           .open()
-                           .meshes[ "E" ][ "x" ]
-                           .loadChunk< float >( { 0 }, { 20 } );
+            "{\"defer_iteration_parsing\": true}");
+        auto dataset =
+            series.iterations[1001].open().meshes["E"]["x"].loadChunk<float>(
+                {0}, {20});
         series.flush();
-        for( size_t i = 0; i < 20; ++i )
+        for (size_t i = 0; i < 20; ++i)
         {
             REQUIRE(
-                std::abs( dataset.get()[ i ] - float( i ) ) <=
-                std::numeric_limits< float >::epsilon() );
+                std::abs(dataset.get()[i] - float(i)) <=
+                std::numeric_limits<float>::epsilon());
         }
     }
 }

-TEST_CASE( "deferred_parsing", "[serial]" )
+TEST_CASE("deferred_parsing", "[serial]")
 {
-    for( auto const & t : testedFileExtensions() )
+    for (auto const &t : testedFileExtensions())
     {
-        deferred_parsing( t );
+        deferred_parsing(t);
     }
 }

 // @todo merge this back with the chaotic_stream test of PR #949
 // (bug noticed while working on that branch)
-void no_explicit_flush( std::string filename )
+void no_explicit_flush(std::string filename)
 {
-    std::vector< uint64_t > sampleData{ 5, 9, 1, 3, 4, 6, 7, 8, 2, 0 };
+    std::vector<uint64_t> sampleData{5, 9, 1, 3, 4, 6, 7, 8, 2, 0};
     std::string jsonConfig = R"(
{
    "adios2": {
@@ -4774,36 +5268,82 @@ void no_explicit_flush( std::string filename )
})";

     {
-        Series series( filename, Access::CREATE, jsonConfig );
-        for( uint64_t currentIteration = 0; currentIteration < 10;
-             ++currentIteration )
+        Series series(filename, Access::CREATE, jsonConfig);
+        for (uint64_t currentIteration = 0; currentIteration < 10;
+             ++currentIteration)
         {
             auto dataset =
-                series.writeIterations()[ currentIteration ]
-                    .meshes[ "iterationOrder" ][ MeshRecordComponent::SCALAR ];
-            dataset.resetDataset( { determineDatatype< uint64_t >(), { 10 } } );
-            dataset.storeChunk( sampleData, { 0 }, { 10 } );
+                series.writeIterations()[currentIteration]
                    .meshes["iterationOrder"][MeshRecordComponent::SCALAR];
+            dataset.resetDataset({determineDatatype<uint64_t>(), {10}});
+            dataset.storeChunk(sampleData, {0}, {10});
             // series.writeIterations()[ currentIteration ].close();
         }
     }

     {
-        Series series( filename, Access::READ_ONLY );
+        Series series(filename, Access::READ_ONLY);
         size_t index = 0;
-        for( auto iteration : series.readIterations() )
+        for (auto iteration : series.readIterations())
         {
-            REQUIRE( iteration.iterationIndex == index );
+            REQUIRE(iteration.iterationIndex == index);
             ++index;
         }
-        REQUIRE( index == 10 );
+        REQUIRE(index == 10);
+    }
+}
+
+TEST_CASE("no_explicit_flush", "[serial]")
+{
+    for (auto const &t : testedFileExtensions())
+    {
+        no_explicit_flush("../samples/no_explicit_flush_filebased_%T." + t);
+        no_explicit_flush("../samples/no_explicit_flush." + t);
+    }
+}
+
+void varying_pattern(std::string const file_ending)
+{
+    {
+        std::string filename = "../samples/varying_pattern_%06T." + file_ending;
+        ::openPMD::Series series =
+            ::openPMD::Series(filename, ::openPMD::Access::CREATE);
+
+        for (auto i : {0, 8000, 10000, 100000, 2000000})
+        {
+            auto it = series.iterations[i];
+            it.setAttribute("my_step", i);
+        }
+        series.flush();
+    }
+    {
+        std::string filename = "../samples/varying_pattern_%T." + file_ending;
+        ::openPMD::Series series =
+            ::openPMD::Series(filename, ::openPMD::Access::READ_ONLY);
+
+        REQUIRE(series.iterations.size() == 5);
+        for (auto const &i : series.iterations)
+        {
+            auto const &step = i.first;
+            auto const &it = i.second;
+            std::cout << "Iteration: " << step << "\n";
+            REQUIRE(it.getAttribute("my_step").get<int>() == int(step));
+        }
+
+        helper::listSeries(series, true, std::cout);
+
+        for (auto i : {0, 8000, 10000, 100000, 2000000})
+        {
+            auto it = series.iterations[i];
+            REQUIRE(it.getAttribute("my_step").get<int>() == i);
+        }
     }
 }

-TEST_CASE( "no_explicit_flush", "[serial]" )
+TEST_CASE("varying_zero_pattern", "[serial]")
 {
-    for( auto const & t : testedFileExtensions() )
+    for (auto const &t : testedFileExtensions())
     {
-        no_explicit_flush( "../samples/no_explicit_flush_filebased_%T." + t );
-        no_explicit_flush( "../samples/no_explicit_flush." + t );
+        varying_pattern(t);
     }
 }
diff --git a/test/python/unittest/API/APITest.py b/test/python/unittest/API/APITest.py
index 5f55f4731c..19c86b2ae5 100644
--- a/test/python/unittest/API/APITest.py
+++ b/test/python/unittest/API/APITest.py
@@ -6,13 +6,14 @@
 License: LGPLv3+
 """
-import openpmd_api as io
-
+import ctypes
 import gc
 import os
 import shutil
 import unittest
-import ctypes
+
+import openpmd_api as io
+
 try:
     import numpy as np
     found_numpy = True
diff --git a/test/python/unittest/Test.py b/test/python/unittest/Test.py
index b0652c3bbc..f094b46a2b 100644
--- a/test/python/unittest/Test.py
+++ b/test/python/unittest/Test.py
@@ -6,8 +6,8 @@
 License: LGPLv3+
 """
-import unittest
 import sys
+import unittest

 # Import suites to run.
 from API.APITest import APITest