diff --git a/src/RecordComponent.cpp b/src/RecordComponent.cpp index d8eddedb02..d85e29d90e 100644 --- a/src/RecordComponent.cpp +++ b/src/RecordComponent.cpp @@ -22,6 +22,7 @@ #include "openPMD/RecordComponent.hpp" #include "openPMD/Dataset.hpp" +#include <algorithm> #include <iostream> @@ -47,6 +48,11 @@ RecordComponent::resetDataset(Dataset d) { if( written ) throw std::runtime_error("A Records Dataset can not (yet) be changed after it has been written."); + if( d.extent.empty() ) + throw std::runtime_error("Dataset extent must be at least 1D."); + if( std::any_of(d.extent.begin(), d.extent.end(), + [](Extent::value_type const& i) { return i == 0u; }) ) + throw std::runtime_error("Dataset extent must not be zero in any dimension."); *m_dataset = d; dirty = true; diff --git a/src/backend/PatchRecordComponent.cpp b/src/backend/PatchRecordComponent.cpp index 91c6448941..75616ee9d2 100644 --- a/src/backend/PatchRecordComponent.cpp +++ b/src/backend/PatchRecordComponent.cpp @@ -21,6 +21,8 @@ #include "openPMD/auxiliary/Memory.hpp" #include "openPMD/backend/PatchRecordComponent.hpp" +#include <algorithm> + namespace openPMD { @@ -36,6 +38,11 @@ PatchRecordComponent::resetDataset(Dataset d) { if( written ) throw std::runtime_error("A Records Dataset can not (yet) be changed after it has been written."); + if( d.extent.empty() ) + throw std::runtime_error("Dataset extent must be at least 1D."); + if( std::any_of(d.extent.begin(), d.extent.end(), + [](Extent::value_type const& i) { return i == 0u; }) ) + throw std::runtime_error("Dataset extent must not be zero in any dimension."); *m_dataset = d; dirty = true; diff --git a/test/CoreTest.cpp b/test/CoreTest.cpp index 643fae29ff..a5e84f315c 100644 --- a/test/CoreTest.cpp +++ b/test/CoreTest.cpp @@ -618,4 +618,22 @@ TEST_CASE( "empty_record_test", "[core]" ) Catch::Equals("A Record can not be written without any contained RecordComponents: E")); o.iterations[1].meshes["E"][RecordComponent::SCALAR].resetDataset(Dataset(Datatype::DOUBLE, {1})); o.flush(); -} 
\ No newline at end of file +} + +TEST_CASE( "zero_extent_component", "[core]" ) +{ + Series o = Series("./new_openpmd_output", AccessType::CREATE); + + auto E_x = o.iterations[1].meshes["E"]["x"]; + E_x.setComment("Datasets with zero extent in any dimension are not allowed."); + REQUIRE_THROWS_WITH(E_x.resetDataset(Dataset(Datatype::INT64, {0})), + Catch::Equals("Dataset extent must not be zero in any dimension.")); + REQUIRE_THROWS_WITH(E_x.resetDataset(Dataset(Datatype::INT64, {1, 0})), + Catch::Equals("Dataset extent must not be zero in any dimension.")); + REQUIRE_THROWS_WITH(E_x.resetDataset(Dataset(Datatype::INT64, {0, 1})), + Catch::Equals("Dataset extent must not be zero in any dimension.")); + E_x.setComment("Datasets must contain dimensions."); + REQUIRE_THROWS_WITH(E_x.resetDataset(Dataset(Datatype::INT64, {})), + Catch::Equals("Dataset extent must be at least 1D.")); + E_x.resetDataset(Dataset(Datatype::DOUBLE, {1})); +} diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp index 79d2beeb9e..3ab0d9e640 100644 --- a/test/ParallelIOTest.cpp +++ b/test/ParallelIOTest.cpp @@ -140,6 +140,60 @@ TEST_CASE( "hdf5_write_test", "[parallel][hdf5]" ) o.flush(); } + +TEST_CASE( "hdf5_write_test_zero_extent", "[parallel][hdf5]" ) +{ + int mpi_s{-1}; + int mpi_r{-1}; + MPI_Comm_size(MPI_COMM_WORLD, &mpi_s); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_r); + uint64_t size = static_cast<uint64_t>(mpi_s); + uint64_t rank = static_cast<uint64_t>(mpi_r); + Series o = Series("../samples/parallel_write_zero_extent.h5", AccessType::CREATE, MPI_COMM_WORLD); + + ParticleSpecies& e = o.iterations[1].particles["e"]; + + /* every rank n writes n consecutive cells, increasing values + * rank 0 does a zero-extent write + * two ranks will result in {1} + * three ranks will result in {1, 2, 3} + * four ranks will result in {1, 2, 3, 4, 5, 6} */ + uint64_t num_cells = ((size-1)*(size-1) + (size-1))/2; /* (n^2 + n) / 2 */ + if( num_cells == 0u ) + { + std::cerr << "Test can only be run with at least two 
ranks" << std::endl; + return; + } + + std::vector< double > position_global(num_cells); + double pos{1.}; + std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); + std::shared_ptr< double > position_local(new double[rank]); + uint64_t offset; + if( rank != 0 ) + offset = ((rank-1)*(rank-1) + (rank-1))/2; + else + offset = 0; + + e["position"]["x"].resetDataset(Dataset(determineDatatype(position_local), {num_cells})); + + std::vector< uint64_t > positionOffset_global(num_cells); + uint64_t posOff{1}; + std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); + std::shared_ptr< uint64_t > positionOffset_local(new uint64_t[rank]); + + e["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local), {num_cells})); + + for( uint64_t i = 0; i < rank; ++i ) + { + position_local.get()[i] = position_global[offset + i]; + positionOffset_local.get()[i] = positionOffset_global[offset + i]; + } + e["position"]["x"].storeChunk({offset}, {rank}, position_local); + e["positionOffset"]["x"].storeChunk({offset}, {rank}, positionOffset_local); + + //TODO read back, verify +} #else TEST_CASE( "no_parallel_hdf5", "[parallel][hdf5]" ) { @@ -182,6 +236,60 @@ TEST_CASE( "adios_write_test", "[parallel][adios]" ) o.flush(); } +TEST_CASE( "adios_write_test_zero_extent", "[parallel][adios]" ) +{ + int mpi_s{-1}; + int mpi_r{-1}; + MPI_Comm_size(MPI_COMM_WORLD, &mpi_s); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_r); + uint64_t size = static_cast<uint64_t>(mpi_s); + uint64_t rank = static_cast<uint64_t>(mpi_r); + Series o = Series("../samples/parallel_write_zero_extent.bp", AccessType::CREATE, MPI_COMM_WORLD); + + ParticleSpecies& e = o.iterations[1].particles["e"]; + + /* every rank n writes n consecutive cells, increasing values + * rank 0 does a zero-extent write + * two ranks will result in {1} + * three ranks will result in {1, 2, 3} + * four ranks will result in {1, 2, 3, 4, 5, 6} */ + uint64_t num_cells = 
((size-1)*(size-1) + (size-1))/2; /* (n^2 + n) / 2 */ + if( num_cells == 0u ) + { + std::cerr << "Test can only be run with at least two ranks" << std::endl; + return; + } + + std::vector< double > position_global(num_cells); + double pos{1.}; + std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); + std::shared_ptr< double > position_local(new double[rank]); + uint64_t offset; + if( rank != 0 ) + offset = ((rank-1)*(rank-1) + (rank-1))/2; + else + offset = 0; + + e["position"]["x"].resetDataset(Dataset(determineDatatype(position_local), {num_cells})); + + std::vector< uint64_t > positionOffset_global(num_cells); + uint64_t posOff{1}; + std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); + std::shared_ptr< uint64_t > positionOffset_local(new uint64_t[rank]); + + e["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local), {num_cells})); + + for( uint64_t i = 0; i < rank; ++i ) + { + position_local.get()[i] = position_global[offset + i]; + positionOffset_local.get()[i] = positionOffset_global[offset + i]; + } + e["position"]["x"].storeChunk({offset}, {rank}, position_local); + e["positionOffset"]["x"].storeChunk({offset}, {rank}, positionOffset_local); + + //TODO read back, verify +} + TEST_CASE( "hzdr_adios_sample_content_test", "[parallel][adios1]" ) { int mpi_rank{-1};