diff --git a/CMakeLists.txt b/CMakeLists.txt
index fbb4633927..4746d865f8 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -219,7 +219,7 @@ set(CORE_SOURCE
src/backend/Writable.cpp)
set(IO_SOURCE
src/IO/AbstractIOHandler.cpp
- src/IO/IOTask.cpp
+ src/IO/AbstractIOHandlerImpl.cpp
src/IO/ADIOS/ADIOS1IOHandler.cpp
src/IO/ADIOS/ParallelADIOS1IOHandler.cpp
src/IO/ADIOS/ADIOS2IOHandler.cpp
@@ -507,21 +507,30 @@ endif()
# Examples ####################################################################
#
-# TODO: C++ Examples
-#if(EXISTS "${openPMD_BINARY_DIR}/samples/git-sample/")
-# foreach(examplename ${openPMD_EXAMPLE_NAMES})
-# add_test(NAME Example.py.${examplename}
-# COMMAND ${examplename}
-# WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
-# )
-# endforeach()
-#else()
-# message(STATUS "Missing samples/git-sample/ directory! "
-# "Skipping C++ example test!\n\n"
-# "Note: run\n"
-# " . ${openPMD_SOURCE_DIR}/.travis/download_samples.sh\n"
-# "to add example files!")
-#endif()
+# C++ Examples
+if(EXISTS "${openPMD_BINARY_DIR}/samples/git-sample/")
+ foreach(examplename ${openPMD_EXAMPLE_NAMES})
+ if(${examplename} MATCHES "^.*_parallel$")
+ if(openPMD_HAVE_MPI)
+ add_test(NAME MPI.${examplename}
+ COMMAND ${MPI_TEST_EXE} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${examplename}
+ WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
+ )
+ endif()
+ else()
+ add_test(NAME Serial.${examplename}
+ COMMAND ${examplename}
+ WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
+ )
+ endif()
+ endforeach()
+else()
+ message(STATUS "Missing samples/git-sample/ directory! "
+ "Skipping C++ example test!\n\n"
+ "Note: run\n"
+ " . ${openPMD_SOURCE_DIR}/.travis/download_samples.sh\n"
+ "to add example files!")
+endif()
# Python Examples
if(openPMD_HAVE_PYTHON)
diff --git a/docs/source/dev/backend.rst b/docs/source/dev/backend.rst
index 783bf7d7da..0763d3851d 100644
--- a/docs/source/dev/backend.rst
+++ b/docs/source/dev/backend.rst
@@ -3,4 +3,269 @@
How to Write a Backend
======================
-*TBD*
+Adding support for additional types of file storage or data transportation is possible by creating a backend.
+Backend design has been kept independent of the openPMD-specific logic that maintains all constraints within a file.
+This should allow easy introduction of new file formats with only little knowledge about the rest of the system.
+
+
+File Formats
+------------
+To get started, you should create a new file format in ``include/openPMD/IO/Format.hpp`` representing the new backend.
+Note that this enumeration value will never be seen by users of openPMD-api, but should be kept short and concise to
+improve readability.
+
+.. code-block:: cpp
+
+ enum class Format
+ {
+ JSON
+ };
+
+In order to use the file format through the API, you need to provide unique and characteristic filename extensions that
+are associated with it. This happens in ``src/Series.cpp``:
+
+.. code-block:: cpp
+
+ Format
+ determineFormat(std::string const& filename)
+ {
+ if( auxiliary::ends_with(filename, ".json") )
+ return Format::JSON;
+ }
+
+.. code-block:: cpp
+
+ std::string
+ cleanFilename(std::string const& filename, Format f)
+ {
+ switch( f )
+ {
+ case Format::JSON:
+ return auxiliary::replace_last(filename, ".json", "");
+ }
+ }
+
+Unless your file format imposes additional restrictions to the openPMD constraints, this is all you have to do in the
+frontend section of the API.
+
+IO Handler
+----------
+Now that the user can specify that the new backend is to be used, a concrete mechanism for handling IO interactions is
+required. We call this an ``IOHandler``. It is not concerned with any logic or constraints enforced by openPMD, but
+merely offers a small set of elementary IO operations.
+
+On the very basic level, you will need to derive a class from ``AbstractIOHandler``:
+
+.. code-block:: cpp
+
+ /* file: include/openPMD/IO/JSON/JSONIOHandler.hpp */
+ #include "openPMD/IO/AbstractIOHandler.hpp"
+
+ namespace openPMD
+ {
+ class JSONIOHandler : public AbstractIOHandler
+ {
+ public:
+ JSONIOHandler(std::string const& path, AccessType);
+ virtual ~JSONIOHandler();
+
+ std::future< void > flush() override;
+   };
+ } // openPMD
+
+.. code-block:: cpp
+
+ /* file: src/IO/JSON/JSONIOHandler.cpp */
+ #include "openPMD/IO/JSON/JSONIOHandler.hpp"
+
+ namespace openPMD
+ {
+ JSONIOHandler::JSONIOHandler(std::string const& path, AccessType at)
+ : AbstractIOHandler(path, at)
+ { }
+
+ JSONIOHandler::~JSONIOHandler()
+ { }
+
+ std::future< void >
+ JSONIOHandler::flush()
+ { return std::future< void >(); }
+ } // openPMD
+
+Familiarizing your backend with the rest of the API happens in just one place in ``src/IO/AbstractIOHandler.cpp``:
+
+.. code-block:: cpp
+
+ #if openPMD_HAVE_MPI
+ std::shared_ptr< AbstractIOHandler >
+ AbstractIOHandler::createIOHandler(std::string const& path,
+ AccessType at,
+ Format f,
+ MPI_Comm comm)
+ {
+ switch( f )
+ {
+ case Format::JSON:
+ std::cerr << "No MPI-aware JSON backend available. "
+ "Falling back to the serial backend! "
+ "Possible failure and degraded performance!" << std::endl;
+ return std::make_shared< JSONIOHandler >(path, at);
+ }
+ }
+ #endif
+
+ std::shared_ptr< AbstractIOHandler >
+ AbstractIOHandler::createIOHandler(std::string const& path,
+ AccessType at,
+ Format f)
+ {
+ switch( f )
+ {
+ case Format::JSON:
+ return std::make_shared< JSONIOHandler >(path, at);
+ }
+ }
+
+In this state, the backend will do no IO operations and just act as a dummy that ignores all queries.
+
+IO Task Queue
+-------------
+Operations between the logical representation in this API and physical storage are funneled through a queue ``m_work``
+that is contained in the newly created IOHandler. Contained in this queue are ``IOTask`` s that have to be processed in
+FIFO order (unless you can prove sequential execution guarantees for out-of-order execution) when
+``AbstractIOHandler::flush()`` is called. A **recommended** skeleton is provided in ``AbstractIOHandlerImpl``. Note
+that emptying the queue this way is not required and might not fit your IO scheme.
+
+Using the provided skeleton involves
+ - deriving an IOHandlerImpl for your IOHandler and
+ - delegating all flush calls to the IOHandlerImpl:
+
+.. code-block:: cpp
+
+ /* file: include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp */
+ #include "openPMD/IO/AbstractIOHandlerImpl.hpp"
+
+ namespace openPMD
+ {
+ class JSONIOHandlerImpl : public AbstractIOHandlerImpl
+ {
+ public:
+ JSONIOHandlerImpl(AbstractIOHandler*);
+ virtual ~JSONIOHandlerImpl();
+
+ virtual void createFile(Writable*, Parameter< Operation::CREATE_FILE > const&) override;
+ virtual void createPath(Writable*, Parameter< Operation::CREATE_PATH > const&) override;
+ virtual void createDataset(Writable*, Parameter< Operation::CREATE_DATASET > const&) override;
+ virtual void extendDataset(Writable*, Parameter< Operation::EXTEND_DATASET > const&) override;
+ virtual void openFile(Writable*, Parameter< Operation::OPEN_FILE > const&) override;
+ virtual void openPath(Writable*, Parameter< Operation::OPEN_PATH > const&) override;
+ virtual void openDataset(Writable*, Parameter< Operation::OPEN_DATASET > &) override;
+ virtual void deleteFile(Writable*, Parameter< Operation::DELETE_FILE > const&) override;
+ virtual void deletePath(Writable*, Parameter< Operation::DELETE_PATH > const&) override;
+ virtual void deleteDataset(Writable*, Parameter< Operation::DELETE_DATASET > const&) override;
+ virtual void deleteAttribute(Writable*, Parameter< Operation::DELETE_ATT > const&) override;
+ virtual void writeDataset(Writable*, Parameter< Operation::WRITE_DATASET > const&) override;
+ virtual void writeAttribute(Writable*, Parameter< Operation::WRITE_ATT > const&) override;
+ virtual void readDataset(Writable*, Parameter< Operation::READ_DATASET > &) override;
+ virtual void readAttribute(Writable*, Parameter< Operation::READ_ATT > &) override;
+ virtual void listPaths(Writable*, Parameter< Operation::LIST_PATHS > &) override;
+ virtual void listDatasets(Writable*, Parameter< Operation::LIST_DATASETS > &) override;
+ virtual void listAttributes(Writable*, Parameter< Operation::LIST_ATTS > &) override;
+   };
+ } // openPMD
+
+.. code-block:: cpp
+
+ /* file: include/openPMD/IO/JSON/JSONIOHandler.hpp */
+ #include "openPMD/IO/AbstractIOHandler.hpp"
+ #include "openPMD/IO/JSON/JSONIOHandlerImpl.hpp"
+
+ namespace openPMD
+ {
+ class JSONIOHandler : public AbstractIOHandler
+ {
+ public:
+ /* ... */
+ private:
+ JSONIOHandlerImpl m_impl;
+   };
+ } // openPMD
+
+.. code-block:: cpp
+
+ /* file: src/IO/JSON/JSONIOHandler.cpp */
+ #include "openPMD/IO/JSON/JSONIOHandler.hpp"
+
+ namespace openPMD
+ {
+ /*...*/
+ std::future< void >
+ JSONIOHandler::flush()
+ {
+       return m_impl.flush();
+ }
+ } // openPMD
+
+Each IOTask contains a pointer to a ``Writable`` that corresponds to one object in the openPMD hierarchy. This object
+may be a group or a dataset. When processing certain types of IOTasks in the queue, you will have to assign unique
+FilePositions to these objects to identify the logical object in your physical storage. For this, you need to derive
+a concrete FilePosition for your backend from ``AbstractFilePosition``. There is no requirement on how to identify your
+objects, but ids from your IO library and positional strings are good candidates.
+
+.. code-block:: cpp
+
+ /* file: include/openPMD/IO/JSON/JSONFilePosition.hpp */
+ #include "openPMD/IO/AbstractFilePosition.hpp"
+
+ namespace openPMD
+ {
+ struct JSONFilePosition : public AbstractFilePosition
+ {
+ JSONFilePosition(uint64_t id)
+ : id{id}
+ { }
+
+ uint64_t id;
+ };
+ } // openPMD
+
+From this point, all that is left to do is implement the elementary IO operations provided in the IOHandlerImpl. The
+``Parameter`` structs contain both input parameters (from storage to API) and output parameters (from API to storage).
+The easy way to distinguish between the two parameter sets is their C++ type: Input parameters are
+``std::shared_ptr`` s that allow you to pass the requested data to their destination. Output parameters are all objects
+that are *not* ``std::shared_ptr`` s. The contract of each function call is outlined in
+``include/openPMD/IO/AbstractIOHandlerImpl.hpp``.
+
+.. code-block:: cpp
+
+ /* file: src/IO/JSON/JSONIOHandlerImpl.cpp */
+   #include "openPMD/IO/JSON/JSONIOHandlerImpl.hpp"
+
+ namespace openPMD
+ {
+ void
+ JSONIOHandlerImpl::createFile(Writable* writable,
+ Parameter< Operation::CREATE_FILE > const& parameters)
+ {
+ if( !writable->written )
+ {
+ path dir(m_handler->directory);
+ if( !exists(dir) )
+ create_directories(dir);
+
+ std::string name = m_handler->directory + parameters.name;
+ if( !auxiliary::ends_with(name, ".json") )
+ name += ".json";
+
+ uint64_t id = /*...*/
+ ASSERT(id >= 0, "Internal error: Failed to create JSON file");
+
+ writable->written = true;
+ writable->abstractFilePosition = std::make_shared< JSONFilePosition >(id);
+ }
+ }
+ /*...*/
+ } // openPMD
+
+Note that you might have to keep track of open file handles if they have to be closed explicitly during destruction of
+the IOHandlerImpl (prominent in C-style frameworks).
\ No newline at end of file
diff --git a/include/openPMD/IO/AbstractIOHandler.hpp b/include/openPMD/IO/AbstractIOHandler.hpp
index b5bf6077a4..b50219d01d 100755
--- a/include/openPMD/IO/AbstractIOHandler.hpp
+++ b/include/openPMD/IO/AbstractIOHandler.hpp
@@ -74,7 +74,7 @@ class AbstractIOHandler
{
public:
#if openPMD_HAVE_MPI
- /** Construct an appropriate specific IOHandler for the desired IO mode.
+ /** Construct an appropriate specific IOHandler for the desired IO mode that may be MPI-aware.
*
* @param path Path to root folder for all operations associated with the desired handler.
* @param accessType AccessType describing desired operations and permissions of the desired handler.
diff --git a/include/openPMD/IO/AbstractIOHandlerImpl.hpp b/include/openPMD/IO/AbstractIOHandlerImpl.hpp
new file mode 100644
index 0000000000..97c8e1ff82
--- /dev/null
+++ b/include/openPMD/IO/AbstractIOHandlerImpl.hpp
@@ -0,0 +1,218 @@
+/* Copyright 2018 Fabian Koller
+ *
+ * This file is part of openPMD-api.
+ *
+ * openPMD-api is free software: you can redistribute it and/or modify
+ * it under the terms of of either the GNU General Public License or
+ * the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * openPMD-api is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License and the GNU Lesser General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * and the GNU Lesser General Public License along with openPMD-api.
+ * If not, see <https://www.gnu.org/licenses/>.
+ */
+#pragma once
+
+#include "openPMD/IO/IOTask.hpp"
+
+#include <future>
+
+
+namespace openPMD
+{
+class AbstractIOHandler;
+class Writable;
+
+class AbstractIOHandlerImpl
+{
+public:
+ AbstractIOHandlerImpl(AbstractIOHandler*);
+ virtual ~AbstractIOHandlerImpl();
+
+ virtual std::future< void > flush();
+
+ /** Create a new file in physical storage, possibly overriding an existing file.
+ *
+ * The operation should fail if m_handler->accessType is AccessType::READ_ONLY.
+ * The new file should be located in m_handler->directory.
+ * The new file should have the filename parameters.name.
+ * The filename should include the correct corresponding filename extension.
+ * Any existing file should be overwritten if m_handler->accessType is AccessType::CREATE.
+ * The Writables file position should correspond to the root group "/" of the hierarchy.
+ * The Writable should be marked written when the operation completes successfully.
+ */
+ virtual void createFile(Writable*, Parameter< Operation::CREATE_FILE > const&) = 0;
+ /** Create all necessary groups for a path, possibly recursively.
+ *
+ * The operation should fail if m_handler->accessType is AccessType::READ_ONLY.
+ * The path parameters.path may contain multiple levels (e.g. first/second/third/).
+ * The Writables file position should correspond to the complete newly created path (i.e. first/second/third/ should be assigned to the Writables file position).
+ * The Writable should be marked written when the operation completes successfully.
+ */
+ virtual void createPath(Writable*, Parameter< Operation::CREATE_PATH > const&) = 0;
+ /** Create a new dataset of given type, extent and storage properties.
+ *
+ * The operation should fail if m_handler->accessType is AccessType::READ_ONLY.
+ * The path may contain multiple levels (e.g. group/dataset).
+ * The new dataset should have the name parameters.name. This name should not start or end with a slash ("/").
+ * The new dataset should be of datatype parameters.dtype.
+ * The new dataset should have an extent of parameters.extent.
+ * If possible, the new dataset should be extensible.
+ * If possible, the new dataset should be divided into chunks with size parameters.chunkSize.
+ * If possible, the new dataset should be compressed according to parameters.compression. This may be format-specific.
+ * If possible, the new dataset should be transformed according to parameters.transform. This may be format-specific.
+ * The Writables file position should correspond to the newly created dataset.
+ * The Writable should be marked written when the operation completes successfully.
+ */
+ virtual void createDataset(Writable*, Parameter< Operation::CREATE_DATASET > const&) = 0;
+ /** Increase the extent of an existing dataset.
+ *
+ * The operation should fail if m_handler->accessType is AccessType::READ_ONLY.
+ * The operation should fail if the dataset does not yet exist.
+ * The dataset should have the name parameters.name. This name should not start or end with a slash ("/").
+ * The operation should fail if the new extent is not strictly larger in every dimension.
+ * The dataset should have an extent of parameters.extent.
+ */
+ virtual void extendDataset(Writable*, Parameter< Operation::EXTEND_DATASET > const&) = 0;
+ /** Open an existing file assuming it conforms to openPMD.
+ *
+ * The operation should fail if m_handler->directory is not accessible.
+ * The opened file should have filename parameters.name and include the correct corresponding filename extension.
+ * The operation should not open files more than once.
+ * If possible, the file should be opened with read-only permissions if m_handler->accessType is AccessType::READ_ONLY.
+ * The Writables file position should correspond to the root group "/" of the hierarchy in the opened file.
+ * The Writable should be marked written when the operation completes successfully.
+ */
+ virtual void openFile(Writable*, Parameter< Operation::OPEN_FILE > const&) = 0;
+ /** Open all contained groups in a path, possibly recursively.
+ *
+ * The operation should overwrite existing file positions, even when the Writable was already marked written.
+ * The path parameters.path may contain multiple levels (e.g. first/second/third/). This path should be relative (i.e. it should not start with a slash "/").
+ * The Writables file position should correspond to the complete opened path (i.e. first/second/third/ should be assigned to the Writables file position).
+ * The Writable should be marked written when the operation completes successfully.
+ */
+ virtual void openPath(Writable*, Parameter< Operation::OPEN_PATH > const&) = 0;
+ /** Open an existing dataset and determine its datatype and extent.
+ *
+ * The opened dataset should be located in a group below the group of the Writables parent writable->parent.
+ * The opened datasets name should be parameters.name. This name should not start or end with a slash ("/").
+ * The opened datasets datatype should be stored in *(parameters.dtype).
+ * The opened datasets extent should be stored in *(parameters.extent).
+ * The Writables file position should correspond to the opened dataset.
+ * The Writable should be marked written when the operation completes successfully.
+ */
+ virtual void openDataset(Writable*, Parameter< Operation::OPEN_DATASET > &) = 0;
+ /** Delete an existing file from physical storage.
+ *
+ * The operation should fail if m_handler->accessType is AccessType::READ_ONLY.
+ * The operation should pass if the Writable was not marked written.
+ * All handles that correspond to the file should be closed before deletion.
+ * The file to delete should have the filename parameters.name.
+ * The filename should include the correct corresponding filename extension.
+ * The Writables file position should be set to an invalid position (i.e. the pointer should be a nullptr).
+ * The Writable should be marked not written when the operation completes successfully.
+ */
+ virtual void deleteFile(Writable*, Parameter< Operation::DELETE_FILE > const&) = 0;
+ /** Delete all objects within an existing path.
+ *
+ * The operation should fail if m_handler->accessType is AccessType::READ_ONLY.
+ * The operation should pass if the Writable was not marked written.
+ * The path parameters.path may contain multiple levels (e.g. first/second/third/). This path should be relative (i.e. it should not start with a slash "/"). It may also contain the current group ".".
+ * All groups and datasets starting from the path should not be accessible in physical storage after the operation completes successfully.
+ * The Writables file position should be set to an invalid position (i.e. the pointer should be a nullptr).
+ * The Writable should be marked not written when the operation completes successfully.
+ */
+ virtual void deletePath(Writable*, Parameter< Operation::DELETE_PATH > const&) = 0;
+ /** Delete an existing dataset.
+ *
+ * The operation should fail if m_handler->accessType is AccessType::READ_ONLY.
+ * The operation should pass if the Writable was not marked written.
+ * The dataset should have the name parameters.name. This name should not start or end with a slash ("/"). It may also contain the current dataset ".".
+ * The dataset should not be accessible in physical storage after the operation completes successfully.
+ * The Writables file position should be set to an invalid position (i.e. the pointer should be a nullptr).
+ * The Writable should be marked not written when the operation completes successfully.
+ */
+ virtual void deleteDataset(Writable*, Parameter< Operation::DELETE_DATASET > const&) = 0;
+ /** Delete an existing attribute.
+ *
+ * The operation should fail if m_handler->accessType is AccessType::READ_ONLY.
+ * The operation should pass if the Writable was not marked written.
+ * The attribute should be associated with the Writable and have the name parameters.name before deletion.
+ * The attribute should not be accessible in physical storage after the operation completes successfully.
+ */
+ virtual void deleteAttribute(Writable*, Parameter< Operation::DELETE_ATT > const&) = 0;
+ /** Write a chunk of data into an existing dataset.
+ *
+ * The operation should fail if m_handler->accessType is AccessType::READ_ONLY.
+ * The dataset should be associated with the Writable.
+ * The operation should fail if the dataset does not exist.
+ * The operation should fail if the chunk extent parameters.extent is not smaller than or equal to the dataset extent in every dimension.
+ * The operation should fail if chunk positions parameters.offset+parameters.extent do not reside inside the dataset.
+ * The dataset should match the datatype parameters.dtype.
+ * The data parameters.data is a cast-to-void pointer to a flattened version of the chunk data. It should be re-cast to the provided datatype. The chunk is stored row-major.
+ * The region of the chunk should be written to physical storage after the operation completes successfully.
+ */
+ virtual void writeDataset(Writable*, Parameter< Operation::WRITE_DATASET > const&) = 0;
+ /** Create a single attribute and fill the value, possibly overwriting an existing attribute.
+ *
+ * The operation should fail if m_handler->accessType is AccessType::READ_ONLY.
+ * The attribute should have the name parameters.name. This name should not contain a slash ("/").
+ * The attribute should be of datatype parameters.dtype.
+ * Any existing attribute with the same name should be overwritten. If possible, only the value should be changed if the datatype stays the same.
+ * The attribute should be written to physical storage after the operation completes successfully.
+ * All datatypes of Datatype should be supported in a type-safe way.
+ */
+ virtual void writeAttribute(Writable*, Parameter< Operation::WRITE_ATT > const&) = 0;
+ /** Read a chunk of data from an existing dataset.
+ *
+ * The dataset should be associated with the Writable.
+ * The operation should fail if the dataset does not exist.
+ * The operation should fail if the chunk extent parameters.extent is not smaller than or equal to the dataset extent in every dimension.
+ * The operation should fail if chunk positions parameters.offset+parameters.extent do not reside inside the dataset.
+ * The dataset should match the datatype parameters.dtype.
+ * The data parameters.data should be a cast-to-void pointer to a flattened version of the chunk data. The chunk should be stored row-major.
+ * The region of the chunk should be written to the location indicated by the pointer after the operation completes successfully.
+ */
+ virtual void readDataset(Writable*, Parameter< Operation::READ_DATASET > &) = 0;
+ /** Read the value of an existing attribute.
+ *
+ * The operation should fail if the Writable was not marked written.
+ * The operation should fail if the attribute does not exist.
+ * The attribute should be associated with the Writable and have the name parameters.name. This name should not contain a slash ("/").
+ * The attribute datatype should be stored in the location indicated by the pointer parameters.dtype.
+ * The attribute value should be stored as a generic Variant::resource in the location indicated by the pointer parameters.resource.
+ * All datatypes of Datatype should be supported in a type-safe way.
+ */
+ virtual void readAttribute(Writable*, Parameter< Operation::READ_ATT > &) = 0;
+ /** List all paths/sub-groups inside a group, non-recursively.
+ *
+ * The operation should fail if the Writable was not marked written.
+ * The operation should fail if the Writable is not a group.
+ * The list of group names should be stored in the location indicated by the pointer parameters.paths.
+ */
+ virtual void listPaths(Writable*, Parameter< Operation::LIST_PATHS > &) = 0;
+ /** List all datasets inside a group, non-recursively.
+ *
+ * The operation should fail if the Writable was not marked written.
+ * The operation should fail if the Writable is not a group.
+ * The list of dataset names should be stored in the location indicated by the pointer parameters.datasets.
+ */
+ virtual void listDatasets(Writable*, Parameter< Operation::LIST_DATASETS > &) = 0;
+ /** List all attributes associated with an object.
+ *
+ * The operation should fail if the Writable was not marked written.
+ * The attribute should be associated with the Writable.
+ * The list of attribute names should be stored in the location indicated by the pointer parameters.attributes.
+ */
+ virtual void listAttributes(Writable*, Parameter< Operation::LIST_ATTS > &) = 0;
+
+ AbstractIOHandler* m_handler;
+}; //AbstractIOHandlerImpl
+} // openPMD
diff --git a/include/openPMD/IO/HDF5/HDF5IOHandler.hpp b/include/openPMD/IO/HDF5/HDF5IOHandler.hpp
index 68260d8a5d..0693f5dd98 100755
--- a/include/openPMD/IO/HDF5/HDF5IOHandler.hpp
+++ b/include/openPMD/IO/HDF5/HDF5IOHandler.hpp
@@ -1,4 +1,4 @@
-/* Copyright 2017 Fabian Koller
+/* Copyright 2017-2018 Fabian Koller
*
* This file is part of openPMD-api.
*
@@ -21,6 +21,7 @@
#pragma once
#include "openPMD/IO/AbstractIOHandler.hpp"
+#include "openPMD/IO/AbstractIOHandlerImpl.hpp"
#if openPMD_HAVE_HDF5
# include <hdf5.h>
@@ -38,33 +39,30 @@ namespace openPMD
#if openPMD_HAVE_HDF5
class HDF5IOHandler;
-class HDF5IOHandlerImpl
+class HDF5IOHandlerImpl : public AbstractIOHandlerImpl
{
public:
HDF5IOHandlerImpl(AbstractIOHandler*);
virtual ~HDF5IOHandlerImpl();
- virtual std::future< void > flush();
-
- using ArgumentMap = std::map< std::string, ParameterArgument >;
- virtual void createFile(Writable*, ArgumentMap const&);
- virtual void createPath(Writable*, ArgumentMap const&);
- virtual void createDataset(Writable*, ArgumentMap const&);
- virtual void extendDataset(Writable*, ArgumentMap const&);
- virtual void openFile(Writable*, ArgumentMap const&);
- virtual void openPath(Writable*, ArgumentMap const&);
- virtual void openDataset(Writable*, ArgumentMap &);
- virtual void deleteFile(Writable*, ArgumentMap const&);
- virtual void deletePath(Writable*, ArgumentMap const&);
- virtual void deleteDataset(Writable*, ArgumentMap const&);
- virtual void deleteAttribute(Writable*, ArgumentMap const&);
- virtual void writeDataset(Writable*, ArgumentMap const&);
- virtual void writeAttribute(Writable*, ArgumentMap const&);
- virtual void readDataset(Writable*, ArgumentMap &);
- virtual void readAttribute(Writable*, ArgumentMap &);
- virtual void listPaths(Writable*, ArgumentMap &);
- virtual void listDatasets(Writable*, ArgumentMap &);
- virtual void listAttributes(Writable*, ArgumentMap &);
+ virtual void createFile(Writable*, Parameter< Operation::CREATE_FILE > const&) override;
+ virtual void createPath(Writable*, Parameter< Operation::CREATE_PATH > const&) override;
+ virtual void createDataset(Writable*, Parameter< Operation::CREATE_DATASET > const&) override;
+ virtual void extendDataset(Writable*, Parameter< Operation::EXTEND_DATASET > const&) override;
+ virtual void openFile(Writable*, Parameter< Operation::OPEN_FILE > const&) override;
+ virtual void openPath(Writable*, Parameter< Operation::OPEN_PATH > const&) override;
+ virtual void openDataset(Writable*, Parameter< Operation::OPEN_DATASET > &) override;
+ virtual void deleteFile(Writable*, Parameter< Operation::DELETE_FILE > const&) override;
+ virtual void deletePath(Writable*, Parameter< Operation::DELETE_PATH > const&) override;
+ virtual void deleteDataset(Writable*, Parameter< Operation::DELETE_DATASET > const&) override;
+ virtual void deleteAttribute(Writable*, Parameter< Operation::DELETE_ATT > const&) override;
+ virtual void writeDataset(Writable*, Parameter< Operation::WRITE_DATASET > const&) override;
+ virtual void writeAttribute(Writable*, Parameter< Operation::WRITE_ATT > const&) override;
+ virtual void readDataset(Writable*, Parameter< Operation::READ_DATASET > &) override;
+ virtual void readAttribute(Writable*, Parameter< Operation::READ_ATT > &) override;
+ virtual void listPaths(Writable*, Parameter< Operation::LIST_PATHS > &) override;
+ virtual void listDatasets(Writable*, Parameter< Operation::LIST_DATASETS > &) override;
+ virtual void listAttributes(Writable*, Parameter< Operation::LIST_ATTS > &) override;
std::unordered_map< Writable*, hid_t > m_fileIDs;
std::unordered_set< hid_t > m_openFileIDs;
@@ -73,8 +71,6 @@ class HDF5IOHandlerImpl
hid_t m_fileAccessProperty;
hid_t m_H5T_BOOL_ENUM;
-
- AbstractIOHandler* m_handler;
}; //HDF5IOHandlerImpl
#else
class HDF5IOHandlerImpl
diff --git a/include/openPMD/IO/IOTask.hpp b/include/openPMD/IO/IOTask.hpp
index d03739ff65..dd58f52781 100755
--- a/include/openPMD/IO/IOTask.hpp
+++ b/include/openPMD/IO/IOTask.hpp
@@ -1,3 +1,23 @@
+/* Copyright 2017-2018 Fabian Koller
+ *
+ * This file is part of openPMD-api.
+ *
+ * openPMD-api is free software: you can redistribute it and/or modify
+ * it under the terms of of either the GNU General Public License or
+ * the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * openPMD-api is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License and the GNU Lesser General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * and the GNU Lesser General Public License along with openPMD-api.
+ * If not, see <https://www.gnu.org/licenses/>.
+ */
#pragma once
#include "openPMD/auxiliary/Variadic.hpp"
@@ -13,35 +33,6 @@
namespace openPMD
{
-/** Concrete datatype of an object available at runtime.
- */
-enum class ArgumentDatatype : int
-{
- STRING = 0,
- VEC_UINT64,
- PTR_VOID,
- SHARED_PTR_VOID,
- DATATYPE,
- ATT_RESOURCE,
- SHARED_PTR_EXTENT,
- SHARED_PTR_DATATYPE,
- SHARED_PTR_ATT_RESOURCE,
- SHARED_PTR_VEC_STRING,
-
- UNDEFINED
-};
-using ParameterArgument = auxiliary::Variadic< ArgumentDatatype,
- std::string,
- std::vector< uint64_t >,
- void*,
- std::shared_ptr< void >,
- Datatype,
- Attribute::resource,
- std::shared_ptr< Extent >,
- std::shared_ptr< Datatype >,
- std::shared_ptr< Attribute::resource >,
- std::shared_ptr< std::vector< std::string > > >;
-
/** Type of IO operation between logical and persistent data.
*/
enum class Operation
@@ -69,6 +60,10 @@ enum class Operation
LIST_ATTS
}; //Operation
+struct AbstractParameter
+{
+ virtual ~AbstractParameter() = default;
+};
/** @brief Typesafe description of all required Arguments for a specified Operation.
*
@@ -79,54 +74,54 @@ enum class Operation
* @tparam Operation Type of Operation to be executed.
*/
template< Operation >
-struct Parameter
+struct Parameter : public AbstractParameter
{ };
template<>
-struct Parameter< Operation::CREATE_FILE >
+struct Parameter< Operation::CREATE_FILE > : public AbstractParameter
{
std::string name;
};
template<>
-struct Parameter< Operation::OPEN_FILE >
+struct Parameter< Operation::OPEN_FILE > : public AbstractParameter
{
std::string name;
};
template<>
-struct Parameter< Operation::DELETE_FILE >
+struct Parameter< Operation::DELETE_FILE > : public AbstractParameter
{
std::string name;
};
template<>
-struct Parameter< Operation::CREATE_PATH >
+struct Parameter< Operation::CREATE_PATH > : public AbstractParameter
{
std::string path;
};
template<>
-struct Parameter< Operation::OPEN_PATH >
+struct Parameter< Operation::OPEN_PATH > : public AbstractParameter
{
std::string path;
};
template<>
-struct Parameter< Operation::DELETE_PATH >
+struct Parameter< Operation::DELETE_PATH > : public AbstractParameter
{
std::string path;
};
template<>
-struct Parameter< Operation::LIST_PATHS >
+struct Parameter< Operation::LIST_PATHS > : public AbstractParameter
{
std::shared_ptr< std::vector< std::string > > paths
= std::make_shared< std::vector< std::string > >();
};
template<>
-struct Parameter< Operation::CREATE_DATASET >
+struct Parameter< Operation::CREATE_DATASET > : public AbstractParameter
{
std::string name;
Extent extent;
@@ -137,14 +132,14 @@ struct Parameter< Operation::CREATE_DATASET >
};
template<>
-struct Parameter< Operation::EXTEND_DATASET >
+struct Parameter< Operation::EXTEND_DATASET > : public AbstractParameter
{
std::string name;
Extent extent;
};
template<>
-struct Parameter< Operation::OPEN_DATASET >
+struct Parameter< Operation::OPEN_DATASET > : public AbstractParameter
{
std::string name;
std::shared_ptr< Datatype > dtype
@@ -154,13 +149,13 @@ struct Parameter< Operation::OPEN_DATASET >
};
template<>
-struct Parameter< Operation::DELETE_DATASET >
+struct Parameter< Operation::DELETE_DATASET > : public AbstractParameter
{
std::string name;
};
template<>
-struct Parameter< Operation::WRITE_DATASET >
+struct Parameter< Operation::WRITE_DATASET > : public AbstractParameter
{
Extent extent;
Offset offset;
@@ -169,7 +164,7 @@ struct Parameter< Operation::WRITE_DATASET >
};
template<>
-struct Parameter< Operation::READ_DATASET >
+struct Parameter< Operation::READ_DATASET > : public AbstractParameter
{
Extent extent;
Offset offset;
@@ -178,20 +173,20 @@ struct Parameter< Operation::READ_DATASET >
};
template<>
-struct Parameter< Operation::LIST_DATASETS >
+struct Parameter< Operation::LIST_DATASETS > : public AbstractParameter
{
std::shared_ptr< std::vector< std::string > > datasets
= std::make_shared< std::vector< std::string > >();
};
template<>
-struct Parameter< Operation::DELETE_ATT >
+struct Parameter< Operation::DELETE_ATT > : public AbstractParameter
{
std::string name;
};
template<>
-struct Parameter< Operation::WRITE_ATT >
+struct Parameter< Operation::WRITE_ATT > : public AbstractParameter
{
Attribute::resource resource;
std::string name;
@@ -199,7 +194,7 @@ struct Parameter< Operation::WRITE_ATT >
};
template<>
-struct Parameter< Operation::READ_ATT >
+struct Parameter< Operation::READ_ATT > : public AbstractParameter
{
std::string name;
std::shared_ptr< Datatype > dtype
@@ -209,17 +204,13 @@ struct Parameter< Operation::READ_ATT >
};
template<>
-struct Parameter< Operation::LIST_ATTS >
+struct Parameter< Operation::LIST_ATTS > : public AbstractParameter
{
std::shared_ptr< std::vector< std::string > > attributes
= std::make_shared< std::vector< std::string > >();
};
-template< Operation o >
-std::map< std::string, ParameterArgument >
-structToMap(Parameter< o > const&);
-
/** @brief Self-contained description of a single IO operation.
*
* Contained are
@@ -242,11 +233,11 @@ class IOTask
Parameter< op > const& p)
: writable{w},
operation{op},
- parameter{structToMap(p)}
+ parameter{new Parameter< op >(p)}
{ }
Writable* writable;
Operation operation;
- std::map< std::string, ParameterArgument > parameter;
+ std::shared_ptr< AbstractParameter > parameter;
}; //IOTask
} // openPMD
diff --git a/include/openPMD/Series.hpp b/include/openPMD/Series.hpp
index ca2e1ee38e..b918a383cb 100755
--- a/include/openPMD/Series.hpp
+++ b/include/openPMD/Series.hpp
@@ -1,4 +1,4 @@
-/* Copyright 2017 Fabian Koller
+/* Copyright 2017-2018 Fabian Koller
*
* This file is part of openPMD-api.
*
@@ -257,7 +257,6 @@ class Series : public Attributable
void readBase();
void read();
- static std::string cleanFilename(std::string, Format);
constexpr static char const * const OPENPMD = "1.1.0";
constexpr static char const * const BASEPATH = "/data/%T/";
@@ -265,4 +264,19 @@ class Series : public Attributable
IterationEncoding m_iterationEncoding;
std::string m_name;
}; //Series
+
+/** Determine the storage format of a Series from the used filename extension.
+ *
 + * @param filename String containing the filename.
+ * @return Format that best fits the filename extension.
+ */
+Format determineFormat(std::string const& filename);
+
+/** Remove the filename extension of a given storage format.
+ *
+ * @param filename String containing the filename, possibly with filename extension.
+ * @param f File format to remove filename extension for.
+ * @return String containing the filename without filename extension.
+ */
+std::string cleanFilename(std::string const& filename, Format f);
} // openPMD
diff --git a/src/IO/AbstractIOHandler.cpp b/src/IO/AbstractIOHandler.cpp
index 682fe761df..6b22b6a4fb 100755
--- a/src/IO/AbstractIOHandler.cpp
+++ b/src/IO/AbstractIOHandler.cpp
@@ -1,4 +1,4 @@
-/* Copyright 2017 Fabian Koller
+/* Copyright 2017-2018 Fabian Koller
*
* This file is part of openPMD-api.
*
@@ -34,30 +34,24 @@ AbstractIOHandler::createIOHandler(std::string const& path,
Format f,
MPI_Comm comm)
{
- std::shared_ptr< AbstractIOHandler > ret{nullptr};
switch( f )
{
case Format::HDF5:
# if openPMD_HAVE_HDF5
- ret = std::make_shared< ParallelHDF5IOHandler >(path, at, comm);
+ return std::make_shared< ParallelHDF5IOHandler >(path, at, comm);
# else
std::cerr << "Parallel HDF5 backend not found. "
<< "Your IO operations will be NOOPS!" << std::endl;
- ret = std::make_shared< DummyIOHandler >(path, at);
+ return std::make_shared< DummyIOHandler >(path, at);
# endif
- break;
case Format::ADIOS1:
case Format::ADIOS2:
std::cerr << "Parallel ADIOS2 backend not yet working. "
<< "Your IO operations will be NOOPS!" << std::endl;
- ret = std::make_shared< DummyIOHandler >(path, at);
- break;
+ return std::make_shared< DummyIOHandler >(path, at);
default:
- ret = std::make_shared< DummyIOHandler >(path, at);
- break;
+ return std::make_shared< DummyIOHandler >(path, at);
}
-
- return ret;
}
AbstractIOHandler::AbstractIOHandler(std::string const& path,
@@ -72,23 +66,17 @@ AbstractIOHandler::createIOHandler(std::string const& path,
AccessType at,
Format f)
{
- std::shared_ptr< AbstractIOHandler > ret{nullptr};
switch( f )
{
case Format::HDF5:
- ret = std::make_shared< HDF5IOHandler >(path, at);
- break;
+ return std::make_shared< HDF5IOHandler >(path, at);
case Format::ADIOS1:
case Format::ADIOS2:
std::cerr << "Backend not yet working. Your IO operations will be NOOPS!" << std::endl;
- ret = std::make_shared< DummyIOHandler >(path, at);
- break;
+ return std::make_shared< DummyIOHandler >(path, at);
default:
- ret = std::make_shared< DummyIOHandler >(path, at);
- break;
+ return std::make_shared< DummyIOHandler >(path, at);
}
-
- return ret;
}
AbstractIOHandler::AbstractIOHandler(std::string const& path,
diff --git a/src/IO/AbstractIOHandlerImpl.cpp b/src/IO/AbstractIOHandlerImpl.cpp
new file mode 100644
index 0000000000..1bd4bac60b
--- /dev/null
+++ b/src/IO/AbstractIOHandlerImpl.cpp
@@ -0,0 +1,109 @@
+/* Copyright 2018 Fabian Koller
+ *
+ * This file is part of openPMD-api.
+ *
+ * openPMD-api is free software: you can redistribute it and/or modify
 + * it under the terms of either the GNU General Public License or
+ * the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * openPMD-api is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License and the GNU Lesser General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * and the GNU Lesser General Public License along with openPMD-api.
 + * If not, see <http://www.gnu.org/licenses/>.
+ */
+#include "openPMD/IO/AbstractIOHandler.hpp"
+#include "openPMD/IO/AbstractIOHandlerImpl.hpp"
+
+
+namespace openPMD
+{
+AbstractIOHandlerImpl::AbstractIOHandlerImpl(AbstractIOHandler *handler)
+ : m_handler{handler}
+{ }
+
+AbstractIOHandlerImpl::~AbstractIOHandlerImpl()
+{ }
+
+std::future< void >
+AbstractIOHandlerImpl::flush()
+{
+ while( !(*m_handler).m_work.empty() )
+ {
+ IOTask& i = (*m_handler).m_work.front();
+ try
+ {
+ switch( i.operation )
+ {
+ using O = Operation;
+ case O::CREATE_FILE:
+ createFile(i.writable, *dynamic_cast< Parameter< Operation::CREATE_FILE >* >(i.parameter.get()));
+ break;
+ case O::CREATE_PATH:
+ createPath(i.writable, *dynamic_cast< Parameter< O::CREATE_PATH >* >(i.parameter.get()));
+ break;
+ case O::CREATE_DATASET:
+ createDataset(i.writable, *dynamic_cast< Parameter< O::CREATE_DATASET >* >(i.parameter.get()));
+ break;
+ case O::EXTEND_DATASET:
+ extendDataset(i.writable, *dynamic_cast< Parameter< O::EXTEND_DATASET >* >(i.parameter.get()));
+ break;
+ case O::OPEN_FILE:
+ openFile(i.writable, *dynamic_cast< Parameter< O::OPEN_FILE >* >(i.parameter.get()));
+ break;
+ case O::OPEN_PATH:
+ openPath(i.writable, *dynamic_cast< Parameter< O::OPEN_PATH >* >(i.parameter.get()));
+ break;
+ case O::OPEN_DATASET:
+ openDataset(i.writable, *dynamic_cast< Parameter< O::OPEN_DATASET >* >(i.parameter.get()));
+ break;
+ case O::DELETE_FILE:
+ deleteFile(i.writable, *dynamic_cast< Parameter< O::DELETE_FILE >* >(i.parameter.get()));
+ break;
+ case O::DELETE_PATH:
+ deletePath(i.writable, *dynamic_cast< Parameter< O::DELETE_PATH >* >(i.parameter.get()));
+ break;
+ case O::DELETE_DATASET:
+ deleteDataset(i.writable, *dynamic_cast< Parameter< O::DELETE_DATASET >* >(i.parameter.get()));
+ break;
+ case O::DELETE_ATT:
+ deleteAttribute(i.writable, *dynamic_cast< Parameter< O::DELETE_ATT >* >(i.parameter.get()));
+ break;
+ case O::WRITE_DATASET:
+ writeDataset(i.writable, *dynamic_cast< Parameter< O::WRITE_DATASET >* >(i.parameter.get()));
+ break;
+ case O::WRITE_ATT:
+ writeAttribute(i.writable, *dynamic_cast< Parameter< O::WRITE_ATT >* >(i.parameter.get()));
+ break;
+ case O::READ_DATASET:
+ readDataset(i.writable, *dynamic_cast< Parameter< O::READ_DATASET >* >(i.parameter.get()));
+ break;
+ case O::READ_ATT:
+ readAttribute(i.writable, *dynamic_cast< Parameter< O::READ_ATT >* >(i.parameter.get()));
+ break;
+ case O::LIST_PATHS:
+ listPaths(i.writable, *dynamic_cast< Parameter< O::LIST_PATHS >* >(i.parameter.get()));
+ break;
+ case O::LIST_DATASETS:
+ listDatasets(i.writable, *dynamic_cast< Parameter< O::LIST_DATASETS >* >(i.parameter.get()));
+ break;
+ case O::LIST_ATTS:
+ listAttributes(i.writable, *dynamic_cast< Parameter< O::LIST_ATTS >* >(i.parameter.get()));
+ break;
+ }
+ } catch (unsupported_data_error& e)
+ {
+ (*m_handler).m_work.pop();
+ throw;
+ }
+ (*m_handler).m_work.pop();
+ }
+ return std::future< void >();
+}
+} // openPMD
diff --git a/src/IO/HDF5/HDF5IOHandler.cpp b/src/IO/HDF5/HDF5IOHandler.cpp
index cf236f061c..685bdd7d5c 100755
--- a/src/IO/HDF5/HDF5IOHandler.cpp
+++ b/src/IO/HDF5/HDF5IOHandler.cpp
@@ -1,4 +1,4 @@
-/* Copyright 2017 Fabian Koller
+/* Copyright 2017-2018 Fabian Koller
*
* This file is part of openPMD-api.
*
@@ -46,10 +46,10 @@ namespace openPMD
# endif
HDF5IOHandlerImpl::HDF5IOHandlerImpl(AbstractIOHandler* handler)
- : m_datasetTransferProperty{H5P_DEFAULT},
+ : AbstractIOHandlerImpl(handler),
+ m_datasetTransferProperty{H5P_DEFAULT},
m_fileAccessProperty{H5P_DEFAULT},
- m_H5T_BOOL_ENUM{H5Tenum_create(H5T_NATIVE_INT8)},
- m_handler{handler}
+ m_H5T_BOOL_ENUM{H5Tenum_create(H5T_NATIVE_INT8)}
{
ASSERT(m_H5T_BOOL_ENUM >= 0, "Internal error: Failed to create HDF5 enum");
std::string t{"TRUE"};
@@ -91,86 +91,13 @@ HDF5IOHandlerImpl::~HDF5IOHandlerImpl()
}
}
-std::future< void >
-HDF5IOHandlerImpl::flush()
-{
- while( !(*m_handler).m_work.empty() )
- {
- IOTask& i = (*m_handler).m_work.front();
- try
- {
- switch( i.operation )
- {
- using O = Operation;
- case O::CREATE_FILE:
- createFile(i.writable, i.parameter);
- break;
- case O::CREATE_PATH:
- createPath(i.writable, i.parameter);
- break;
- case O::CREATE_DATASET:
- createDataset(i.writable, i.parameter);
- break;
- case O::EXTEND_DATASET:
- extendDataset(i.writable, i.parameter);
- break;
- case O::OPEN_FILE:
- openFile(i.writable, i.parameter);
- break;
- case O::OPEN_PATH:
- openPath(i.writable, i.parameter);
- break;
- case O::OPEN_DATASET:
- openDataset(i.writable, i.parameter);
- break;
- case O::DELETE_FILE:
- deleteFile(i.writable, i.parameter);
- break;
- case O::DELETE_PATH:
- deletePath(i.writable, i.parameter);
- break;
- case O::DELETE_DATASET:
- deleteDataset(i.writable, i.parameter);
- break;
- case O::DELETE_ATT:
- deleteAttribute(i.writable, i.parameter);
- break;
- case O::WRITE_DATASET:
- writeDataset(i.writable, i.parameter);
- break;
- case O::WRITE_ATT:
- writeAttribute(i.writable, i.parameter);
- break;
- case O::READ_DATASET:
- readDataset(i.writable, i.parameter);
- break;
- case O::READ_ATT:
- readAttribute(i.writable, i.parameter);
- break;
- case O::LIST_PATHS:
- listPaths(i.writable, i.parameter);
- break;
- case O::LIST_DATASETS:
- listDatasets(i.writable, i.parameter);
- break;
- case O::LIST_ATTS:
- listAttributes(i.writable, i.parameter);
- break;
- }
- } catch (unsupported_data_error& e)
- {
- (*m_handler).m_work.pop();
- throw e;
- }
- (*m_handler).m_work.pop();
- }
- return std::future< void >();
-}
-
void
HDF5IOHandlerImpl::createFile(Writable* writable,
- ArgumentMap const& parameters)
+ Parameter< Operation::CREATE_FILE > const& parameters)
{
+ if( m_handler->accessType == AccessType::READ_ONLY )
+ throw std::runtime_error("Creating a file in read-only mode is not possible.");
+
if( !writable->written )
{
using namespace boost::filesystem;
@@ -179,11 +106,16 @@ HDF5IOHandlerImpl::createFile(Writable* writable,
create_directories(dir);
/* Create a new file using current properties. */
- std::string name = m_handler->directory + parameters.at("name").get< std::string >();
+ std::string name = m_handler->directory + parameters.name;
if( !auxiliary::ends_with(name, ".h5") )
name += ".h5";
+ unsigned flags;
+ if( m_handler->accessType == AccessType::CREATE )
+ flags = H5F_ACC_TRUNC;
+ else
+ flags = H5F_ACC_EXCL;
hid_t id = H5Fcreate(name.c_str(),
- H5F_ACC_TRUNC,
+ flags,
H5P_DEFAULT,
m_fileAccessProperty);
ASSERT(id >= 0, "Internal error: Failed to create HDF5 file");
@@ -198,12 +130,15 @@ HDF5IOHandlerImpl::createFile(Writable* writable,
void
HDF5IOHandlerImpl::createPath(Writable* writable,
- ArgumentMap const& parameters)
+ Parameter< Operation::CREATE_PATH > const& parameters)
{
+ if( m_handler->accessType == AccessType::READ_ONLY )
+ throw std::runtime_error("Creating a path in a file opened as read only is not possible.");
+
if( !writable->written )
{
/* Sanitize path */
- std::string path = parameters.at("path").get< std::string >();
+ std::string path = parameters.path;
if( auxiliary::starts_with(path, "/") )
path = auxiliary::replace_first(path, "/", "");
if( !auxiliary::ends_with(path, "/") )
@@ -253,11 +188,14 @@ HDF5IOHandlerImpl::createPath(Writable* writable,
void
HDF5IOHandlerImpl::createDataset(Writable* writable,
- ArgumentMap const& parameters)
+ Parameter< Operation::CREATE_DATASET > const& parameters)
{
+ if( m_handler->accessType == AccessType::READ_ONLY )
+ throw std::runtime_error("Creating a dataset in a file opened as read only is not possible.");
+
if( !writable->written )
{
- std::string name = parameters.at("name").get< std::string >();
+ std::string name = parameters.name;
if( auxiliary::starts_with(name, "/") )
name = auxiliary::replace_first(name, "/", "");
if( auxiliary::ends_with(name, "/") )
@@ -273,7 +211,7 @@ HDF5IOHandlerImpl::createDataset(Writable* writable,
H5P_DEFAULT);
ASSERT(node_id >= 0, "Internal error: Failed to open HDF5 group during dataset creation");
- Datatype d = parameters.at("dtype").get< Datatype >();
+ Datatype d = parameters.dtype;
if( d == Datatype::UNDEFINED )
{
// TODO handle unknown dtype
@@ -283,14 +221,14 @@ HDF5IOHandlerImpl::createDataset(Writable* writable,
Attribute a(0);
a.dtype = d;
std::vector< hsize_t > dims;
- for( auto const& val : parameters.at("extent").get< Extent >() )
+ for( auto const& val : parameters.extent )
dims.push_back(static_cast< hsize_t >(val));
hid_t space = H5Screate_simple(dims.size(), dims.data(), dims.data());
ASSERT(space >= 0, "Internal error: Failed to create dataspace during dataset creation");
std::vector< hsize_t > chunkDims;
- for( auto const& val : parameters.at("chunkSize").get< Extent >() )
+ for( auto const& val : parameters.chunkSize )
chunkDims.push_back(static_cast< hsize_t >(val));
/* enable chunking on the created dataspace */
@@ -299,8 +237,11 @@ HDF5IOHandlerImpl::createDataset(Writable* writable,
//status = H5Pset_chunk(datasetCreationProperty, chunkDims.size(), chunkDims.data());
//ASSERT(status == 0, "Internal error: Failed to set chunk size during dataset creation");
- std::string const& compression = parameters.at("compression").get< std::string >();
+ std::string const& compression = parameters.compression;
if( !compression.empty() )
+ std::cerr << "Compression not yet implemented in HDF5 backend."
+ << std::endl;
+ /*
{
std::vector< std::string > args = auxiliary::split(compression, ":");
std::string const& format = args[0];
@@ -318,8 +259,9 @@ HDF5IOHandlerImpl::createDataset(Writable* writable,
<< " unknown. Data will not be compressed!"
<< std::endl;
}
+ */
- std::string const& transform = parameters.at("transform").get< std::string >();
+ std::string const& transform = parameters.transform;
if( !transform.empty() )
std::cerr << "Custom transform not yet implemented in HDF5 backend."
<< std::endl;
@@ -355,8 +297,11 @@ HDF5IOHandlerImpl::createDataset(Writable* writable,
void
HDF5IOHandlerImpl::extendDataset(Writable* writable,
- ArgumentMap const& parameters)
+ Parameter< Operation::EXTEND_DATASET > const& parameters)
{
+ if( m_handler->accessType == AccessType::READ_ONLY )
+ throw std::runtime_error("Extending a dataset in a file opened as read only is not possible.");
+
if( !writable->written )
throw std::runtime_error("Extending an unwritten Dataset is not possible.");
@@ -368,7 +313,7 @@ HDF5IOHandlerImpl::extendDataset(Writable* writable,
ASSERT(node_id >= 0, "Internal error: Failed to open HDF5 group during dataset extension");
/* Sanitize name */
- std::string name = parameters.at("name").get< std::string >();
+ std::string name = parameters.name;
if( auxiliary::starts_with(name, "/") )
name = auxiliary::replace_first(name, "/", "");
if( !auxiliary::ends_with(name, "/") )
@@ -380,7 +325,7 @@ HDF5IOHandlerImpl::extendDataset(Writable* writable,
ASSERT(dataset_id >= 0, "Internal error: Failed to open HDF5 dataset during dataset extension");
std::vector< hsize_t > size;
- for( auto const& val : parameters.at("extent").get< Extent >() )
+ for( auto const& val : parameters.extent )
size.push_back(static_cast< hsize_t >(val));
herr_t status;
@@ -395,7 +340,7 @@ HDF5IOHandlerImpl::extendDataset(Writable* writable,
void
HDF5IOHandlerImpl::openFile(Writable* writable,
- ArgumentMap const& parameters)
+ Parameter< Operation::OPEN_FILE > const& parameters)
{
//TODO check if file already open
//not possible with current implementation
@@ -405,7 +350,7 @@ HDF5IOHandlerImpl::openFile(Writable* writable,
if( !exists(dir) )
throw no_such_file_error("Supplied directory is not valid: " + m_handler->directory);
- std::string name = m_handler->directory + parameters.at("name").get< std::string >();
+ std::string name = m_handler->directory + parameters.name;
if( !auxiliary::ends_with(name, ".h5") )
name += ".h5";
@@ -434,7 +379,7 @@ HDF5IOHandlerImpl::openFile(Writable* writable,
void
HDF5IOHandlerImpl::openPath(Writable* writable,
- ArgumentMap const& parameters)
+ Parameter< Operation::OPEN_PATH > const& parameters)
{
auto res = m_fileIDs.find(writable->parent);
hid_t node_id, path_id;
@@ -444,7 +389,7 @@ HDF5IOHandlerImpl::openPath(Writable* writable,
ASSERT(node_id >= 0, "Internal error: Failed to open HDF5 group during path opening");
/* Sanitize path */
- std::string path = parameters.at("path").get< std::string >();
+ std::string path = parameters.path;
if( auxiliary::starts_with(path, "/") )
path = auxiliary::replace_first(path, "/", "");
if( !auxiliary::ends_with(path, "/") )
@@ -470,7 +415,7 @@ HDF5IOHandlerImpl::openPath(Writable* writable,
void
HDF5IOHandlerImpl::openDataset(Writable* writable,
- std::map< std::string, ParameterArgument > & parameters)
+ Parameter< Operation::OPEN_DATASET > & parameters)
{
auto res = m_fileIDs.find(writable->parent);
hid_t node_id, dataset_id;
@@ -480,7 +425,7 @@ HDF5IOHandlerImpl::openDataset(Writable* writable,
ASSERT(node_id >= 0, "Internal error: Failed to open HDF5 group during dataset opening");
/* Sanitize name */
- std::string name = parameters.at("name").get< std::string >();
+ std::string name = parameters.name;
if( auxiliary::starts_with(name, "/") )
name = auxiliary::replace_first(name, "/", "");
if( !auxiliary::ends_with(name, "/") )
@@ -528,7 +473,7 @@ HDF5IOHandlerImpl::openDataset(Writable* writable,
} else
throw std::runtime_error("Unsupported dataset class");
- auto dtype = parameters.at("dtype").get< std::shared_ptr< Datatype > >();
+ auto dtype = parameters.dtype;
*dtype = d;
int ndims = H5Sget_simple_extent_ndims(dataset_space);
@@ -541,7 +486,7 @@ HDF5IOHandlerImpl::openDataset(Writable* writable,
Extent e;
for( auto const& val : dims )
e.push_back(val);
- auto extent = parameters.at("extent").get< std::shared_ptr< Extent > >();
+ auto extent = parameters.extent;
*extent = e;
herr_t status;
@@ -562,7 +507,7 @@ HDF5IOHandlerImpl::openDataset(Writable* writable,
void
HDF5IOHandlerImpl::deleteFile(Writable* writable,
- ArgumentMap const& parameters)
+ Parameter< Operation::DELETE_FILE > const& parameters)
{
if( m_handler->accessType == AccessType::READ_ONLY )
throw std::runtime_error("Deleting a file opened as read only is not possible.");
@@ -573,7 +518,7 @@ HDF5IOHandlerImpl::deleteFile(Writable* writable,
herr_t status = H5Fclose(file_id);
ASSERT(status == 0, "Internal error: Failed to close HDF5 file during file deletion");
- std::string name = m_handler->directory + parameters.at("name").get< std::string >();
+ std::string name = m_handler->directory + parameters.name;
if( !auxiliary::ends_with(name, ".h5") )
name += ".h5";
@@ -594,7 +539,7 @@ HDF5IOHandlerImpl::deleteFile(Writable* writable,
void
HDF5IOHandlerImpl::deletePath(Writable* writable,
- ArgumentMap const& parameters)
+ Parameter< Operation::DELETE_PATH > const& parameters)
{
if( m_handler->accessType == AccessType::READ_ONLY )
throw std::runtime_error("Deleting a path in a file opened as read only is not possible.");
@@ -602,7 +547,7 @@ HDF5IOHandlerImpl::deletePath(Writable* writable,
if( writable->written )
{
/* Sanitize path */
- std::string path = parameters.at("path").get< std::string >();
+ std::string path = parameters.path;
if( auxiliary::starts_with(path, "/") )
path = auxiliary::replace_first(path, "/", "");
if( !auxiliary::ends_with(path, "/") )
@@ -638,7 +583,7 @@ HDF5IOHandlerImpl::deletePath(Writable* writable,
void
HDF5IOHandlerImpl::deleteDataset(Writable* writable,
- ArgumentMap const& parameters)
+ Parameter< Operation::DELETE_DATASET > const& parameters)
{
if( m_handler->accessType == AccessType::READ_ONLY )
throw std::runtime_error("Deleting a path in a file opened as read only is not possible.");
@@ -646,7 +591,7 @@ HDF5IOHandlerImpl::deleteDataset(Writable* writable,
if( writable->written )
{
/* Sanitize name */
- std::string name = parameters.at("name").get< std::string >();
+ std::string name = parameters.name;
if( auxiliary::starts_with(name, "/") )
name = auxiliary::replace_first(name, "/", "");
if( !auxiliary::ends_with(name, "/") )
@@ -682,14 +627,14 @@ HDF5IOHandlerImpl::deleteDataset(Writable* writable,
void
HDF5IOHandlerImpl::deleteAttribute(Writable* writable,
- ArgumentMap const& parameters)
+ Parameter< Operation::DELETE_ATT > const& parameters)
{
if( m_handler->accessType == AccessType::READ_ONLY )
throw std::runtime_error("Deleting an attribute in a file opened as read only is not possible.");
if( writable->written )
{
- std::string name = parameters.at("name").get< std::string >();
+ std::string name = parameters.name;
/* Open H5Object to delete in */
auto res = m_fileIDs.find(writable);
@@ -711,8 +656,11 @@ HDF5IOHandlerImpl::deleteAttribute(Writable* writable,
void
HDF5IOHandlerImpl::writeDataset(Writable* writable,
- ArgumentMap const& parameters)
+ Parameter< Operation::WRITE_DATASET > const& parameters)
{
+ if( m_handler->accessType == AccessType::READ_ONLY )
+ throw std::runtime_error("Writing into a dataset in a file opened as read only is not possible.");
+
auto res = m_fileIDs.find(writable);
if( res == m_fileIDs.end() )
res = m_fileIDs.find(writable->parent);
@@ -725,12 +673,12 @@ HDF5IOHandlerImpl::writeDataset(Writable* writable,
ASSERT(dataset_id >= 0, "Internal error: Failed to open HDF5 dataset during dataset write");
std::vector< hsize_t > start;
- for( auto const& val : parameters.at("offset").get< Offset >() )
+ for( auto const& val : parameters.offset )
start.push_back(static_cast< hsize_t >(val));
std::vector< hsize_t > stride(start.size(), 1); /* contiguous region */
std::vector< hsize_t > count(start.size(), 1); /* single region */
std::vector< hsize_t > block;
- for( auto const& val : parameters.at("extent").get< Extent >() )
+ for( auto const& val : parameters.extent )
block.push_back(static_cast< hsize_t >(val));
memspace = H5Screate_simple(block.size(), block.data(), nullptr);
filespace = H5Dget_space(dataset_id);
@@ -742,10 +690,11 @@ HDF5IOHandlerImpl::writeDataset(Writable* writable,
block.data());
ASSERT(status == 0, "Internal error: Failed to select hyperslab during dataset write");
- std::shared_ptr< void > data = parameters.at("data").get< std::shared_ptr< void > >();
+ std::shared_ptr< void > data = parameters.data;
+ //TODO Check if parameter dtype and dataset dtype match
Attribute a(0);
- a.dtype = parameters.at("dtype").get< Datatype >();
+ a.dtype = parameters.dtype;
hid_t dataType = getH5DataType(a);
ASSERT(dataType >= 0, "Internal error: Failed to get HDF5 datatype during dataset write");
switch( a.dtype )
@@ -771,7 +720,7 @@ HDF5IOHandlerImpl::writeDataset(Writable* writable,
ASSERT(status == 0, "Internal error: Failed to write dataset " + concrete_h5_file_position(writable));
break;
case DT::UNDEFINED:
- throw std::runtime_error("Unknown Attribute datatype");
+ throw std::runtime_error("Undefined Attribute datatype");
case DT::DATATYPE:
throw std::runtime_error("Meta-Datatype leaked into IO");
default:
@@ -791,8 +740,11 @@ HDF5IOHandlerImpl::writeDataset(Writable* writable,
void
HDF5IOHandlerImpl::writeAttribute(Writable* writable,
- ArgumentMap const& parameters)
+ Parameter< Operation::WRITE_ATT > const& parameters)
{
+ if( m_handler->accessType == AccessType::READ_ONLY )
+ throw std::runtime_error("Writing an attribute in a file opened as read only is not possible.");
+
auto res = m_fileIDs.find(writable);
if( res == m_fileIDs.end() )
res = m_fileIDs.find(writable->parent);
@@ -801,9 +753,8 @@ HDF5IOHandlerImpl::writeAttribute(Writable* writable,
concrete_h5_file_position(writable).c_str(),
H5P_DEFAULT);
ASSERT(node_id >= 0, "Internal error: Failed to open HDF5 object during attribute write");
- std::string name = parameters.at("name").get< std::string >();
- Attribute const att(parameters.at("attribute").get< Attribute::resource >());
- Datatype dtype = parameters.at("dtype").get< Datatype >();
+ Attribute const att(parameters.resource);
+ Datatype dtype = parameters.dtype;
herr_t status;
hid_t dataType;
if( dtype == Datatype::BOOL )
@@ -811,6 +762,7 @@ HDF5IOHandlerImpl::writeAttribute(Writable* writable,
else
dataType = getH5DataType(att);
ASSERT(dataType >= 0, "Internal error: Failed to get HDF5 datatype during attribute write");
+ std::string name = parameters.name;
if( H5Aexists(node_id, name.c_str()) == 0 )
{
hid_t dataspace = getH5DataSpace(att);
@@ -1008,7 +960,7 @@ HDF5IOHandlerImpl::writeAttribute(Writable* writable,
void
HDF5IOHandlerImpl::readDataset(Writable* writable,
- std::map< std::string, ParameterArgument > & parameters)
+ Parameter< Operation::READ_DATASET > & parameters)
{
auto res = m_fileIDs.find(writable);
if( res == m_fileIDs.end() )
@@ -1021,12 +973,12 @@ HDF5IOHandlerImpl::readDataset(Writable* writable,
ASSERT(dataset_id >= 0, "Internal error: Failed to open HDF5 dataset during dataset read");
std::vector< hsize_t > start;
- for( auto const& val : parameters.at("offset").get< Offset >() )
+ for( auto const& val : parameters.offset )
 start.push_back(static_cast< hsize_t >(val));
std::vector< hsize_t > stride(start.size(), 1); /* contiguous region */
std::vector< hsize_t > count(start.size(), 1); /* single region */
std::vector< hsize_t > block;
- for( auto const& val : parameters.at("extent").get< Extent >() )
+ for( auto const& val : parameters.extent )
block.push_back(static_cast< hsize_t >(val));
memspace = H5Screate_simple(block.size(), block.data(), nullptr);
filespace = H5Dget_space(dataset_id);
@@ -1038,10 +990,10 @@ HDF5IOHandlerImpl::readDataset(Writable* writable,
block.data());
ASSERT(status == 0, "Internal error: Failed to select hyperslab during dataset read");
- void* data = parameters.at("data").get< void* >();
+ void* data = parameters.data;
Attribute a(0);
- a.dtype = parameters.at("dtype").get< Datatype >();
+ a.dtype = parameters.dtype;
switch( a.dtype )
{
using DT = Datatype;
@@ -1086,7 +1038,7 @@ HDF5IOHandlerImpl::readDataset(Writable* writable,
void
HDF5IOHandlerImpl::readAttribute(Writable* writable,
- std::map< std::string, ParameterArgument >& parameters)
+ Parameter< Operation::READ_ATT >& parameters)
{
auto res = m_fileIDs.find(writable);
if( res == m_fileIDs.end() )
@@ -1098,7 +1050,7 @@ HDF5IOHandlerImpl::readAttribute(Writable* writable,
concrete_h5_file_position(writable).c_str(),
H5P_DEFAULT);
ASSERT(obj_id >= 0, "Internal error: Failed to open HDF5 object during attribute read");
- std::string const & attr_name = parameters.at("name").get< std::string >();
+ std::string const & attr_name = parameters.name;
attr_id = H5Aopen(obj_id,
attr_name.c_str(),
H5P_DEFAULT);
@@ -1375,9 +1327,9 @@ HDF5IOHandlerImpl::readAttribute(Writable* writable,
status = H5Sclose(attr_space);
ASSERT(status == 0, "Internal error: Failed to close attribute file space during attribute read");
- auto dtype = parameters.at("dtype").get< std::shared_ptr< Datatype > >();
+ auto dtype = parameters.dtype;
*dtype = a.dtype;
- auto resource = parameters.at("resource").get< std::shared_ptr< Attribute::resource > >();
+ auto resource = parameters.resource;
*resource = a.getResource();
status = H5Aclose(attr_id);
@@ -1388,7 +1340,7 @@ HDF5IOHandlerImpl::readAttribute(Writable* writable,
void
HDF5IOHandlerImpl::listPaths(Writable* writable,
- std::map< std::string, ParameterArgument > & parameters)
+ Parameter< Operation::LIST_PATHS > & parameters)
{
auto res = m_fileIDs.find(writable);
if( res == m_fileIDs.end() )
@@ -1402,7 +1354,7 @@ HDF5IOHandlerImpl::listPaths(Writable* writable,
herr_t status = H5Gget_info(node_id, &group_info);
ASSERT(status == 0, "Internal error: Failed to get HDF5 group info for " + concrete_h5_file_position(writable) + " during path listing");
- auto paths = parameters.at("paths").get< std::shared_ptr< std::vector< std::string > > >();
+ auto paths = parameters.paths;
for( hsize_t i = 0; i < group_info.nlinks; ++i )
{
if( H5G_GROUP == H5Gget_objtype_by_idx(node_id, i) )
@@ -1420,7 +1372,7 @@ HDF5IOHandlerImpl::listPaths(Writable* writable,
void
HDF5IOHandlerImpl::listDatasets(Writable* writable,
- std::map< std::string, ParameterArgument >& parameters)
+ Parameter< Operation::LIST_DATASETS >& parameters)
{
auto res = m_fileIDs.find(writable);
if( res == m_fileIDs.end() )
@@ -1434,7 +1386,7 @@ HDF5IOHandlerImpl::listDatasets(Writable* writable,
herr_t status = H5Gget_info(node_id, &group_info);
ASSERT(status == 0, "Internal error: Failed to get HDF5 group info for " + concrete_h5_file_position(writable) + " during dataset listing");
- auto datasets = parameters.at("datasets").get< std::shared_ptr< std::vector< std::string > > >();
+ auto datasets = parameters.datasets;
for( hsize_t i = 0; i < group_info.nlinks; ++i )
{
if( H5G_DATASET == H5Gget_objtype_by_idx(node_id, i) )
@@ -1451,7 +1403,7 @@ HDF5IOHandlerImpl::listDatasets(Writable* writable,
}
void HDF5IOHandlerImpl::listAttributes(Writable* writable,
- std::map< std::string, ParameterArgument >& parameters)
+ Parameter< Operation::LIST_ATTS >& parameters)
{
auto res = m_fileIDs.find(writable);
if( res == m_fileIDs.end() )
@@ -1467,7 +1419,7 @@ void HDF5IOHandlerImpl::listAttributes(Writable* writable,
status = H5Oget_info(node_id, &object_info);
ASSERT(status == 0, "Internal error: Failed to get HDF5 object info for " + concrete_h5_file_position(writable) + " during attribute listing");
- auto strings = parameters.at("attributes").get< std::shared_ptr< std::vector< std::string > > >();
+ auto attributes = parameters.attributes;
for( hsize_t i = 0; i < object_info.num_attrs; ++i )
{
ssize_t name_length = H5Aget_name_by_idx(node_id,
@@ -1487,7 +1439,7 @@ void HDF5IOHandlerImpl::listAttributes(Writable* writable,
name.data(),
name_length+1,
H5P_DEFAULT);
- strings->push_back(std::string(name.data(), name_length));
+ attributes->push_back(std::string(name.data(), name_length));
}
status = H5Oclose(node_id);
diff --git a/src/IO/IOTask.cpp b/src/IO/IOTask.cpp
deleted file mode 100755
index edf45181b0..0000000000
--- a/src/IO/IOTask.cpp
+++ /dev/null
@@ -1,168 +0,0 @@
-#include "openPMD/IO/IOTask.hpp"
-
-
-namespace openPMD
-{
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::CREATE_FILE > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"name", ParameterArgument(p.name)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::OPEN_FILE > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"name", ParameterArgument(p.name)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::DELETE_FILE > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"name", ParameterArgument(p.name)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::CREATE_PATH > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"path", ParameterArgument(p.path)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::OPEN_PATH > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"path", ParameterArgument(p.path)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::DELETE_PATH > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"path", ParameterArgument(p.path)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::LIST_PATHS > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"paths", ParameterArgument(p.paths)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::CREATE_DATASET > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"name", ParameterArgument(p.name)});
- ret.insert({"extent", ParameterArgument(p.extent)});
- ret.insert({"dtype", ParameterArgument(p.dtype)});
- ret.insert({"chunkSize", ParameterArgument(p.chunkSize)});
- ret.insert({"compression", ParameterArgument(p.transform)});
- ret.insert({"transform", ParameterArgument(p.transform)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::EXTEND_DATASET > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"name", ParameterArgument(p.name)});
- ret.insert({"extent", ParameterArgument(p.extent)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::OPEN_DATASET > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"name", ParameterArgument(p.name)});
- ret.insert({"dtype", ParameterArgument(p.dtype)});
- ret.insert({"extent", ParameterArgument(p.extent)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::DELETE_DATASET > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"name", ParameterArgument(p.name)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::WRITE_DATASET > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"extent", ParameterArgument(p.extent)});
- ret.insert({"offset", ParameterArgument(p.offset)});
- ret.insert({"dtype", ParameterArgument(p.dtype)});
- ret.insert({"data", ParameterArgument(p.data)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::READ_DATASET > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"extent", ParameterArgument(p.extent)});
- ret.insert({"offset", ParameterArgument(p.offset)});
- ret.insert({"dtype", ParameterArgument(p.dtype)});
- ret.insert({"data", ParameterArgument(p.data)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::LIST_DATASETS > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"datasets", ParameterArgument(p.datasets)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::DELETE_ATT > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"name", ParameterArgument(p.name)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::WRITE_ATT > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"name", ParameterArgument(p.name)});
- ret.insert({"dtype", ParameterArgument(p.dtype)});
- ret.insert({"attribute", ParameterArgument(p.resource)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::READ_ATT > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"name", ParameterArgument(p.name)});
- ret.insert({"dtype", ParameterArgument(p.dtype)});
- ret.insert({"resource", ParameterArgument(p.resource)});
- return ret;
-}
-
-template<>
-std::map< std::string, ParameterArgument > structToMap(Parameter< Operation::LIST_ATTS > const& p)
-{
- std::map< std::string, ParameterArgument > ret;
- ret.insert({"attributes", ParameterArgument(p.attributes)});
- return ret;
-}
-} // openPMD
-
diff --git a/src/Series.cpp b/src/Series.cpp
index f05422fc63..d15a6f7b1d 100755
--- a/src/Series.cpp
+++ b/src/Series.cpp
@@ -1,4 +1,4 @@
-/* Copyright 2017 Fabian Koller
+/* Copyright 2017-2018 Fabian Koller
*
* This file is part of openPMD-api.
*
@@ -30,16 +30,6 @@
namespace openPMD
{
-void
-check_extension(std::string const& filepath)
-{
- if( !auxiliary::ends_with(filepath, ".h5") &&
- !auxiliary::ends_with(filepath, ".bp") &&
- !auxiliary::ends_with(filepath, ".dummy") )
- throw std::runtime_error("File format not recognized. "
- "Did you append a correct filename extension?");
-}
-
#if openPMD_HAVE_MPI
Series
Series::create(std::string const& filepath,
@@ -49,8 +39,6 @@ Series::create(std::string const& filepath,
if( AccessType::READ_ONLY == at )
throw std::runtime_error("Access type not supported in create-API.");
- check_extension(filepath);
-
return Series(filepath, at, comm);
}
#endif
@@ -62,8 +50,6 @@ Series::create(std::string const& filepath,
if( AccessType::READ_ONLY == at )
throw std::runtime_error("Access type not supported in create-API.");
- check_extension(filepath);
-
return Series(filepath, at);
}
@@ -76,8 +62,6 @@ Series::read(std::string const& filepath,
if( AccessType::CREATE == at )
throw std::runtime_error("Access type not supported in read-API.");
- check_extension(filepath);
-
return Series(filepath, at, comm);
}
#endif
@@ -89,8 +73,6 @@ Series::read(std::string const& filepath,
if( AccessType::CREATE == at )
throw std::runtime_error("Access type not supported in read-API.");
- check_extension(filepath);
-
return Series(filepath, at);
}
@@ -121,19 +103,8 @@ Series::Series(std::string const& filepath,
else
ie = IterationEncoding::groupBased;
- Format f;
- if( auxiliary::ends_with(name, ".h5") )
- f = Format::HDF5;
- else if( auxiliary::ends_with(name, ".bp") )
- f = Format::ADIOS1;
- else
- {
- if( !auxiliary::ends_with(name, ".dummy") )
- std::cerr << "Unknown filename extension. "
- "Falling back to DUMMY format."
- << std::endl;
- f = Format::DUMMY;
- }
+
+ Format f = determineFormat(name);
IOHandler = AbstractIOHandler::createIOHandler(path, at, f, comm);
iterations.IOHandler = IOHandler;
@@ -190,19 +161,7 @@ Series::Series(std::string const& filepath,
else
ie = IterationEncoding::groupBased;
- Format f;
- if( auxiliary::ends_with(name, ".h5") )
- f = Format::HDF5;
- else if( auxiliary::ends_with(name, ".bp") )
- f = Format::ADIOS1;
- else
- {
- if( !auxiliary::ends_with(name, ".dummy") )
- std::cerr << "Unknown filename extension. "
- "Falling back to DUMMY format."
- << std::endl;
- f = Format::DUMMY;
- }
+ Format f = determineFormat(name);
IOHandler = AbstractIOHandler::createIOHandler(path, at, f);
iterations.IOHandler = IOHandler;
@@ -789,25 +748,33 @@ Series::read()
readAttributes();
}
+Format
+determineFormat(std::string const& filename)
+{
+ if( auxiliary::ends_with(filename, ".h5") )
+ return Format::HDF5;
+ if( auxiliary::ends_with(filename, ".bp") )
+ return Format::ADIOS1;
+
+ if( std::string::npos != filename.find('.') /* extension is provided */ )
+ std::cerr << "Unknown storage format. "
+ "Did you append a correct filename extension? "
+ "Your IO operations will be NOOPS!" << std::endl;
+ return Format::DUMMY;
+}
+
std::string
-Series::cleanFilename(std::string s, Format f)
+cleanFilename(std::string const& filename, Format f)
{
switch( f )
{
case Format::HDF5:
- s = auxiliary::replace_last(s, ".h5", "");
- break;
+ return auxiliary::replace_last(filename, ".h5", "");
case Format::ADIOS1:
case Format::ADIOS2:
- s = auxiliary::replace_last(s, ".bp", "");
- break;
- case Format::DUMMY:
- s = auxiliary::replace_last(s, ".dummy", "");
- break;
+ return auxiliary::replace_last(filename, ".bp", "");
default:
- break;
+ return filename;
}
-
- return s;
}
} // openPMD
diff --git a/test/CoreTest.cpp b/test/CoreTest.cpp
index 083396cc15..e59959a970 100755
--- a/test/CoreTest.cpp
+++ b/test/CoreTest.cpp
@@ -68,7 +68,7 @@ TEST_CASE( "attribute_dtype_test", "[core]" )
TEST_CASE( "output_default_test", "[core]" )
{
using IE = IterationEncoding;
- Series o = Series::create("./new_openpmd_output_%T.dummy");
+ Series o = Series::create("./new_openpmd_output_%T");
REQUIRE(o.openPMD() == "1.1.0");
REQUIRE(o.openPMDextension() == static_cast(0));
@@ -85,7 +85,7 @@ TEST_CASE( "output_default_test", "[core]" )
TEST_CASE( "output_constructor_test", "[core]" )
{
using IE = IterationEncoding;
- Series o = Series::create("./MyCustomOutput.dummy");
+ Series o = Series::create("./MyCustomOutput");
o.setMeshesPath("customMeshesPath").setParticlesPath("customParticlesPath");
@@ -106,7 +106,7 @@ TEST_CASE( "output_constructor_test", "[core]" )
TEST_CASE( "output_modification_test", "[core]" )
{
- Series o = Series::create("./MyOutput_%T.dummy");
+ Series o = Series::create("./MyOutput_%T");
o.setOpenPMD("1.0.0");
REQUIRE(o.openPMD() == "1.0.0");
@@ -131,7 +131,7 @@ TEST_CASE( "output_modification_test", "[core]" )
TEST_CASE( "iteration_default_test", "[core]" )
{
- Series o = Series::create("./MyOutput_%T.dummy");
+ Series o = Series::create("./MyOutput_%T");
Iteration& i = o.iterations[42];
@@ -145,7 +145,7 @@ TEST_CASE( "iteration_default_test", "[core]" )
TEST_CASE( "iteration_modification_test", "[core]" )
{
- Series o = Series::create("./MyOutput_%T.dummy");
+ Series o = Series::create("./MyOutput_%T");
Iteration& i = o.iterations[42];
@@ -163,7 +163,7 @@ TEST_CASE( "iteration_modification_test", "[core]" )
TEST_CASE( "particleSpecies_modification_test", "[core]" )
{
- Series o = Series::create("./MyOutput_%T.dummy");
+ Series o = Series::create("./MyOutput_%T");
auto& particles = o.iterations[42].particles;
REQUIRE(0 == particles.numAttributes());
@@ -201,7 +201,7 @@ TEST_CASE( "particleSpecies_modification_test", "[core]" )
TEST_CASE( "record_constructor_test", "[core]" )
{
- Series o = Series::create("./MyOutput_%T.dummy");
+ Series o = Series::create("./MyOutput_%T");
Record& r = o.iterations[42].particles["species"]["record"];
@@ -219,7 +219,7 @@ TEST_CASE( "record_constructor_test", "[core]" )
TEST_CASE( "record_modification_test", "[core]" )
{
- Series o = Series::create("./MyOutput_%T.dummy");
+ Series o = Series::create("./MyOutput_%T");
Record& r = o.iterations[42].particles["species"]["record"];
@@ -243,7 +243,7 @@ TEST_CASE( "record_modification_test", "[core]" )
TEST_CASE( "recordComponent_modification_test", "[core]" )
{
- Series o = Series::create("./MyOutput_%T.dummy");
+ Series o = Series::create("./MyOutput_%T");
Record& r = o.iterations[42].particles["species"]["record"];
@@ -261,7 +261,7 @@ TEST_CASE( "recordComponent_modification_test", "[core]" )
TEST_CASE( "mesh_constructor_test", "[core]" )
{
- Series o = Series::create("./MyOutput_%T.dummy");
+ Series o = Series::create("./MyOutput_%T");
Mesh &m = o.iterations[42].meshes["E"];
@@ -289,7 +289,7 @@ TEST_CASE( "mesh_constructor_test", "[core]" )
TEST_CASE( "mesh_modification_test", "[core]" )
{
- Series o = Series::create("./MyOutput_%T.dummy");
+ Series o = Series::create("./MyOutput_%T");
Mesh &m = o.iterations[42].meshes["E"];
m["x"];
@@ -328,7 +328,7 @@ TEST_CASE( "mesh_modification_test", "[core]" )
TEST_CASE( "structure_test", "[core]" )
{
- Series o = Series::create("./new_openpmd_output_%T.dummy");
+ Series o = Series::create("./new_openpmd_output_%T");
REQUIRE(o.IOHandler);
REQUIRE(o.iterations.IOHandler);