#ifndef QMCPLUSPLUS_HDF_MULTI_INTERFACE_H
#define QMCPLUSPLUS_HDF_MULTI_INTERFACE_H

#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

#include <multi/array.hpp>
#include <multi/array_ref.hpp>

#include "hdf_dataspace.h"  // h5_space_type, h5data_proxy, h5d_read/h5d_write, checkShapeConsistency
#include "hdf_hyperslab.h"  // hyperslab_proxy

#if defined(ENABLE_CUDA) || defined(ENABLE_HIP)
#include "AFQMC/Memory/device_pointers.hpp"
#endif

/** h5data_proxy specialization for boost::multi::array<T, 1, Alloc> (host memory). */
template<typename T, class Alloc>
struct h5data_proxy<boost::multi::array<T, 1, Alloc>> : public h5_space_type<T, 1>
{
  using FileSpace = h5_space_type<T, 1>;
  using FileSpace::dims;
  using FileSpace::get_address;
  using data_type = boost::multi::array<T, 1, Alloc>;

  inline h5data_proxy(const data_type& a) { dims[0] = a.num_elements(); }

  inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT)
  {
    using iextensions = typename boost::multi::iextensions<1u>;
    if (!checkShapeConsistency<T>(grp, aname, FileSpace::rank, dims))
      ref.reextent(iextensions{static_cast<boost::multi::size_t>(dims[0])});
    return h5d_read(grp, aname, get_address(std::addressof(*ref.origin())), xfer_plist);
  }

  inline bool write(const data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) const
  {
    return h5d_write(grp, aname, FileSpace::rank, dims, get_address(std::addressof(*ref.origin())), xfer_plist);
  }
};
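// A minimal usage sketch, not taken from this header: 'grp' stands for an already-open
// HDF5 group handle and "dataset_name" is a placeholder dataset name.
//
//   boost::multi::array<double, 1> v(boost::multi::iextensions<1u>{0});
//   h5data_proxy<boost::multi::array<double, 1>> proxy(v);
//   bool ok = proxy.read(v, grp, "dataset_name");   // reextents v to match the file dims
//
// In QMCPACK this path is normally driven by the higher-level hdf_archive reader rather
// than by constructing the proxy by hand.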
/** h5data_proxy specialization for boost::multi::array<T, 2, Alloc> (host memory). */
template<typename T, class Alloc>
struct h5data_proxy<boost::multi::array<T, 2, Alloc>> : public h5_space_type<T, 2>
{
  using FileSpace = h5_space_type<T, 2>;
  using FileSpace::dims;
  using FileSpace::get_address;
  using data_type = boost::multi::array<T, 2, Alloc>;

  inline h5data_proxy(const data_type& a)
  {
    dims[0] = std::get<0>(a.sizes());
    dims[1] = std::get<1>(a.sizes());
  }

  inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT)
  {
    if (!checkShapeConsistency<T>(grp, aname, FileSpace::rank, dims))
      ref.reextent({static_cast<boost::multi::size_t>(dims[0]), static_cast<boost::multi::size_t>(dims[1])});
    return h5d_read(grp, aname, get_address(std::addressof(*ref.origin())), xfer_plist);
  }

  inline bool write(const data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) const
  {
    return h5d_write(grp, aname, FileSpace::rank, dims, get_address(std::addressof(*ref.origin())), xfer_plist);
  }
};
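// Writing follows the same pattern. Again only a sketch; 'grp' is an open HDF5 group
// handle obtained elsewhere and "matrix_name" is a placeholder.
//
//   boost::multi::array<double, 2> m({3, 4});
//   h5data_proxy<boost::multi::array<double, 2>> proxy(m);   // records m's shape in dims
//   bool ok = proxy.write(m, grp, "matrix_name");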
/** h5data_proxy specialization for boost::multi::array_ref<T, 1, Ptr> (host memory).
 *  An array_ref cannot be resized, so read() requires the shapes to match already.
 */
template<typename T, class Ptr>
struct h5data_proxy<boost::multi::array_ref<T, 1, Ptr>> : public h5_space_type<T, 1>
{
  using FileSpace = h5_space_type<T, 1>;
  using FileSpace::dims;
  using FileSpace::get_address;
  using data_type = boost::multi::array_ref<T, 1, Ptr>;

  inline h5data_proxy(const data_type& a) { dims[0] = a.num_elements(); }

  inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT)
  {
    if (!checkShapeConsistency<T>(grp, aname, FileSpace::rank, dims))
    {
      std::cerr << " Error: multi::array_ref can't be resized in h5data_proxy<>::read." << std::endl;
      std::cerr << dims[0] << " " << std::get<0>(ref.sizes()) << std::endl;
      return false;
    }
    return h5d_read(grp, aname, get_address(std::addressof(*ref.origin())), xfer_plist);
  }

  inline bool write(const data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) const
  {
    return h5d_write(grp, aname, FileSpace::rank, dims, get_address(std::addressof(*ref.origin())), xfer_plist);
  }
};
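// Note on the array_ref specializations (this one and the 2D version below): an
// array_ref only views memory owned elsewhere, so read() refuses to resize and reports
// a shape mismatch instead. The caller must provide storage with the dataset's shape
// first. Sketch with a placeholder host buffer:
//
//   std::vector<double> storage(n);                        // n must equal the dataset size
//   boost::multi::array_ref<double, 1> vref(storage.data(), {n});
//   h5data_proxy<boost::multi::array_ref<double, 1>> proxy(vref);
//   bool ok = proxy.read(vref, grp, "dataset_name");       // fails if n != file dims[0]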
/** h5data_proxy specialization for boost::multi::array_ref<T, 2, Ptr> (host memory). */
template<typename T, class Ptr>
struct h5data_proxy<boost::multi::array_ref<T, 2, Ptr>> : public h5_space_type<T, 2>
{
  using FileSpace = h5_space_type<T, 2>;
  using FileSpace::dims;
  using FileSpace::get_address;
  using data_type = boost::multi::array_ref<T, 2, Ptr>;

  inline h5data_proxy(const data_type& a)
  {
    dims[0] = std::get<0>(a.sizes());
    dims[1] = std::get<1>(a.sizes());
  }

  inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT)
  {
    if (!checkShapeConsistency<T>(grp, aname, FileSpace::rank, dims))
    {
      std::cerr << " Error: multi::array_ref can't be resized in h5data_proxy<>::read." << std::endl;
      std::cerr << dims[0] << " " << dims[1] << " " << std::get<0>(ref.sizes()) << " " << std::get<1>(ref.sizes())
                << std::endl;
      return false;
    }
    return h5d_read(grp, aname, get_address(std::addressof(*ref.origin())), xfer_plist);
  }

  inline bool write(const data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) const
  {
    return h5d_write(grp, aname, FileSpace::rank, dims, get_address(std::addressof(*ref.origin())), xfer_plist);
  }
};

#if defined(ENABLE_CUDA) || defined(ENABLE_HIP)
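// The specializations below cover containers that live in device (GPU) memory. As the
// code shows, reads stage the data through a temporary host boost::multi::array<T, 1>
// buffer sized to num_elements(), since h5d_read()/h5d_write() operate on host
// pointers; writing directly from device memory is not implemented and throws.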
/** h5data_proxy specialization for boost::multi::array<T, 1, device::device_allocator<T>>. */
template<typename T>
struct h5data_proxy<boost::multi::array<T, 1, device::device_allocator<T>>> : public h5_space_type<T, 1>
{
  using FileSpace = h5_space_type<T, 1>;
  using FileSpace::dims;
  using FileSpace::get_address;
  using data_type = boost::multi::array<T, 1, device::device_allocator<T>>;

  inline h5data_proxy(const data_type& a) { dims[0] = a.num_elements(); }

  inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT)
  {
    if (!checkShapeConsistency<T>(grp, aname, FileSpace::rank, dims))
      ref.reextent({dims[0]});
    auto sz = ref.num_elements();
    using iextensions = typename boost::multi::iextensions<1u>;
    boost::multi::array<T, 1> buf(iextensions{sz});
    auto ret = h5d_read(grp, aname, buf.origin(), xfer_plist);
    // ... copy buf into the device array ref (device copy elided in this fragment)
    return ret;
  }

  inline bool write(const data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) const
  {
    throw std::runtime_error(" write from gpu not implemented yet.");
    return false;
  }
};
/** h5data_proxy specialization for boost::multi::array<T, 2, device::device_allocator<T>>. */
template<typename T>
struct h5data_proxy<boost::multi::array<T, 2, device::device_allocator<T>>> : public h5_space_type<T, 2>
{
  using FileSpace = h5_space_type<T, 2>;
  using FileSpace::dims;
  using FileSpace::get_address;
  using data_type = boost::multi::array<T, 2, device::device_allocator<T>>;

  inline h5data_proxy(const data_type& a)
  {
    dims[0] = std::get<0>(a.sizes());
    dims[1] = std::get<1>(a.sizes());
  }

  inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT)
  {
    if (!checkShapeConsistency<T>(grp, aname, FileSpace::rank, dims))
      ref.reextent({dims[0], dims[1]});
    auto sz = ref.num_elements();
    using iextensions = typename boost::multi::iextensions<1u>;
    boost::multi::array<T, 1> buf(iextensions{sz});
    auto ret = h5d_read(grp, aname, buf.origin(), xfer_plist);
    // ... copy buf into the device array ref (device copy elided in this fragment)
    return ret;
  }

  inline bool write(const data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) const
  {
    throw std::runtime_error(" write from gpu not implemented yet.");
    return false;
  }
};
/** h5data_proxy specialization for boost::multi::array_ref<T, 1, device::device_pointer<T>>. */
template<typename T>
struct h5data_proxy<boost::multi::array_ref<T, 1, device::device_pointer<T>>> : public h5_space_type<T, 1>
{
  using FileSpace = h5_space_type<T, 1>;
  using FileSpace::dims;
  using FileSpace::get_address;
  using data_type = boost::multi::array_ref<T, 1, device::device_pointer<T>>;

  inline h5data_proxy(const data_type& a) { dims[0] = a.num_elements(); }

  inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT)
  {
    if (!checkShapeConsistency<T>(grp, aname, FileSpace::rank, dims))
    {
      std::cerr << " Error: multi::array_ref can't be resized in h5data_proxy<>::read." << std::endl;
      std::cerr << dims[0] << " " << std::get<0>(ref.sizes()) << std::endl;
      return false;
    }
    auto sz = ref.num_elements();
    using iextensions = typename boost::multi::iextensions<1u>;
    boost::multi::array<T, 1> buf(iextensions{sz});
    auto ret = h5d_read(grp, aname, buf.origin(), xfer_plist);
    // ... copy buf into the device memory viewed by ref (device copy elided in this fragment)
    return ret;
  }

  inline bool write(const data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) const
  {
    throw std::runtime_error(" write from gpu not implemented yet.");
    return false;
  }
};
/** h5data_proxy specialization for boost::multi::array_ref<T, 2, device::device_pointer<T>>. */
template<typename T>
struct h5data_proxy<boost::multi::array_ref<T, 2, device::device_pointer<T>>> : public h5_space_type<T, 2>
{
  using FileSpace = h5_space_type<T, 2>;
  using FileSpace::dims;
  using FileSpace::get_address;
  using data_type = boost::multi::array_ref<T, 2, device::device_pointer<T>>;

  inline h5data_proxy(const data_type& a)
  {
    dims[0] = std::get<0>(a.sizes());
    dims[1] = std::get<1>(a.sizes());
  }

  inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT)
  {
    if (!checkShapeConsistency<T>(grp, aname, FileSpace::rank, dims))
    {
      std::cerr << " Error: multi::array_ref can't be resized in h5data_proxy<>::read." << std::endl;
      std::cerr << dims[0] << " " << dims[1] << " " << std::get<0>(ref.sizes()) << " " << std::get<1>(ref.sizes())
                << std::endl;
      return false;
    }
    auto sz = ref.num_elements();
    using iextensions = typename boost::multi::iextensions<1u>;
    boost::multi::array<T, 1> buf(iextensions{sz});
    auto ret = h5d_read(grp, aname, buf.origin(), xfer_plist);
    // ... copy buf into the device memory viewed by ref (device copy elided in this fragment)
    return ret;
  }

  inline bool write(const data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) const
  {
    throw std::runtime_error(" write from gpu not implemented yet.");
    return false;
  }
};
/** h5data_proxy specialization for a hyperslab_proxy wrapping a device boost::multi::array. */
template<typename T, unsigned RANK>
struct h5data_proxy<hyperslab_proxy<boost::multi::array<T, 2, device::device_allocator<T>>, RANK>>
{
  using CT        = boost::multi::array<T, 2, device::device_allocator<T>>;
  using data_type = hyperslab_proxy<CT, RANK>;

  inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT)
  {
    if (ref.use_slab)
    {
      // read the selected hyperslab into a host buffer
      auto sz = ref.ref.num_elements();
      boost::multi::array<T, 1> buf(typename boost::multi::layout_t<1u>::extensions_type{sz});
      auto ret = h5d_read(grp, aname.c_str(), ref.slab_rank, ref.slab_dims.data(), ref.slab_dims_local.data(),
                          ref.slab_offset.data(), buf.origin(), xfer_plist);
      // ... copy buf into ref.ref (device copy elided in this fragment)
      return ret;
    }
    else
    {
      int rank = ref.slab_rank;
      if (!checkShapeConsistency<T>(grp, aname, rank, ref.slab_dims.data(), true))
      {
        std::cerr << " Disabled hyperslab resize with boost::multi::array<gpu_allocator>.\n";
        return false;
      }
      return h5d_read(grp, aname, ref.data(), xfer_plist);
    }
  }

  inline bool write(const data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) const
  {
    std::cerr << " Disabled hyperslab write with boost::multi::array<gpu_allocator>.\n";
    return false;
  }
};
/** h5data_proxy specialization for a hyperslab_proxy wrapping a device boost::multi::array_ref. */
template<typename T, unsigned RANK>
struct h5data_proxy<hyperslab_proxy<boost::multi::array_ref<T, 2, device::device_pointer<T>>, RANK>>
{
  using CT        = boost::multi::array_ref<T, 2, device::device_pointer<T>>;
  using data_type = hyperslab_proxy<CT, RANK>;

  inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT)
  {
    if (ref.use_slab)
    {
      // read the selected hyperslab into a host buffer
      auto sz = ref.ref.num_elements();
      boost::multi::array<T, 1> buf(typename boost::multi::layout_t<1u>::extensions_type{sz});
      auto ret = h5d_read(grp, aname.c_str(), ref.slab_rank, ref.slab_dims.data(), ref.slab_dims_local.data(),
                          ref.slab_offset.data(), buf.origin(), xfer_plist);
      // ... copy buf into ref.ref (device copy elided in this fragment)
      return ret;
    }
    else
    {
      int rank = ref.slab_rank;
      if (!checkShapeConsistency<T>(grp, aname, rank, ref.slab_dims.data(), true))
      {
        std::cerr << " Disabled hyperslab resize with boost::multi::array_ref<gpu_ptr>.\n";
        return false;
      }
      return h5d_read(grp, aname, ref.data(), xfer_plist);
    }
  }

  inline bool write(const data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) const
  {
    std::cerr << " Disabled hyperslab write with boost::multi::array_ref<gpu_ptr>.\n";
    return false;
  }
};

#endif // ENABLE_CUDA || ENABLE_HIP

#endif // QMCPLUSPLUS_HDF_MULTI_INTERFACE_H