From 7bce438596fa7cee2635d659e00202f8500ca9e2 Mon Sep 17 00:00:00 2001
From: Claudius 'keldu' Holeksa
Date: Wed, 11 Sep 2024 18:54:48 +0200
Subject: wip

---
 modules/codec/c++/data_raw.hpp         | 70 ++++++++++++++-------------------
 modules/remote-hip/c++/device.tmpl.hpp | 27 +++++++++++++
 2 files changed, 55 insertions(+), 42 deletions(-)

diff --git a/modules/codec/c++/data_raw.hpp b/modules/codec/c++/data_raw.hpp
index fabf77a..db57af3 100644
--- a/modules/codec/c++/data_raw.hpp
+++ b/modules/codec/c++/data_raw.hpp
@@ -412,7 +412,8 @@ class data, encode::NativeRaw> {
 	// rawr
 	// data, encode::NativeRaw> dims_;
 	std::array dims_;
-	std::vector::Type> value_;
+	typename raw_native_array_type_helper::Type* value_;
+	uint64_t value_size_;
 
 	uint64_t get_full_size() const {
 		uint64_t s = 1;
@@ -426,7 +427,8 @@ class data, encode::NativeRaw> {
 	}
 public:
 	data():
-		value_{}
+		value_{nullptr},
+		value_size_{0u}
 	{
 		for(auto& iter : dims_){
 			iter = 0u;
@@ -437,20 +439,24 @@ class data, encode::NativeRaw> {
 	 * Purely internal function for working C interfacing
 	 */
 	typename raw_native_array_type_helper::Type* get_raw_data() {
-		if(value_.size() == 0u){
+		if(value_size_ == 0u){
			return nullptr;
 		}
 		return &(value_[0]);
 	}
 
-	SAW_DEFAULT_COPY(data);
-	SAW_DEFAULT_MOVE(data);
+	SAW_FORBID_COPY(data);
+	SAW_FORBID_MOVE(data);
 
 	data(const std::array& i):
 		dims_{i},
-		value_{}
+		value_{nullptr},
+		value_size_{0u}
 	{
-		value_.resize(get_full_size());
+		// auto old_value = value_;
+		// auto old_value_size = value_size_;
+		value_size_ = get_full_size();
+		value_ = new typename raw_native_array_type_helper::Type (value_size_);
 	}
 
 	data(data init)
@@ -458,36 +464,16 @@ class data, encode::NativeRaw> {
 		for(uint64_t i = 0; i < Dim; ++i){
 			dims_.at(i) = init.at(i).get();
 		}
-		value_.resize(get_full_size());
+		value_size_ = get_full_size();
+		value_ = new typename raw_native_array_type_helper::Type (value_size_);
 	}
 
-	template
-	error_or add(saw::data data){
-		/** @todo
-		 * Generally the last dimension can always accept a element so to say.
-		 * Changing the others would require moving data due to the stride changing.
-		 * Since the last dimension doesn't affect the stride, we don't need reordering there.
-		 * But I want a quick solution for one dimension so here we are.
-		 *
-		 * I can always ignore strides and use a stacked std::vector
-		 * std::vector> and so on.
-		 * But for now I'm keeping the strides. Smaller chunks of memory aren't to bad either
-		 * though.
-		 * I'll probably change it to the smaller chunks
-		 */
-		static_assert(Dim == 1, "Currently can't deal with higher dims");
-		static_assert(i < Dim, "Can't add to dimension. Index i larger than dimension size");
-
-		try {
-			value_.emplace_back(std::move(data));
-		}catch(const std::exception& e){
-			(void) e;
-			return make_error();
+	~data(){
+		if(value_){
+			delete value_;
+			value_ = nullptr;
+			value_size_ = 0u;
 		}
-
-		++dims_.at(i);
-
-		return void_t{};
 	}
 
 	template
@@ -498,38 +484,38 @@ class data, encode::NativeRaw> {
 
 	data, encode::NativeRaw> at(const std::array& ind){
-		return {value_.at(this->get_flat_index(ind))};
+		return {value_[this->get_flat_index(ind)]};
 	}
 
 	const data, encode::NativeRaw> at(const std::array& ind) const {
-		return {value_.at(this->get_flat_index(ind))};
+		return {value_[this->get_flat_index(ind)]};
 	}
 
 	template
 	data, encode::NativeRaw> at(Dims... i){
-		return {value_.at(this->get_flat_index(std::array{static_cast(i)...}))};
+		return {value_[this->get_flat_index(std::array{static_cast(i)...})]};
 	}
 
 	template
 	const data, encode::NativeRaw> at(Dims... i) const {
-		return {value_.at(this->get_flat_index(std::array{static_cast(i)...}))};
+		return {value_[this->get_flat_index(std::array{static_cast(i)...})]};
 	}
 
 	template
 	data,encode::NativeRaw> at(const data, Encoding>& i){
-		return {value_.at(this->get_flat_index(i))};
+		return {value_[this->get_flat_index(i)]};
 	}
 
 	template
 	const data,encode::NativeRaw> at(const data, Encoding>& i)const{
-		return {value_.at(this->get_flat_index(i))};
+		return {value_[this->get_flat_index(i)]};
 	}
 
 	std::size_t get_dim_size(uint64_t i) const {
 		return dims_.at(i);
 	}
 
-	size_t size() const { return value_.size();}
+	uint64_t size() const { return value_size_;}
 
 	data, encode::NativeRaw> get_dims() const {
 		return {dims_};
@@ -538,7 +524,7 @@ class data, encode::NativeRaw> {
 private:
 	template
 	uint64_t get_flat_index(const U& i) const {
-		assert(value_.size() == get_full_size());
+		assert(value_size_ == get_full_size());
 
 		uint64_t s = 0;
 		uint64_t stride = 1;
diff --git a/modules/remote-hip/c++/device.tmpl.hpp b/modules/remote-hip/c++/device.tmpl.hpp
index 4777660..6edf431 100644
--- a/modules/remote-hip/c++/device.tmpl.hpp
+++ b/modules/remote-hip/c++/device.tmpl.hpp
@@ -11,6 +11,7 @@ struct hip_copy_to_device {
 template
 struct hip_copy_to_device, Encoding> {
 	using Schema = schema::Primitive;
+
 	static error_or apply(data& from, data** to){
 		hipError_t malloc_err = hipMalloc(to, sizeof(data));
 		// HIP_CHECK(malloc_err);
@@ -21,5 +22,31 @@ struct hip_copy_to_device, Encoding> {
 		return make_void();
 	}
 };
+
+template
+struct hip_copy_to_device, Encoding> {
+	static_assert(Dim == 1u, "Only 1D arrays are supported for now.");
+	static_assert(is_primitive::value, "Arrays can only handle primitives for now.");
+
+	using Schema = schema::Array;
+
+	static error_or apply(data& from, data** to){
+		typename native_data_type::type* dat{};
+		hipError_t data_malloc_err = hipMalloc(&dat,sizeof(typename native_data_type::type) * from.size());
+		hipError_t data_copy_err = hipMemcpy(&dat, &(from.get_raw_data()),sizeof(typename native_data_type::type) * from.size(), hipMemcpyHostToDevice);
+
+		if(from.size() == 0u){
+			// Everything is fine. We just don't want to allocate data which doesn't exist.
+			return make_void();
+		}
+
+		// auto from_dat = &from.at(0);
+
+		hipError_t malloc_err = hipMalloc(to, sizeof(data));
+		hipError_t copy_err = hipMemcpy(*to, &from, sizeof(data), hipMemcpyHostToDevice);
+
+		return make_void();
+	}
+};
 }
 }
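
Context note (not part of the patch): the new hip_copy_to_device specialisation for arrays boils down to the usual HIP host-to-device copy pattern — one allocation for the element buffer, one hipMemcpy from the raw host pointer, and explicit error checks. Below is a minimal, standalone sketch of that pattern using only the HIP runtime API; the buffer name, element type and sizes are illustrative and are not taken from the saw codebase.

#include <hip/hip_runtime.h>
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
	std::vector<float> host(1024, 1.0f);
	const std::size_t bytes = host.size() * sizeof(float);

	// One allocation for the element buffer on the device.
	float* device_buf = nullptr;
	hipError_t err = hipMalloc(reinterpret_cast<void**>(&device_buf), bytes);
	if (err != hipSuccess) {
		std::fprintf(stderr, "hipMalloc failed: %s\n", hipGetErrorString(err));
		return 1;
	}

	// One copy from the raw host pointer (host.data()) into that buffer.
	// The source argument is the pointer itself, not the address of the pointer.
	err = hipMemcpy(device_buf, host.data(), bytes, hipMemcpyHostToDevice);
	if (err != hipSuccess) {
		std::fprintf(stderr, "hipMemcpy failed: %s\n", hipGetErrorString(err));
		hipFree(device_buf);
		return 1;
	}

	// ... launch kernels that read device_buf here ...

	hipFree(device_buf);
	return 0;
}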
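
Likewise not part of the patch: data_raw.hpp now tracks its storage with a raw pointer plus an explicit element count instead of a std::vector, forbids copy and move, and releases the buffer in a destructor. A stripped-down sketch of that ownership model in plain C++ follows; the raw_array name and its members are illustrative only and do not come from the library.

#include <cassert>
#include <cstdint>

template <typename T>
class raw_array {
	T* value_ = nullptr;
	std::uint64_t value_size_ = 0u;

public:
	raw_array() = default;

	explicit raw_array(std::uint64_t n):
		value_{n ? new T[n]{} : nullptr},  // array form of new, value-initialised
		value_size_{n}
	{}

	// Mirrors the intent of SAW_FORBID_COPY / SAW_FORBID_MOVE in the patch.
	raw_array(const raw_array&) = delete;
	raw_array& operator=(const raw_array&) = delete;
	raw_array(raw_array&&) = delete;
	raw_array& operator=(raw_array&&) = delete;

	~raw_array() {
		delete[] value_;  // array delete paired with array new; safe on nullptr
	}

	std::uint64_t size() const { return value_size_; }

	T& at(std::uint64_t i) {
		assert(i < value_size_);
		return value_[i];
	}
};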