author	Claudius 'keldu' Holeksa <mail@keldu.de>	2024-09-11 18:54:48 +0200
committer	Claudius 'keldu' Holeksa <mail@keldu.de>	2024-09-11 18:54:48 +0200
commit	7bce438596fa7cee2635d659e00202f8500ca9e2 (patch)
tree	da4520b5b5fdb4bf958c7a1999fb98c195522fbd /modules
parent	b62e2a5e7293ca9e534c670368166b29268119f9 (diff)
wip
Diffstat (limited to 'modules')
-rw-r--r--	modules/codec/c++/data_raw.hpp	70
-rw-r--r--	modules/remote-hip/c++/device.tmpl.hpp	27
2 files changed, 55 insertions, 42 deletions
diff --git a/modules/codec/c++/data_raw.hpp b/modules/codec/c++/data_raw.hpp
index fabf77a..db57af3 100644
--- a/modules/codec/c++/data_raw.hpp
+++ b/modules/codec/c++/data_raw.hpp
@@ -412,7 +412,8 @@ class data<schema::Array<T,Dim>, encode::NativeRaw> {
// rawr
// data<schema::FixedArray<schema::UInt64, Dim>, encode::NativeRaw> dims_;
std::array<uint64_t, Dim> dims_;
- std::vector<typename raw_native_array_type_helper<T>::Type> value_;
+ typename raw_native_array_type_helper<T>::Type* value_;
+ uint64_t value_size_;
uint64_t get_full_size() const {
uint64_t s = 1;
@@ -426,7 +427,8 @@ class data<schema::Array<T,Dim>, encode::NativeRaw> {
}
public:
data():
- value_{}
+ value_{nullptr},
+ value_size_{0u}
{
for(auto& iter : dims_){
iter = 0u;
@@ -437,20 +439,24 @@ class data<schema::Array<T,Dim>, encode::NativeRaw> {
* Purely internal function for working C interfacing
*/
typename raw_native_array_type_helper<T>::Type* get_raw_data() {
- if(value_.size() == 0u){
+ if(value_size_ == 0u){
return nullptr;
}
return &(value_[0]);
}
- SAW_DEFAULT_COPY(data);
- SAW_DEFAULT_MOVE(data);
+ SAW_FORBID_COPY(data);
+ SAW_FORBID_MOVE(data);
data(const std::array<uint64_t, Dim>& i):
dims_{i},
- value_{}
+ value_{nullptr},
+ value_size_{0u}
{
- value_.resize(get_full_size());
+ // auto old_value = value_;
+ // auto old_value_size = value_size_;
+ value_size_ = get_full_size();
+ value_ = new typename raw_native_array_type_helper<T>::Type[value_size_]();
}
data(data<MetaSchema, encode::NativeRaw> init)
@@ -458,36 +464,16 @@ class data<schema::Array<T,Dim>, encode::NativeRaw> {
for(uint64_t i = 0; i < Dim; ++i){
dims_.at(i) = init.at(i).get();
}
- value_.resize(get_full_size());
+ value_size_ = get_full_size();
+ value_ = new typename raw_native_array_type_helper<T>::Type[value_size_]();
}
- template<size_t i = 0>
- error_or<void> add(saw::data<T,encode::NativeRaw> data){
- /** @todo
- * Generally the last dimension can always accept a element so to say.
- * Changing the others would require moving data due to the stride changing.
- * Since the last dimension doesn't affect the stride, we don't need reordering there.
- * But I want a quick solution for one dimension so here we are.
- *
- * I can always ignore strides and use a stacked std::vector
- * std::vector<std::vector<...>> and so on.
- * But for now I'm keeping the strides. Smaller chunks of memory aren't to bad either
- * though.
- * I'll probably change it to the smaller chunks
- */
- static_assert(Dim == 1, "Currently can't deal with higher dims");
- static_assert(i < Dim, "Can't add to dimension. Index i larger than dimension size");
-
- try {
- value_.emplace_back(std::move(data));
- }catch(const std::exception& e){
- (void) e;
- return make_error<err::out_of_memory>();
+ ~data(){
+ if(value_){
+ delete[] value_;
+ value_ = nullptr;
+ value_size_ = 0u;
}
-
- ++dims_.at(i);
-
- return void_t{};
}
template<std::integral... Dims>
@@ -498,38 +484,38 @@ class data<schema::Array<T,Dim>, encode::NativeRaw> {
}
data<schema::Ref<T>, encode::NativeRaw> at(const std::array<uint64_t, Dim>& ind){
- return {value_.at(this->get_flat_index(ind))};
+ return {value_[this->get_flat_index(ind)]};
}
const data<schema::Ref<T>, encode::NativeRaw> at(const std::array<uint64_t, Dim>& ind) const {
- return {value_.at(this->get_flat_index(ind))};
+ return {value_[this->get_flat_index(ind)]};
}
template<std::integral... Dims>
data<schema::Ref<T>, encode::NativeRaw> at(Dims... i){
- return {value_.at(this->get_flat_index(std::array<uint64_t, Dim>{static_cast<uint64_t>(i)...}))};
+ return {value_[this->get_flat_index(std::array<uint64_t, Dim>{static_cast<uint64_t>(i)...})]};
}
template<std::integral... Dims>
const data<schema::Ref<T>, encode::NativeRaw> at(Dims... i) const {
- return {value_.at(this->get_flat_index(std::array<uint64_t, Dim>{static_cast<uint64_t>(i)...}))};
+ return {value_[this->get_flat_index(std::array<uint64_t, Dim>{static_cast<uint64_t>(i)...})]};
}
template<typename Encoding>
data<schema::Ref<T>,encode::NativeRaw> at(const data<schema::FixedArray<schema::UInt64,Dim>, Encoding>& i){
- return {value_.at(this->get_flat_index(i))};
+ return {value_[this->get_flat_index(i)]};
}
template<typename Encoding>
const data<schema::Ref<T>,encode::NativeRaw> at(const data<schema::FixedArray<schema::UInt64,Dim>, Encoding>& i)const{
- return {value_.at(this->get_flat_index(i))};
+ return {value_[this->get_flat_index(i)]};
}
std::size_t get_dim_size(uint64_t i) const {
return dims_.at(i);
}
- size_t size() const { return value_.size();}
+ uint64_t size() const { return value_size_;}
data<schema::FixedArray<schema::UInt64, Dim>, encode::NativeRaw> get_dims() const {
return {dims_};
@@ -538,7 +524,7 @@ class data<schema::Array<T,Dim>, encode::NativeRaw> {
private:
template<typename U>
uint64_t get_flat_index(const U& i) const {
- assert(value_.size() == get_full_size());
+ assert(value_size_ == get_full_size());
uint64_t s = 0;
uint64_t stride = 1;
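
The data_raw.hpp hunks above replace the std::vector element storage with a manually owned buffer (value_ plus value_size_) and forbid copy and move, so the lifetime of the elements now rests entirely on the constructor/destructor pair. A minimal sketch of that ownership pattern, with placeholder names (raw_array, element_type) standing in for the real saw types, would look roughly like this:

// Sketch only: mirrors the new[]/delete[] ownership that the patched
// data<schema::Array<T,Dim>, encode::NativeRaw> uses; raw_array and
// element_type are placeholder names, not part of the module.
#include <array>
#include <cstdint>

template<typename element_type, uint64_t Dim>
class raw_array {
public:
	explicit raw_array(const std::array<uint64_t, Dim>& dims):
		dims_{dims},
		value_{nullptr},
		size_{1u}
	{
		for(auto d : dims_){ size_ *= d; }
		// Array new, value-initialized; must be paired with delete[].
		value_ = new element_type[size_]();
	}
	~raw_array(){
		delete[] value_;
	}
	// Mirrors SAW_FORBID_COPY / SAW_FORBID_MOVE: the buffer has exactly one owner.
	raw_array(const raw_array&) = delete;
	raw_array& operator=(const raw_array&) = delete;
	raw_array(raw_array&&) = delete;
	raw_array& operator=(raw_array&&) = delete;

	element_type* get_raw_data(){ return size_ ? value_ : nullptr; }
	uint64_t size() const { return size_; }
private:
	std::array<uint64_t, Dim> dims_;
	element_type* value_;
	uint64_t size_;
};

Because copy and move are both deleted, instances can no longer be returned by value or relocated inside containers; callers have to hold them in place or behind a pointer.
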
diff --git a/modules/remote-hip/c++/device.tmpl.hpp b/modules/remote-hip/c++/device.tmpl.hpp
index 4777660..6edf431 100644
--- a/modules/remote-hip/c++/device.tmpl.hpp
+++ b/modules/remote-hip/c++/device.tmpl.hpp
@@ -11,6 +11,7 @@ struct hip_copy_to_device {
template<typename T, uint64_t N, typename Encoding>
struct hip_copy_to_device<schema::Primitive<T,N>, Encoding> {
using Schema = schema::Primitive<T,N>;
+
static error_or<void> apply(data<Schema, Encoding>& from, data<Schema,Encoding>** to){
hipError_t malloc_err = hipMalloc(to, sizeof(data<Schema,Encoding>));
// HIP_CHECK(malloc_err);
@@ -21,5 +22,31 @@ struct hip_copy_to_device<schema::Primitive<T,N>, Encoding> {
return make_void();
}
};
+
+template<typename T, uint64_t Dim, typename Encoding>
+struct hip_copy_to_device<schema::Array<T,Dim>, Encoding> {
+ static_assert(Dim == 1u, "Only 1D arrays are supported for now.");
+ static_assert(is_primitive<T>::value, "Arrays can only handle primitives for now.");
+
+ using Schema = schema::Array<T,Dim>;
+
+ static error_or<void> apply(data<Schema, Encoding>& from, data<Schema,Encoding>** to){
+ typename native_data_type<T>::type* dat{};
+ if(from.size() == 0u){
+ // Everything is fine. We just don't want to allocate data which doesn't exist.
+ return make_void();
+ }
+
+ hipError_t data_malloc_err = hipMalloc(&dat, sizeof(typename native_data_type<T>::type) * from.size());
+ hipError_t data_copy_err = hipMemcpy(dat, from.get_raw_data(), sizeof(typename native_data_type<T>::type) * from.size(), hipMemcpyHostToDevice);
+
+ // auto from_dat = &from.at(0);
+
+ hipError_t malloc_err = hipMalloc(to, sizeof(data<Schema,Encoding>));
+ hipError_t copy_err = hipMemcpy(*to, &from, sizeof(data<Schema,Encoding>), hipMemcpyHostToDevice);
+
+ return make_void();
+ }
+};
}
}
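
The new hip_copy_to_device specialization uploads the element buffer (dat) and the data struct separately, but the struct copied with hipMemcpy(*to, &from, ...) still carries the host-side value_ pointer, so the device buffer is not yet reachable from it. A hedged sketch of the usual two-step pattern, using an assumed plain descriptor type (hip_device_array) rather than data<Schema, Encoding>, is:

// Sketch only: hip_device_array and copy_array_to_device are placeholder
// names illustrating "copy the elements, then upload a descriptor that
// already points at the device buffer"; error handling is an assumption.
#include <hip/hip_runtime.h>
#include <cstdint>

template<typename T>
struct hip_device_array {
	T* elements;   // device pointer
	uint64_t size;
};

template<typename T>
hipError_t copy_array_to_device(const T* host_elems, uint64_t n, hip_device_array<T>** to){
	if(n == 0u){
		*to = nullptr; // nothing to upload
		return hipSuccess;
	}
	// 1) copy the raw element buffer to the device
	T* dev_elems{};
	hipError_t err = hipMalloc(&dev_elems, sizeof(T) * n);
	if(err != hipSuccess){ return err; }
	err = hipMemcpy(dev_elems, host_elems, sizeof(T) * n, hipMemcpyHostToDevice);
	if(err != hipSuccess){ hipFree(dev_elems); return err; }

	// 2) upload a descriptor that already holds the device pointer
	hip_device_array<T> desc{dev_elems, n};
	err = hipMalloc(to, sizeof(hip_device_array<T>));
	if(err != hipSuccess){ hipFree(dev_elems); return err; }
	err = hipMemcpy(*to, &desc, sizeof(hip_device_array<T>), hipMemcpyHostToDevice);
	if(err != hipSuccess){ hipFree(dev_elems); hipFree(*to); *to = nullptr; return err; }
	return hipSuccess;
}

Whether the module wants a separate device-side descriptor like this, or prefers to patch the pointer inside data<Schema, Encoding> before the upload, is an open design question for the WIP code above.
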