ttg_data_copy.h
#ifndef TTG_DATA_COPY_H
#define TTG_DATA_COPY_H

#include <utility>
#include <limits>
#include <vector>
#include <iterator>
#include <atomic>
#include <type_traits>
#include <concepts>  // std::constructible_from, used by ttg_data_value_copy_t below

#if defined(PARSEC_HAVE_DEV_CUDA_SUPPORT)
#include <cuda_runtime.h>
#endif // PARSEC_HAVE_DEV_CUDA_SUPPORT

#include <parsec.h>

#include "ttg/parsec/thread_local.h"  // ttg_data_copy_container()
#include "ttg/parsec/parsec-ext.h"
#include "ttg/util/span.h"

namespace ttg_parsec {

  namespace detail {

    // fwd-decl
    struct ttg_data_copy_t;

    /* templated to break cyclic dependency with ttg_data_copy_container */
    template<typename T = ttg_data_copy_t>
    struct ttg_data_copy_container_setter {
      ttg_data_copy_container_setter(T* ptr) {
        /* set the container ptr here, will be reset in the ttg_data_value_copy_t ctor */
        ttg_data_copy_container() = ptr;
      }
    };
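
    /* Note (illustrative sketch, not part of the original header): C++
     * initializes base classes before non-static data members, so a class that
     * lists ttg_data_copy_container_setter as its first base publishes itself
     * through ttg_data_copy_container() before its value member is constructed.
     * A minimal sketch with a hypothetical value type my_value_t:
     *
     *   struct example_copy : private ttg_data_copy_container_setter<ttg_data_copy_t>,
     *                         public ttg_data_copy_t {
     *     my_value_t m_value;
     *     example_copy(my_value_t v)
     *     : ttg_data_copy_container_setter(this)   // runs first: container published
     *     , ttg_data_copy_t()
     *     , m_value(std::move(v))                  // value ctor can query the container
     *     { ttg_data_copy_container() = nullptr; } // reset once construction is done
     *     void* get_ptr() override { return &m_value; }
     *   };
     */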

    /* special type: stores a pointer to the ttg_data_copy_t. This is necessary
     * because ttg_data_copy_t has virtual functions, so we cannot cast from parsec_data_copy_t
     * to ttg_data_copy_t (offsetof is not supported for virtual classes).
     * The self pointer is a back-pointer to the ttg_data_copy_t. */
    struct ttg_data_copy_self_t {
      parsec_list_item_t super;
      ttg_data_copy_t *self;
      ttg_data_copy_self_t(ttg_data_copy_t* dc)
      : self(dc)
      {
        PARSEC_OBJ_CONSTRUCT(&super, parsec_list_item_t);
      }
    };
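
    /* Sketch (illustrative, not part of the original header): recovering the
     * ttg_data_copy_t from the embedded parsec_list_item_t. A direct cast or
     * offsetof is not valid because ttg_data_copy_t is polymorphic, so the
     * stored back-pointer is read instead. `item` is a hypothetical pointer to
     * the `super` member, e.g. obtained from a PaRSEC list; since `super` is
     * the first member of the standard-layout ttg_data_copy_self_t, the cast
     * below is well-defined:
     *
     *   parsec_list_item_t *item = ...;  // points at ttg_data_copy_self_t::super
     *   ttg_data_copy_self_t *wrapper = reinterpret_cast<ttg_data_copy_self_t*>(item);
     *   ttg_data_copy_t *copy = wrapper->self;  // back-pointer set in the ctor
     */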

    /* Non-owning copy-tracking wrapper, accounting for N readers or 1 writer.
     * Also counts external references, which are not treated as
     * readers or writers but merely prevent the object from being
     * destroyed once no readers/writers exist.
     */
    struct ttg_data_copy_t : public ttg_data_copy_self_t {

      /* special value assigned to parsec_data_copy_t::readers to mark the copy as
       * mutable, i.e., a task will modify it */
      static constexpr int mutable_tag = std::numeric_limits<int>::min();

      ttg_data_copy_t()
      : ttg_data_copy_self_t(this)
      { }

      ttg_data_copy_t(const ttg_data_copy_t& c)
      : ttg_data_copy_self_t(this)
      {
        /* we allow copying but do not copy any data over from the original;
         * device copies will have to be allocated again,
         * and it's a new object to reference */
      }

      ttg_data_copy_t(ttg_data_copy_t&& c)
      : ttg_data_copy_self_t(this)
      , m_next_task(c.m_next_task)
      , m_readers(c.m_readers)
      , m_refs(c.m_refs.load(std::memory_order_relaxed))
      {
        c.m_readers = 0;
      }

      ttg_data_copy_t& operator=(ttg_data_copy_t&& c)
      {
        m_next_task = c.m_next_task;
        c.m_next_task = nullptr;
        m_readers = c.m_readers;
        c.m_readers = 0;
        m_refs.store(c.m_refs.load(std::memory_order_relaxed), std::memory_order_relaxed);
        c.m_refs.store(0, std::memory_order_relaxed);
        return *this;
      }

      ttg_data_copy_t& operator=(const ttg_data_copy_t& c) {
        /* we allow copying but do not copy any data over from the original;
         * device copies will have to be allocated again,
         * and it's a new object to reference */

        return *this;
      }

      /* mark destructor as virtual */
      virtual ~ttg_data_copy_t() = default;

      /* Returns true if the copy is mutable */
      bool is_mutable() const {
        return m_readers == mutable_tag;
      }

      /* Mark the copy as mutable */
      void mark_mutable() {
        m_readers = mutable_tag;
      }

      /* Increment the reader counter and return the previous value.
       * \tparam Atomic Whether to increment atomically. Default: true
       */
      template<bool Atomic = true>
      int increment_readers() {
        if constexpr(Atomic) {
          return parsec_atomic_fetch_inc_int32(&m_readers);
//          std::atomic_ref<int32_t> a{m_readers};
//          return a.fetch_add(1, std::memory_order_relaxed);
        } else {
          return m_readers++;
        }
      }

      /* Reset a mutable copy to read-only with a single reader. */
      void reset_readers() {
        if (mutable_tag == m_readers) {
          m_readers = 1;
        }
      }

      /* Decrement the reader counter and return the previous value.
       * \tparam Atomic Whether to decrement atomically. Default: true
       */
      template<bool Atomic = true>
      int decrement_readers() {
        if constexpr(Atomic) {
          return parsec_atomic_fetch_dec_int32(&m_readers);
//          std::atomic_ref<int32_t> a{m_readers};
//          return a.fetch_sub(1, std::memory_order_relaxed);
        } else {
          return m_readers--;
        }
      }

      /* Returns the number of readers if the copy is immutable, or \c mutable_tag
       * if the copy is mutable */
      int num_readers() const {
        return m_readers;
      }

      /* Returns the pointer to the user data wrapped by the copy object */
      virtual void* get_ptr() = 0;

      parsec_task_t* get_next_task() const {
        return m_next_task;
      }

      void set_next_task(parsec_task_t* task) {
        m_next_task = task;
      }

      int32_t add_ref() {
        return m_refs.fetch_add(1, std::memory_order_relaxed);
      }

      int32_t drop_ref() {
        return m_refs.fetch_sub(1, std::memory_order_relaxed);
      }

      bool has_ref() {
        return (m_refs.load(std::memory_order_relaxed) != 0);
      }

      int32_t num_ref() const {
        return m_refs.load(std::memory_order_relaxed);
      }

#if defined(PARSEC_PROF_TRACE) && defined(PARSEC_TTG_PROFILE_BACKEND)
      int64_t size;
      int64_t uid;
#endif
    protected:
      parsec_task_t *m_next_task = nullptr;
      int32_t m_readers = 1;
      std::atomic<int32_t> m_refs = 1; ///< number of entities referencing this copy (TTGs, external)
    };
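
    /* Usage sketch (illustrative, not part of the original header): the
     * N-readers-or-1-writer protocol on a concrete copy. A copy starts with
     * one reader; mutable_tag marks exclusive write access, and m_refs keeps
     * the object alive independently of readers and writers:
     *
     *   // registering an additional read-only consumer:
     *   copy->increment_readers();
     *   // ... that consumer finishes ...
     *   if (1 == copy->decrement_readers()) {
     *     // previous count was 1: the last reader is gone, the copy may be released
     *   }
     *
     *   // granting exclusive write access when there is a single owner:
     *   if (copy->num_readers() == 1 && !copy->has_ref())
     *     copy->mark_mutable();  // num_readers() now returns mutable_tag
     */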
195 
196 
202  template<typename ValueT>
203  struct ttg_data_value_copy_t final : private ttg_data_copy_container_setter<ttg_data_copy_t>
204  , public ttg_data_copy_t {
205  using value_type = ValueT;
207 
208  template<typename T>
209  requires(std::constructible_from<ValueT, T>)
210  ttg_data_value_copy_t(T&& value)
212  , ttg_data_copy_t()
213  , m_value(std::forward<T>(value))
214  {
215  /* reset the container tracker */
216  ttg_data_copy_container() = nullptr;
217  }
218 
220  noexcept(std::is_nothrow_move_constructible_v<value_type>)
222  , ttg_data_copy_t(std::move(c))
223  , m_value(std::move(c.m_value))
224  {
225  /* reset the container tracker */
226  ttg_data_copy_container() = nullptr;
227  }
228 
230  noexcept(std::is_nothrow_copy_constructible_v<value_type>)
232  , ttg_data_copy_t(c)
233  , m_value(c.m_value)
234  {
235  /* reset the container tracker */
236  ttg_data_copy_container() = nullptr;
237  }
238 
240  noexcept(std::is_nothrow_move_assignable_v<value_type>)
241  {
242  /* set the container ptr here, will be reset in the the ttg_data_value_copy_t ctor */
243  ttg_data_copy_container() = this;
244  ttg_data_copy_t::operator=(std::move(c));
245  m_value = std::move(c.m_value);
246  /* reset the container tracker */
247  ttg_data_copy_container() = nullptr;
248  }
249 
251  noexcept(std::is_nothrow_copy_assignable_v<value_type>)
252  {
253  /* set the container ptr here, will be reset in the the ttg_data_value_copy_t ctor */
254  ttg_data_copy_container() = this;
256  m_value = c.m_value;
257  /* reset the container tracker */
258  ttg_data_copy_container() = nullptr;
259  }
260 
262  return m_value;
263  }
264 
265  /* will destruct the value */
266  virtual ~ttg_data_value_copy_t() = default;
267 
268  virtual void* get_ptr() override final {
269  return &m_value;
270  }
271  };
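
    /* Usage sketch (illustrative, not part of the original header): wrapping a
     * user value in a heap-allocated copy object and accessing it through the
     * type-erased base class:
     *
     *   auto *vcopy = new ttg_data_value_copy_t<std::vector<double>>(
     *                     std::vector<double>(100, 1.0));
     *   ttg_data_copy_t *copy = vcopy;  // type-erased handle
     *   auto *vec = static_cast<std::vector<double>*>(copy->get_ptr());
     *   double first = (*vcopy)[0];     // operator* yields the wrapped value
     *   delete copy;                    // virtual dtor destroys the vector
     */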
  } // namespace detail

} // namespace ttg_parsec

#endif // TTG_DATA_COPY_H