ttg 1.0.0
Template Task Graph (TTG): flowgraph-based programming model for high-performance distributed-memory algorithms
Loading...
Searching...
No Matches
ttg_data_copy.h
Go to the documentation of this file.
1// SPDX-License-Identifier: BSD-3-Clause
2#ifndef TTG_DATA_COPY_H
3#define TTG_DATA_COPY_H
4
5#include <utility>
6#include <limits>
7#include <vector>
8#include <iterator>
9#include <atomic>
10#include <type_traits>
11
12#if defined(PARSEC_HAVE_DEV_CUDA_SUPPORT)
13#include <cuda_runtime.h>
14#endif // PARSEC_HAVE_DEV_CUDA_SUPPORT
15
16#include <parsec.h>
17
20#include "ttg/util/span.h"
21
22
23namespace ttg_parsec {
24
25 namespace detail {
26
27 // fwd-decl
28 struct ttg_data_copy_t;
29
 30 /* templated to break cyclic dependency with ttg_data_copy_container */
 31 template<typename T = ttg_data_copy_t>
 // NOTE(review): this listing is a Doxygen HTML extraction; the struct
 // declaration and ctor signature (original lines 32-33, 35) carried
 // hyperlinks and were stripped. What remains is the tail of
 // ttg_data_copy_container_setter's ctor, which presumably assigns the copy
 // pointer into ttg_data_copy_container() — verify against the real header.
 34 /* set the container ptr here, will be reset in the ttg_data_value_copy_t ctor */
 36 }
 37 };
38
 39 /* special type: stores a pointer to the ttg_data_copy_t. This is necessary
 40 * because ttg_data_copy_t has virtual functions so we cannot cast from parsec_data_copy_t
 41 * to ttg_data_copy_t (offsetof is not supported for virtual classes).
 42 * The self pointer is a back-pointer to the ttg_data_copy_t. */
 // NOTE(review): the struct declaration, the `self` member declaration, and
 // the ctor signature (original lines 43, 45-46) were stripped by the Doxygen
 // extraction; only the embedded PaRSEC list item and the ctor body survive.
 // The `: self(dc)` initializer implies a ctor taking a ttg_data_copy_t* `dc`.
 44 parsec_list_item_t super;
 47 : self(dc)
 48 {
 /* initialize the embedded list item through the PaRSEC object system so
 * this wrapper can be linked into PaRSEC lists */
 49 PARSEC_OBJ_CONSTRUCT(&super, parsec_list_item_t);
 50 }
 51 };
52
 53 /* Non-owning copy-tracking wrapper, accounting for N readers or 1 writer.
 54 * Also counts external references, which are not treated as
 55 * readers or writers but merely prevent the object from being
 56 * destroyed once no readers/writers exist.
 57 */
 // NOTE(review): Doxygen extraction stripped the struct declaration and
 // several member-function signatures (they carried hyperlinks). Signatures
 // recovered from the page's tooltip text are cited below; confirm all of
 // them against the original ttg_data_copy.h.
 59
 60 /* special value assigned to parsec_data_copy_t::readers to mark the copy as
 61 * mutable, i.e., a task will modify it */
 62 static constexpr int mutable_tag = std::numeric_limits<int>::min();
 63
 // copy ctor (signature stripped; tooltip: ttg_data_copy_t(const ttg_data_copy_t &c))
 67
 70 {
 71 /* we allow copying but do not copy any data over from the original
 72 * device copies will have to be allocated again
 73 * and it's a new object to reference */
 74 }
 75
 // move ctor (signature stripped; tooltip: ttg_data_copy_t(ttg_data_copy_t &&c)):
 // takes over the external ref count and zeroes the source's reader count
 80 , m_refs(c.m_refs.load(std::memory_order_relaxed))
 81 {
 82 c.m_readers = 0;
 83 }
 84
 // move assignment (signature stripped; tooltip: ttg_data_copy_t &operator=(ttg_data_copy_t &&c))
 86 {
 // (original line 87 stripped here — presumably transfers c.m_next_task into
 // this object before it is cleared below; verify)
 88 c.m_next_task = nullptr;
 89 m_readers = c.m_readers;
 90 c.m_readers = 0;
 /* relaxed ordering suffices: m_refs is a pure liveness counter, no other
 * data is published through it */
 91 m_refs.store(c.m_refs.load(std::memory_order_relaxed), std::memory_order_relaxed);
 92 c.m_refs.store(0, std::memory_order_relaxed);
 93 return *this;
 94 }
 95
 // copy assignment (signature stripped; tooltip: ttg_data_copy_t &operator=(const ttg_data_copy_t &c))
 97 /* we allow copying but do not copy any data over from the original
 98 * device copies will have to be allocated again
 99 * and it's a new object to reference */
 100
 101 return *this;
 102 }
 103
 104 /* mark destructor as virtual */
 105 virtual ~ttg_data_copy_t() = default;
 106
 107 /* Returns true if the copy is mutable */
 108 bool is_mutable() const {
 109 return m_readers == mutable_tag;
 110 }
 111
 112 /* Mark the copy as mutable */
 // (body stripped at original lines 113-114 — presumably assigns mutable_tag
 // to m_readers, mirroring is_mutable() above; verify)
 115 }
 116
 117 /* Increment the reader counter and return previous value
 118 * \tparam Atomic Whether to increment atomically. Default: true
 119 */
 120 template<bool Atomic = true>
 // (signature stripped at original line 121 — an increment_readers()-style member; verify)
 122 if constexpr(Atomic) {
 /* PaRSEC atomic is used; the commented-out std::atomic_ref alternative
 * below would require C++20 */
 123 return parsec_atomic_fetch_inc_int32(&m_readers);
 124// std::atomic_ref<int32_t> a{m_readers};
 125// return a.fetch_add(1, std::memory_order_relaxed);
 126 } else {
 127 return m_readers++;
 128 }
 129 }
 130
 // reset-readers member (doc and signature stripped at original lines 131-134):
 // demotes a mutable copy (m_readers == mutable_tag) back to a single reader
 135 if (mutable_tag == m_readers) {
 136 m_readers = 1;
 137 }
 138 }
 139
 140 /* Decrement the reader counter and return previous value.
 141 * \tparam Atomic Whether to decrement atomically. Default: true
 142 */
 143 template<bool Atomic = true>
 // (signature stripped at original line 144 — a decrement_readers()-style member; verify)
 145 if constexpr(Atomic) {
 146 return parsec_atomic_fetch_dec_int32(&m_readers);
 147// std::atomic_ref<int32_t> a{m_readers};
 148// return a.fetch_sub(1, std::memory_order_relaxed);
 149 } else {
 150 return m_readers--;
 151 }
 152 }
 153
 154 /* Returns the number of readers if the copy is immutable, or \c mutable_tag
 155 * if the copy is mutable */
 156 int num_readers() const {
 157 return m_readers;
 158 }
 159
 160 /* Returns the pointer to the user data wrapped by the copy object */
 161 virtual void* get_ptr() = 0;
 162
 /* Task chained to run once this copy becomes available (may be null). */
 163 parsec_task_t* get_next_task() const {
 164 return m_next_task;
 165 }
 166
 167 void set_next_task(parsec_task_t* task) {
 168 m_next_task = task;
 169 }
 170
 /* External reference counting; relaxed atomics because the count only
 * gates destruction and publishes no data. Both return the PREVIOUS value. */
 171 int32_t add_ref() {
 172 return m_refs.fetch_add(1, std::memory_order_relaxed);
 173 }
 174
 175 int32_t drop_ref() {
 176 return m_refs.fetch_sub(1, std::memory_order_relaxed);
 177 }
 178
 179 bool has_ref() {
 180 return (m_refs.load(std::memory_order_relaxed) != 0);
 181 }
 182
 183 int32_t num_ref() const {
 184 return m_refs.load(std::memory_order_relaxed);
 185 }
 186
 /* Profiling-only bookkeeping, compiled in when both PaRSEC tracing and TTG
 * backend profiling are enabled. */
 187#if defined(PARSEC_PROF_TRACE) && defined(PARSEC_TTG_PROFILE_BACKEND)
 188 int64_t size;
 189 int64_t uid;
 190#endif
 191 protected:
 192 parsec_task_t *m_next_task = nullptr;
 /* m_readers: >0 = number of concurrent readers; mutable_tag = one writer */
 193 int32_t m_readers = 1;
 194 std::atomic<int32_t> m_refs = 1; //< number of entities referencing this copy (TTGs, external)
 195 };
196
197
 203 template<typename ValueT>
 /* Copy wrapper that owns the wrapped value of type ValueT. The private
 * ttg_data_copy_container_setter base is listed first so it runs before
 * m_value is constructed; every ctor/assignment below then resets
 * ttg_data_copy_container() to nullptr when done.
 * NOTE(review): several signatures in this Doxygen-extracted listing were
 * stripped; recovered tooltip signatures are cited below — verify against
 * the original header. */
 204 struct ttg_data_value_copy_t final : private ttg_data_copy_container_setter<ttg_data_copy_t>
 205 , public ttg_data_copy_t {
 206 using value_type = ValueT;
 // (original line 207 stripped — presumably the `m_value` member declaration
 // used throughout below; verify)
 208
 /* value ctor: forwards `value` into the owned m_value */
 209 template<typename T>
 210 requires(std::constructible_from<ValueT, T>)
 // (ctor signature and leading base-initializers stripped at original lines 211-213)
 214 , m_value(std::forward<T>(value))
 215 {
 216 /* reset the container tracker */
 217 ttg_data_copy_container() = nullptr;
 218 }
 219
 // move ctor (signature stripped; tooltip: ttg_data_value_copy_t(ttg_data_value_copy_t &&c)
 // noexcept(std::is_nothrow_move_constructible_v<value_type>))
 221 noexcept(std::is_nothrow_move_constructible_v<value_type>)
 // (leading base-initializer at original line 222 stripped)
 223 , ttg_data_copy_t(std::move(c))
 224 , m_value(std::move(c.m_value))
 225 {
 226 /* reset the container tracker */
 227 ttg_data_copy_container() = nullptr;
 228 }
 229
 // copy ctor (signature stripped; tooltip: ttg_data_value_copy_t(const ttg_data_value_copy_t &c)
 // noexcept(std::is_nothrow_copy_constructible_v<value_type>))
 231 noexcept(std::is_nothrow_copy_constructible_v<value_type>)
 // (leading base-initializer at original line 232 stripped)
 233 , ttg_data_copy_t(c)
 234 , m_value(c.m_value)
 235 {
 236 /* reset the container tracker */
 237 ttg_data_copy_container() = nullptr;
 238 }
 239
 // move assignment (signature stripped; tooltip: ttg_data_value_copy_t &
 // operator=(ttg_data_value_copy_t &&c) noexcept(std::is_nothrow_move_assignable_v<value_type>))
 241 noexcept(std::is_nothrow_move_assignable_v<value_type>)
 242 {
 243 /* set the container ptr here, will be reset below */
 // (original line 244 stripped — presumably the statement that sets the
 // container pointer referred to by the comment above; verify)
 245 ttg_data_copy_t::operator=(std::move(c));
 246 m_value = std::move(c.m_value);
 247 /* reset the container tracker */
 248 ttg_data_copy_container() = nullptr;
 249 }
 250
 // copy assignment (signature stripped; tooltip: ttg_data_value_copy_t &
 // operator=(const ttg_data_value_copy_t &c) noexcept(std::is_nothrow_copy_assignable_v<value_type>))
 252 noexcept(std::is_nothrow_copy_assignable_v<value_type>)
 253 {
 254 /* set the container ptr here, will be reset below */
 // (original lines 255-256 stripped — presumably the container-setter call and
 // the ttg_data_copy_t::operator=(c) base assignment; verify)
 257 m_value = c.m_value;
 258 /* reset the container tracker */
 259 ttg_data_copy_container() = nullptr;
 260 }
 261
 /* accessor returning the owned value (signature stripped at original line 262) */
 263 return m_value;
 264 }
 265
 266 /* will destruct the value */
 267 virtual ~ttg_data_value_copy_t() = default;
 268
 /* final override of ttg_data_copy_t::get_ptr(): exposes the owned value
 * as an untyped pointer */
 269 virtual void* get_ptr() override final {
 270 return &m_value;
 271 }
 272 };
273 } // namespace detail
274
275} // namespace ttg_parsec
276
277#endif // TTG_DATA_COPY_H
STL namespace.
ttg_data_copy_t *& ttg_data_copy_container()
this contains PaRSEC-based TTG functionality
Definition fwd.h:19
ttg_data_copy_t(const ttg_data_copy_t &c)
ttg_data_copy_t(ttg_data_copy_t &&c)
parsec_task_t * get_next_task() const
ttg_data_copy_t & operator=(ttg_data_copy_t &&c)
void set_next_task(parsec_task_t *task)
ttg_data_copy_t & operator=(const ttg_data_copy_t &c)
ttg_data_value_copy_t(ttg_data_value_copy_t &&c) noexcept(std::is_nothrow_move_constructible_v< value_type >)
ttg_data_value_copy_t & operator=(ttg_data_value_copy_t &&c) noexcept(std::is_nothrow_move_assignable_v< value_type >)
ttg_data_value_copy_t & operator=(const ttg_data_value_copy_t &c) noexcept(std::is_nothrow_copy_assignable_v< value_type >)
virtual void * get_ptr() override final
ttg_data_value_copy_t(const ttg_data_value_copy_t &c) noexcept(std::is_nothrow_copy_constructible_v< value_type >)