TT-MLIR
ccl_generated.h
// automatically generated by the FlatBuffers compiler, do not modify


#ifndef FLATBUFFERS_GENERATED_CCL_TT_TARGET_TTNN_H_
#define FLATBUFFERS_GENERATED_CCL_TT_TARGET_TTNN_H_

#include "flatbuffers/flatbuffers.h"

// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 24 &&
              FLATBUFFERS_VERSION_MINOR == 3 &&
              FLATBUFFERS_VERSION_REVISION == 25,
              "Non-compatible flatbuffers version included");

namespace tt {
namespace target {
namespace ttnn {

struct AllGatherOp;
struct AllGatherOpBuilder;

struct CollectivePermuteOp;
struct CollectivePermuteOpBuilder;

struct MeshShardOp;
struct MeshShardOpBuilder;

struct ReduceScatterOp;
struct ReduceScatterOpBuilder;

struct AllGatherOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef AllGatherOpBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.AllGatherOp";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_IN = 4,
    VT_OUT = 6,
    VT_DEVICE = 8,
    VT_ALL_GATHER_DIM = 10,
    VT_CLUSTER_AXIS = 12,
    VT_NUM_LINKS = 14
  };
  const tt::target::ttnn::TensorRef *in() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_IN);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  const tt::target::DeviceRef *device() const {
    return GetPointer<const tt::target::DeviceRef *>(VT_DEVICE);
  }
  int32_t all_gather_dim() const {
    return GetField<int32_t>(VT_ALL_GATHER_DIM, 0);
  }
  uint32_t cluster_axis() const {
    return GetField<uint32_t>(VT_CLUSTER_AXIS, 0);
  }
  uint32_t num_links() const {
    return GetField<uint32_t>(VT_NUM_LINKS, 0);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_IN) &&
           verifier.VerifyTable(in()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           VerifyOffset(verifier, VT_DEVICE) &&
           verifier.VerifyTable(device()) &&
           VerifyField<int32_t>(verifier, VT_ALL_GATHER_DIM, 4) &&
           VerifyField<uint32_t>(verifier, VT_CLUSTER_AXIS, 4) &&
           VerifyField<uint32_t>(verifier, VT_NUM_LINKS, 4) &&
           verifier.EndTable();
  }
};

struct AllGatherOpBuilder {
  typedef AllGatherOp Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_in(::flatbuffers::Offset<tt::target::ttnn::TensorRef> in) {
    fbb_.AddOffset(AllGatherOp::VT_IN, in);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(AllGatherOp::VT_OUT, out);
  }
  void add_device(::flatbuffers::Offset<tt::target::DeviceRef> device) {
    fbb_.AddOffset(AllGatherOp::VT_DEVICE, device);
  }
  void add_all_gather_dim(int32_t all_gather_dim) {
    fbb_.AddElement<int32_t>(AllGatherOp::VT_ALL_GATHER_DIM, all_gather_dim, 0);
  }
  void add_cluster_axis(uint32_t cluster_axis) {
    fbb_.AddElement<uint32_t>(AllGatherOp::VT_CLUSTER_AXIS, cluster_axis, 0);
  }
  void add_num_links(uint32_t num_links) {
    fbb_.AddElement<uint32_t>(AllGatherOp::VT_NUM_LINKS, num_links, 0);
  }
  explicit AllGatherOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<AllGatherOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<AllGatherOp>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<AllGatherOp> CreateAllGatherOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
    int32_t all_gather_dim = 0,
    uint32_t cluster_axis = 0,
    uint32_t num_links = 0) {
  AllGatherOpBuilder builder_(_fbb);
  builder_.add_num_links(num_links);
  builder_.add_cluster_axis(cluster_axis);
  builder_.add_all_gather_dim(all_gather_dim);
  builder_.add_device(device);
  builder_.add_out(out);
  builder_.add_in(in);
  return builder_.Finish();
}

struct AllGatherOp::Traits {
  using type = AllGatherOp;
  static auto constexpr Create = CreateAllGatherOp;
};

struct CollectivePermuteOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef CollectivePermuteOpBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.CollectivePermuteOp";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_IN = 4,
    VT_OUT = 6,
    VT_DEVICE = 8,
    VT_SOURCE_TARGET_PAIRS = 10
  };
  const tt::target::ttnn::TensorRef *in() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_IN);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  const tt::target::DeviceRef *device() const {
    return GetPointer<const tt::target::DeviceRef *>(VT_DEVICE);
  }
  const ::flatbuffers::Vector<int64_t> *source_target_pairs() const {
    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_SOURCE_TARGET_PAIRS);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_IN) &&
           verifier.VerifyTable(in()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           VerifyOffset(verifier, VT_DEVICE) &&
           verifier.VerifyTable(device()) &&
           VerifyOffset(verifier, VT_SOURCE_TARGET_PAIRS) &&
           verifier.VerifyVector(source_target_pairs()) &&
           verifier.EndTable();
  }
};

struct CollectivePermuteOpBuilder {
  typedef CollectivePermuteOp Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_in(::flatbuffers::Offset<tt::target::ttnn::TensorRef> in) {
    fbb_.AddOffset(CollectivePermuteOp::VT_IN, in);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(CollectivePermuteOp::VT_OUT, out);
  }
  void add_device(::flatbuffers::Offset<tt::target::DeviceRef> device) {
    fbb_.AddOffset(CollectivePermuteOp::VT_DEVICE, device);
  }
  void add_source_target_pairs(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> source_target_pairs) {
    fbb_.AddOffset(CollectivePermuteOp::VT_SOURCE_TARGET_PAIRS, source_target_pairs);
  }
  explicit CollectivePermuteOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<CollectivePermuteOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<CollectivePermuteOp>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<CollectivePermuteOp> CreateCollectivePermuteOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> source_target_pairs = 0) {
  CollectivePermuteOpBuilder builder_(_fbb);
  builder_.add_source_target_pairs(source_target_pairs);
  builder_.add_device(device);
  builder_.add_out(out);
  builder_.add_in(in);
  return builder_.Finish();
}

struct CollectivePermuteOp::Traits {
  using type = CollectivePermuteOp;
  static auto constexpr Create = CreateCollectivePermuteOp;
};

inline ::flatbuffers::Offset<CollectivePermuteOp> CreateCollectivePermuteOpDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
    const std::vector<int64_t> *source_target_pairs = nullptr) {
  auto source_target_pairs__ = source_target_pairs ? _fbb.CreateVector<int64_t>(*source_target_pairs) : 0;
  return tt::target::ttnn::CreateCollectivePermuteOp(
      _fbb,
      in,
      out,
      device,
      source_target_pairs__);
}

struct MeshShardOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MeshShardOpBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MeshShardOp";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_IN = 4,
    VT_OUT = 6,
    VT_DEVICE = 8,
    VT_SHARD_DIRECTION = 10,
    VT_SHARD_TYPE = 12,
    VT_SHARD_SHAPE = 14,
    VT_SHARD_DIMS = 16
  };
  const tt::target::ttnn::TensorRef *in() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_IN);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  const tt::target::DeviceRef *device() const {
    return GetPointer<const tt::target::DeviceRef *>(VT_DEVICE);
  }
  tt::target::ttnn::MeshShardDirection shard_direction() const {
    return static_cast<tt::target::ttnn::MeshShardDirection>(GetField<uint32_t>(VT_SHARD_DIRECTION, 0));
  }
  tt::target::ttnn::MeshShardType shard_type() const {
    return static_cast<tt::target::ttnn::MeshShardType>(GetField<uint32_t>(VT_SHARD_TYPE, 0));
  }
  const ::flatbuffers::Vector<int64_t> *shard_shape() const {
    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_SHARD_SHAPE);
  }
  const ::flatbuffers::Vector<int64_t> *shard_dims() const {
    return GetPointer<const ::flatbuffers::Vector<int64_t> *>(VT_SHARD_DIMS);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_IN) &&
           verifier.VerifyTable(in()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           VerifyOffset(verifier, VT_DEVICE) &&
           verifier.VerifyTable(device()) &&
           VerifyField<uint32_t>(verifier, VT_SHARD_DIRECTION, 4) &&
           VerifyField<uint32_t>(verifier, VT_SHARD_TYPE, 4) &&
           VerifyOffset(verifier, VT_SHARD_SHAPE) &&
           verifier.VerifyVector(shard_shape()) &&
           VerifyOffset(verifier, VT_SHARD_DIMS) &&
           verifier.VerifyVector(shard_dims()) &&
           verifier.EndTable();
  }
};

struct MeshShardOpBuilder {
  typedef MeshShardOp Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_in(::flatbuffers::Offset<tt::target::ttnn::TensorRef> in) {
    fbb_.AddOffset(MeshShardOp::VT_IN, in);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(MeshShardOp::VT_OUT, out);
  }
  void add_device(::flatbuffers::Offset<tt::target::DeviceRef> device) {
    fbb_.AddOffset(MeshShardOp::VT_DEVICE, device);
  }
  void add_shard_direction(tt::target::ttnn::MeshShardDirection shard_direction) {
    fbb_.AddElement<uint32_t>(MeshShardOp::VT_SHARD_DIRECTION, static_cast<uint32_t>(shard_direction), 0);
  }
  void add_shard_type(tt::target::ttnn::MeshShardType shard_type) {
    fbb_.AddElement<uint32_t>(MeshShardOp::VT_SHARD_TYPE, static_cast<uint32_t>(shard_type), 0);
  }
  void add_shard_shape(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> shard_shape) {
    fbb_.AddOffset(MeshShardOp::VT_SHARD_SHAPE, shard_shape);
  }
  void add_shard_dims(::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> shard_dims) {
    fbb_.AddOffset(MeshShardOp::VT_SHARD_DIMS, shard_dims);
  }
  explicit MeshShardOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MeshShardOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MeshShardOp>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MeshShardOp> CreateMeshShardOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
    tt::target::ttnn::MeshShardDirection shard_direction = tt::target::ttnn::MeshShardDirection::FullToShardShape,
    tt::target::ttnn::MeshShardType shard_type = tt::target::ttnn::MeshShardType::Identity,
    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> shard_shape = 0,
    ::flatbuffers::Offset<::flatbuffers::Vector<int64_t>> shard_dims = 0) {
  MeshShardOpBuilder builder_(_fbb);
  builder_.add_shard_dims(shard_dims);
  builder_.add_shard_shape(shard_shape);
  builder_.add_shard_type(shard_type);
  builder_.add_shard_direction(shard_direction);
  builder_.add_device(device);
  builder_.add_out(out);
  builder_.add_in(in);
  return builder_.Finish();
}

struct MeshShardOp::Traits {
  using type = MeshShardOp;
  static auto constexpr Create = CreateMeshShardOp;
};

inline ::flatbuffers::Offset<MeshShardOp> CreateMeshShardOpDirect(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
    tt::target::ttnn::MeshShardDirection shard_direction = tt::target::ttnn::MeshShardDirection::FullToShardShape,
    tt::target::ttnn::MeshShardType shard_type = tt::target::ttnn::MeshShardType::Identity,
    const std::vector<int64_t> *shard_shape = nullptr,
    const std::vector<int64_t> *shard_dims = nullptr) {
  auto shard_shape__ = shard_shape ? _fbb.CreateVector<int64_t>(*shard_shape) : 0;
  auto shard_dims__ = shard_dims ? _fbb.CreateVector<int64_t>(*shard_dims) : 0;
  return tt::target::ttnn::CreateMeshShardOp(
      _fbb,
      in,
      out,
      device,
      shard_direction,
      shard_type,
      shard_shape__,
      shard_dims__);
}

struct ReduceScatterOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef ReduceScatterOpBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.ReduceScatterOp";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_IN = 4,
    VT_OUT = 6,
    VT_DEVICE = 8,
    VT_SCATTER_DIM = 10,
    VT_REDUCE_TYPE = 12,
    VT_CLUSTER_AXIS = 14,
    VT_NUM_LINKS = 16
  };
  const tt::target::ttnn::TensorRef *in() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_IN);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  const tt::target::DeviceRef *device() const {
    return GetPointer<const tt::target::DeviceRef *>(VT_DEVICE);
  }
  int32_t scatter_dim() const {
    return GetField<int32_t>(VT_SCATTER_DIM, 0);
  }
  uint32_t reduce_type() const {
    return GetField<uint32_t>(VT_REDUCE_TYPE, 0);
  }
  uint32_t cluster_axis() const {
    return GetField<uint32_t>(VT_CLUSTER_AXIS, 0);
  }
  uint32_t num_links() const {
    return GetField<uint32_t>(VT_NUM_LINKS, 0);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_IN) &&
           verifier.VerifyTable(in()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           VerifyOffset(verifier, VT_DEVICE) &&
           verifier.VerifyTable(device()) &&
           VerifyField<int32_t>(verifier, VT_SCATTER_DIM, 4) &&
           VerifyField<uint32_t>(verifier, VT_REDUCE_TYPE, 4) &&
           VerifyField<uint32_t>(verifier, VT_CLUSTER_AXIS, 4) &&
           VerifyField<uint32_t>(verifier, VT_NUM_LINKS, 4) &&
           verifier.EndTable();
  }
};

struct ReduceScatterOpBuilder {
  typedef ReduceScatterOp Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_in(::flatbuffers::Offset<tt::target::ttnn::TensorRef> in) {
    fbb_.AddOffset(ReduceScatterOp::VT_IN, in);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(ReduceScatterOp::VT_OUT, out);
  }
  void add_device(::flatbuffers::Offset<tt::target::DeviceRef> device) {
    fbb_.AddOffset(ReduceScatterOp::VT_DEVICE, device);
  }
  void add_scatter_dim(int32_t scatter_dim) {
    fbb_.AddElement<int32_t>(ReduceScatterOp::VT_SCATTER_DIM, scatter_dim, 0);
  }
  void add_reduce_type(uint32_t reduce_type) {
    fbb_.AddElement<uint32_t>(ReduceScatterOp::VT_REDUCE_TYPE, reduce_type, 0);
  }
  void add_cluster_axis(uint32_t cluster_axis) {
    fbb_.AddElement<uint32_t>(ReduceScatterOp::VT_CLUSTER_AXIS, cluster_axis, 0);
  }
  void add_num_links(uint32_t num_links) {
    fbb_.AddElement<uint32_t>(ReduceScatterOp::VT_NUM_LINKS, num_links, 0);
  }
  explicit ReduceScatterOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<ReduceScatterOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<ReduceScatterOp>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<ReduceScatterOp> CreateReduceScatterOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
    int32_t scatter_dim = 0,
    uint32_t reduce_type = 0,
    uint32_t cluster_axis = 0,
    uint32_t num_links = 0) {
  ReduceScatterOpBuilder builder_(_fbb);
  builder_.add_num_links(num_links);
  builder_.add_cluster_axis(cluster_axis);
  builder_.add_reduce_type(reduce_type);
  builder_.add_scatter_dim(scatter_dim);
  builder_.add_device(device);
  builder_.add_out(out);
  builder_.add_in(in);
  return builder_.Finish();
}

struct ReduceScatterOp::Traits {
  using type = ReduceScatterOp;
  static auto constexpr Create = CreateReduceScatterOp;
};

} // namespace ttnn
} // namespace target
} // namespace tt

#endif // FLATBUFFERS_GENERATED_CCL_TT_TARGET_TTNN_H_
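
Usage sketch (illustrative, not part of the generated header): these tables follow the standard FlatBuffers builder/accessor pattern, so an op can be serialized with its Create* helper and read back through the generated accessors. The include path below and the choice to finish an AllGatherOp as a buffer root are assumptions made only to keep the example self-contained; in practice these op tables are nested inside the larger ttnn program flatbuffer, and the in/out TensorRef and DeviceRef offsets would reference tables written earlier into the same FlatBufferBuilder.

// Illustrative sketch only -- not emitted by flatc. Assumes the generated
// header is reachable as "ccl_generated.h" on the include path.
#include <cstdio>

#include "flatbuffers/flatbuffers.h"

#include "ccl_generated.h"

int main() {
  ::flatbuffers::FlatBufferBuilder fbb;

  // Serialize an AllGatherOp with only its scalar fields set. The TensorRef /
  // DeviceRef offsets are left null (0) purely for this sketch.
  auto op = tt::target::ttnn::CreateAllGatherOp(
      fbb, /*in=*/0, /*out=*/0, /*device=*/0,
      /*all_gather_dim=*/3, /*cluster_axis=*/1, /*num_links=*/1);
  fbb.Finish(op);

  // Verify the finished buffer, then read it back through the accessors.
  ::flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
  if (!verifier.VerifyBuffer<tt::target::ttnn::AllGatherOp>(nullptr)) {
    std::puts("verification failed");
    return 1;
  }
  const auto *root =
      ::flatbuffers::GetRoot<tt::target::ttnn::AllGatherOp>(fbb.GetBufferPointer());
  std::printf("%s: dim=%d axis=%u links=%u\n",
              tt::target::ttnn::AllGatherOp::GetFullyQualifiedName(),
              root->all_gather_dim(), root->cluster_axis(), root->num_links());
  return 0;
}

The *Direct variants (CreateCollectivePermuteOpDirect, CreateMeshShardOpDirect) wrap the same Create* calls but accept std::vector<int64_t>* for the vector-valued fields and build the FlatBuffers vectors themselves, while each op's Traits struct exposes the matching Create function for generic, template-driven construction.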