matmul_generated.h
// automatically generated by the FlatBuffers compiler, do not modify


#ifndef FLATBUFFERS_GENERATED_MATMUL_TT_TARGET_TTNN_H_
#define FLATBUFFERS_GENERATED_MATMUL_TT_TARGET_TTNN_H_

#include "flatbuffers/flatbuffers.h"

// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 24 &&
              FLATBUFFERS_VERSION_MINOR == 3 &&
              FLATBUFFERS_VERSION_REVISION == 25,
             "Non-compatible flatbuffers version included");

namespace tt {
namespace target {
namespace ttnn {

struct MatmulMultiCoreReuseProgramConfig;
struct MatmulMultiCoreReuseProgramConfigBuilder;

struct MatmulMultiCoreReuseMultiCastProgramConfig;
struct MatmulMultiCoreReuseMultiCastProgramConfigBuilder;

struct MatmulMultiCoreReuseMultiCast1DProgramConfig;
struct MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder;

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig;
struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder;

struct MatmulOp;
struct MatmulOpBuilder;

struct LinearOp;
struct LinearOpBuilder;

enum class MatmulProgramConfig : uint8_t {
  NONE = 0,
  MatmulMultiCoreReuseProgramConfig = 1,
  MatmulMultiCoreReuseMultiCastProgramConfig = 2,
  MatmulMultiCoreReuseMultiCast1DProgramConfig = 3,
  MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig = 4,
  MIN = NONE,
  MAX = MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig
};

inline const MatmulProgramConfig (&EnumValuesMatmulProgramConfig())[5] {
  static const MatmulProgramConfig values[] = {
    MatmulProgramConfig::NONE,
    MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig,
    MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig,
    MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig,
    MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig
  };
  return values;
}

inline const char * const *EnumNamesMatmulProgramConfig() {
  static const char * const names[6] = {
    "NONE",
    "MatmulMultiCoreReuseProgramConfig",
    "MatmulMultiCoreReuseMultiCastProgramConfig",
    "MatmulMultiCoreReuseMultiCast1DProgramConfig",
    "MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig",
    nullptr
  };
  return names;
}

inline const char *EnumNameMatmulProgramConfig(MatmulProgramConfig e) {
  if (::flatbuffers::IsOutRange(e, MatmulProgramConfig::NONE, MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesMatmulProgramConfig()[index];
}
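
// Usage sketch (hand-written illustration, not flatc output): the enum-name
// helper maps a union tag back to its schema name, returning "" for values
// outside [MIN, MAX].
//
//   const char *name = EnumNameMatmulProgramConfig(
//       MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig);
//   // name == "MatmulMultiCoreReuseMultiCast1DProgramConfig"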

template<typename T> struct MatmulProgramConfigTraits {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::NONE;
};

template<> struct MatmulProgramConfigTraits<tt::target::ttnn::MatmulMultiCoreReuseProgramConfig> {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig;
};

template<> struct MatmulProgramConfigTraits<tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig> {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig;
};

template<> struct MatmulProgramConfigTraits<tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig> {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig;
};

template<> struct MatmulProgramConfigTraits<tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig;
};

bool VerifyMatmulProgramConfig(::flatbuffers::Verifier &verifier, const void *obj, MatmulProgramConfig type);
bool VerifyMatmulProgramConfigVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<MatmulProgramConfig> *types);

struct MatmulMultiCoreReuseProgramConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulMultiCoreReuseProgramConfigBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulMultiCoreReuseProgramConfig";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_COMPUTE_WITH_STORAGE_GRID_SIZE = 4,
    VT_IN0_BLOCK_W = 6,
    VT_OUT_SUBBLOCK_H = 8,
    VT_OUT_SUBBLOCK_W = 10,
    VT_PER_CORE_M = 12,
    VT_PER_CORE_N = 14
  };
  const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size() const {
    return GetStruct<const tt::target::ttnn::CoreCoord *>(VT_COMPUTE_WITH_STORAGE_GRID_SIZE);
  }
  uint64_t in0_block_w() const {
    return GetField<uint64_t>(VT_IN0_BLOCK_W, 0);
  }
  uint64_t out_subblock_h() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_H, 0);
  }
  uint64_t out_subblock_w() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_W, 0);
  }
  uint64_t per_core_m() const {
    return GetField<uint64_t>(VT_PER_CORE_M, 0);
  }
  uint64_t per_core_n() const {
    return GetField<uint64_t>(VT_PER_CORE_N, 0);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<tt::target::ttnn::CoreCoord>(verifier, VT_COMPUTE_WITH_STORAGE_GRID_SIZE, 8) &&
           VerifyField<uint64_t>(verifier, VT_IN0_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_M, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_N, 8) &&
           verifier.EndTable();
  }
};

struct MatmulMultiCoreReuseProgramConfigBuilder {
  typedef MatmulMultiCoreReuseProgramConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_compute_with_storage_grid_size(const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size) {
    fbb_.AddStruct(MatmulMultiCoreReuseProgramConfig::VT_COMPUTE_WITH_STORAGE_GRID_SIZE, compute_with_storage_grid_size);
  }
  void add_in0_block_w(uint64_t in0_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_IN0_BLOCK_W, in0_block_w, 0);
  }
  void add_out_subblock_h(uint64_t out_subblock_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_OUT_SUBBLOCK_H, out_subblock_h, 0);
  }
  void add_out_subblock_w(uint64_t out_subblock_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_OUT_SUBBLOCK_W, out_subblock_w, 0);
  }
  void add_per_core_m(uint64_t per_core_m) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_PER_CORE_M, per_core_m, 0);
  }
  void add_per_core_n(uint64_t per_core_n) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_PER_CORE_N, per_core_n, 0);
  }
  explicit MatmulMultiCoreReuseProgramConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig> CreateMatmulMultiCoreReuseProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size = nullptr,
    uint64_t in0_block_w = 0,
    uint64_t out_subblock_h = 0,
    uint64_t out_subblock_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0) {
  MatmulMultiCoreReuseProgramConfigBuilder builder_(_fbb);
  builder_.add_per_core_n(per_core_n);
  builder_.add_per_core_m(per_core_m);
  builder_.add_out_subblock_w(out_subblock_w);
  builder_.add_out_subblock_h(out_subblock_h);
  builder_.add_in0_block_w(in0_block_w);
  builder_.add_compute_with_storage_grid_size(compute_with_storage_grid_size);
  return builder_.Finish();
}

struct MatmulMultiCoreReuseProgramConfig::Traits {
  using type = MatmulMultiCoreReuseProgramConfig;
  static auto constexpr Create = CreateMatmulMultiCoreReuseProgramConfig;
};
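
// Usage sketch (hand-written illustration, not flatc output): building a
// MatmulMultiCoreReuseProgramConfig with the generated Create helper. The
// CoreCoord (x, y) constructor is assumed from the ttnn types schema, and the
// block/subblock values below are placeholders, not tuned settings.
//
//   ::flatbuffers::FlatBufferBuilder fbb;
//   tt::target::ttnn::CoreCoord grid(/*x=*/8, /*y=*/8);  // assumed field layout
//   auto cfg = CreateMatmulMultiCoreReuseProgramConfig(
//       fbb, &grid,
//       /*in0_block_w=*/2, /*out_subblock_h=*/1, /*out_subblock_w=*/1,
//       /*per_core_m=*/4, /*per_core_n=*/4);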

struct MatmulMultiCoreReuseMultiCastProgramConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulMultiCoreReuseMultiCastProgramConfigBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulMultiCoreReuseMultiCastProgramConfig";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_COMPUTE_WITH_STORAGE_GRID_SIZE = 4,
    VT_IN0_BLOCK_W = 6,
    VT_OUT_SUBBLOCK_H = 8,
    VT_OUT_SUBBLOCK_W = 10,
    VT_OUT_BLOCK_H = 12,
    VT_OUT_BLOCK_W = 14,
    VT_PER_CORE_M = 16,
    VT_PER_CORE_N = 18,
    VT_TRANSPOSE_MCAST = 20,
    VT_FUSED_ACTIVATION = 22,
    VT_FUSE_BATCH = 24
  };
  const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size() const {
    return GetStruct<const tt::target::ttnn::CoreCoord *>(VT_COMPUTE_WITH_STORAGE_GRID_SIZE);
  }
  uint64_t in0_block_w() const {
    return GetField<uint64_t>(VT_IN0_BLOCK_W, 0);
  }
  uint64_t out_subblock_h() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_H, 0);
  }
  uint64_t out_subblock_w() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_W, 0);
  }
  uint64_t out_block_h() const {
    return GetField<uint64_t>(VT_OUT_BLOCK_H, 0);
  }
  uint64_t out_block_w() const {
    return GetField<uint64_t>(VT_OUT_BLOCK_W, 0);
  }
  uint64_t per_core_m() const {
    return GetField<uint64_t>(VT_PER_CORE_M, 0);
  }
  uint64_t per_core_n() const {
    return GetField<uint64_t>(VT_PER_CORE_N, 0);
  }
  bool transpose_mcast() const {
    return GetField<uint8_t>(VT_TRANSPOSE_MCAST, 0) != 0;
  }
  const tt::target::ttnn::UnaryWithParam *fused_activation() const {
    return GetPointer<const tt::target::ttnn::UnaryWithParam *>(VT_FUSED_ACTIVATION);
  }
  bool fuse_batch() const {
    return GetField<uint8_t>(VT_FUSE_BATCH, 0) != 0;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<tt::target::ttnn::CoreCoord>(verifier, VT_COMPUTE_WITH_STORAGE_GRID_SIZE, 8) &&
           VerifyField<uint64_t>(verifier, VT_IN0_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_BLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_M, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_N, 8) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_MCAST, 1) &&
           VerifyOffset(verifier, VT_FUSED_ACTIVATION) &&
           verifier.VerifyTable(fused_activation()) &&
           VerifyField<uint8_t>(verifier, VT_FUSE_BATCH, 1) &&
           verifier.EndTable();
  }
};

struct MatmulMultiCoreReuseMultiCastProgramConfigBuilder {
  typedef MatmulMultiCoreReuseMultiCastProgramConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_compute_with_storage_grid_size(const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size) {
    fbb_.AddStruct(MatmulMultiCoreReuseMultiCastProgramConfig::VT_COMPUTE_WITH_STORAGE_GRID_SIZE, compute_with_storage_grid_size);
  }
  void add_in0_block_w(uint64_t in0_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_IN0_BLOCK_W, in0_block_w, 0);
  }
  void add_out_subblock_h(uint64_t out_subblock_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_OUT_SUBBLOCK_H, out_subblock_h, 0);
  }
  void add_out_subblock_w(uint64_t out_subblock_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_OUT_SUBBLOCK_W, out_subblock_w, 0);
  }
  void add_out_block_h(uint64_t out_block_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_OUT_BLOCK_H, out_block_h, 0);
  }
  void add_out_block_w(uint64_t out_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_OUT_BLOCK_W, out_block_w, 0);
  }
  void add_per_core_m(uint64_t per_core_m) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_PER_CORE_M, per_core_m, 0);
  }
  void add_per_core_n(uint64_t per_core_n) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_PER_CORE_N, per_core_n, 0);
  }
  void add_transpose_mcast(bool transpose_mcast) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_TRANSPOSE_MCAST, static_cast<uint8_t>(transpose_mcast), 0);
  }
  void add_fused_activation(::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCastProgramConfig::VT_FUSED_ACTIVATION, fused_activation);
  }
  void add_fuse_batch(bool fuse_batch) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_FUSE_BATCH, static_cast<uint8_t>(fuse_batch), 0);
  }
  explicit MatmulMultiCoreReuseMultiCastProgramConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig> CreateMatmulMultiCoreReuseMultiCastProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size = nullptr,
    uint64_t in0_block_w = 0,
    uint64_t out_subblock_h = 0,
    uint64_t out_subblock_w = 0,
    uint64_t out_block_h = 0,
    uint64_t out_block_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0,
    bool transpose_mcast = false,
    ::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation = 0,
    bool fuse_batch = false) {
  MatmulMultiCoreReuseMultiCastProgramConfigBuilder builder_(_fbb);
  builder_.add_per_core_n(per_core_n);
  builder_.add_per_core_m(per_core_m);
  builder_.add_out_block_w(out_block_w);
  builder_.add_out_block_h(out_block_h);
  builder_.add_out_subblock_w(out_subblock_w);
  builder_.add_out_subblock_h(out_subblock_h);
  builder_.add_in0_block_w(in0_block_w);
  builder_.add_fused_activation(fused_activation);
  builder_.add_compute_with_storage_grid_size(compute_with_storage_grid_size);
  builder_.add_fuse_batch(fuse_batch);
  builder_.add_transpose_mcast(transpose_mcast);
  return builder_.Finish();
}

struct MatmulMultiCoreReuseMultiCastProgramConfig::Traits {
  using type = MatmulMultiCoreReuseMultiCastProgramConfig;
  static auto constexpr Create = CreateMatmulMultiCoreReuseMultiCastProgramConfig;
};

struct MatmulMultiCoreReuseMultiCast1DProgramConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulMultiCoreReuseMultiCast1DProgramConfig";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_COMPUTE_WITH_STORAGE_GRID_SIZE = 4,
    VT_IN0_BLOCK_W = 6,
    VT_OUT_SUBBLOCK_H = 8,
    VT_OUT_SUBBLOCK_W = 10,
    VT_OUT_BLOCK_H = 12,
    VT_OUT_BLOCK_W = 14,
    VT_PER_CORE_M = 16,
    VT_PER_CORE_N = 18,
    VT_FUSE_BATCH = 20,
    VT_FUSED_ACTIVATION = 22,
    VT_MCAST_IN0 = 24,
    VT_GATHER_IN0 = 26,
    VT_HOP_CORES = 28,
    VT_NUM_GLOBAL_CB_RECEIVERS = 30
  };
  const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size() const {
    return GetStruct<const tt::target::ttnn::CoreCoord *>(VT_COMPUTE_WITH_STORAGE_GRID_SIZE);
  }
  uint64_t in0_block_w() const {
    return GetField<uint64_t>(VT_IN0_BLOCK_W, 0);
  }
  uint64_t out_subblock_h() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_H, 0);
  }
  uint64_t out_subblock_w() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_W, 0);
  }
  uint64_t out_block_h() const {
    return GetField<uint64_t>(VT_OUT_BLOCK_H, 0);
  }
  uint64_t out_block_w() const {
    return GetField<uint64_t>(VT_OUT_BLOCK_W, 0);
  }
  uint64_t per_core_m() const {
    return GetField<uint64_t>(VT_PER_CORE_M, 0);
  }
  uint64_t per_core_n() const {
    return GetField<uint64_t>(VT_PER_CORE_N, 0);
  }
  bool fuse_batch() const {
    return GetField<uint8_t>(VT_FUSE_BATCH, 0) != 0;
  }
  const tt::target::ttnn::UnaryWithParam *fused_activation() const {
    return GetPointer<const tt::target::ttnn::UnaryWithParam *>(VT_FUSED_ACTIVATION);
  }
  bool mcast_in0() const {
    return GetField<uint8_t>(VT_MCAST_IN0, 0) != 0;
  }
  bool gather_in0() const {
    return GetField<uint8_t>(VT_GATHER_IN0, 0) != 0;
  }
  const tt::target::ttnn::CoreRangeSet *hop_cores() const {
    return GetPointer<const tt::target::ttnn::CoreRangeSet *>(VT_HOP_CORES);
  }
  uint64_t num_global_cb_receivers() const {
    return GetField<uint64_t>(VT_NUM_GLOBAL_CB_RECEIVERS, 0);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<tt::target::ttnn::CoreCoord>(verifier, VT_COMPUTE_WITH_STORAGE_GRID_SIZE, 8) &&
           VerifyField<uint64_t>(verifier, VT_IN0_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_BLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_M, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_N, 8) &&
           VerifyField<uint8_t>(verifier, VT_FUSE_BATCH, 1) &&
           VerifyOffset(verifier, VT_FUSED_ACTIVATION) &&
           verifier.VerifyTable(fused_activation()) &&
           VerifyField<uint8_t>(verifier, VT_MCAST_IN0, 1) &&
           VerifyField<uint8_t>(verifier, VT_GATHER_IN0, 1) &&
           VerifyOffset(verifier, VT_HOP_CORES) &&
           verifier.VerifyTable(hop_cores()) &&
           VerifyField<uint64_t>(verifier, VT_NUM_GLOBAL_CB_RECEIVERS, 8) &&
           verifier.EndTable();
  }
};

struct MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder {
  typedef MatmulMultiCoreReuseMultiCast1DProgramConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_compute_with_storage_grid_size(const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size) {
    fbb_.AddStruct(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_COMPUTE_WITH_STORAGE_GRID_SIZE, compute_with_storage_grid_size);
  }
  void add_in0_block_w(uint64_t in0_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_IN0_BLOCK_W, in0_block_w, 0);
  }
  void add_out_subblock_h(uint64_t out_subblock_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_OUT_SUBBLOCK_H, out_subblock_h, 0);
  }
  void add_out_subblock_w(uint64_t out_subblock_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_OUT_SUBBLOCK_W, out_subblock_w, 0);
  }
  void add_out_block_h(uint64_t out_block_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_OUT_BLOCK_H, out_block_h, 0);
  }
  void add_out_block_w(uint64_t out_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_OUT_BLOCK_W, out_block_w, 0);
  }
  void add_per_core_m(uint64_t per_core_m) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_PER_CORE_M, per_core_m, 0);
  }
  void add_per_core_n(uint64_t per_core_n) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_PER_CORE_N, per_core_n, 0);
  }
  void add_fuse_batch(bool fuse_batch) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_FUSE_BATCH, static_cast<uint8_t>(fuse_batch), 0);
  }
  void add_fused_activation(::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_FUSED_ACTIVATION, fused_activation);
  }
  void add_mcast_in0(bool mcast_in0) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_MCAST_IN0, static_cast<uint8_t>(mcast_in0), 0);
  }
  void add_gather_in0(bool gather_in0) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_GATHER_IN0, static_cast<uint8_t>(gather_in0), 0);
  }
  void add_hop_cores(::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> hop_cores) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_HOP_CORES, hop_cores);
  }
  void add_num_global_cb_receivers(uint64_t num_global_cb_receivers) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_NUM_GLOBAL_CB_RECEIVERS, num_global_cb_receivers, 0);
  }
  explicit MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig> CreateMatmulMultiCoreReuseMultiCast1DProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size = nullptr,
    uint64_t in0_block_w = 0,
    uint64_t out_subblock_h = 0,
    uint64_t out_subblock_w = 0,
    uint64_t out_block_h = 0,
    uint64_t out_block_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0,
    bool fuse_batch = false,
    ::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation = 0,
    bool mcast_in0 = false,
    bool gather_in0 = false,
    ::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> hop_cores = 0,
    uint64_t num_global_cb_receivers = 0) {
  MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder builder_(_fbb);
  builder_.add_num_global_cb_receivers(num_global_cb_receivers);
  builder_.add_per_core_n(per_core_n);
  builder_.add_per_core_m(per_core_m);
  builder_.add_out_block_w(out_block_w);
  builder_.add_out_block_h(out_block_h);
  builder_.add_out_subblock_w(out_subblock_w);
  builder_.add_out_subblock_h(out_subblock_h);
  builder_.add_in0_block_w(in0_block_w);
  builder_.add_hop_cores(hop_cores);
  builder_.add_fused_activation(fused_activation);
  builder_.add_compute_with_storage_grid_size(compute_with_storage_grid_size);
  builder_.add_gather_in0(gather_in0);
  builder_.add_mcast_in0(mcast_in0);
  builder_.add_fuse_batch(fuse_batch);
  return builder_.Finish();
}

struct MatmulMultiCoreReuseMultiCast1DProgramConfig::Traits {
  using type = MatmulMultiCoreReuseMultiCast1DProgramConfig;
  static auto constexpr Create = CreateMatmulMultiCoreReuseMultiCast1DProgramConfig;
};
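
// Usage sketch (hand-written illustration): the 1D variant adds fuse_batch,
// mcast_in0/gather_in0, hop_cores, and num_global_cb_receivers. Trailing
// parameters have defaults, so optional table fields (e.g. hop_cores) can be
// left as zero offsets. Values below are placeholders, not tuned settings;
// fbb and grid are assumed from the earlier sketch.
//
//   auto cfg1d = CreateMatmulMultiCoreReuseMultiCast1DProgramConfig(
//       fbb, &grid,
//       /*in0_block_w=*/2, /*out_subblock_h=*/1, /*out_subblock_w=*/1,
//       /*out_block_h=*/4, /*out_block_w=*/4,
//       /*per_core_m=*/4, /*per_core_n=*/4,
//       /*fuse_batch=*/true, /*fused_activation=*/0, /*mcast_in0=*/true);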

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_IN0_BLOCK_W = 4,
    VT_PER_CORE_M = 6,
    VT_PER_CORE_N = 8,
    VT_FUSED_ACTIVATION = 10
  };
  uint64_t in0_block_w() const {
    return GetField<uint64_t>(VT_IN0_BLOCK_W, 0);
  }
  uint64_t per_core_m() const {
    return GetField<uint64_t>(VT_PER_CORE_M, 0);
  }
  uint64_t per_core_n() const {
    return GetField<uint64_t>(VT_PER_CORE_N, 0);
  }
  const tt::target::ttnn::UnaryWithParam *fused_activation() const {
    return GetPointer<const tt::target::ttnn::UnaryWithParam *>(VT_FUSED_ACTIVATION);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint64_t>(verifier, VT_IN0_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_M, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_N, 8) &&
           VerifyOffset(verifier, VT_FUSED_ACTIVATION) &&
           verifier.VerifyTable(fused_activation()) &&
           verifier.EndTable();
  }
};

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder {
  typedef MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_in0_block_w(uint64_t in0_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::VT_IN0_BLOCK_W, in0_block_w, 0);
  }
  void add_per_core_m(uint64_t per_core_m) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::VT_PER_CORE_M, per_core_m, 0);
  }
  void add_per_core_n(uint64_t per_core_n) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::VT_PER_CORE_N, per_core_n, 0);
  }
  void add_fused_activation(::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::VT_FUSED_ACTIVATION, fused_activation);
  }
  explicit MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    uint64_t in0_block_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0,
    ::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation = 0) {
  MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder builder_(_fbb);
  builder_.add_per_core_n(per_core_n);
  builder_.add_per_core_m(per_core_m);
  builder_.add_in0_block_w(in0_block_w);
  builder_.add_fused_activation(fused_activation);
  return builder_.Finish();
}

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::Traits {
  using type = MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig;
  static auto constexpr Create = CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig;
};

struct MatmulOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulOpBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulOp";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_A = 4,
    VT_B = 6,
    VT_OUT = 8,
    VT_TRANSPOSE_A = 10,
    VT_TRANSPOSE_B = 12,
    VT_MATMUL_PROGRAM_CONFIG_TYPE = 14,
    VT_MATMUL_PROGRAM_CONFIG = 16
  };
  const tt::target::ttnn::TensorRef *a() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_A);
  }
  const tt::target::ttnn::TensorRef *b() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_B);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  bool transpose_a() const {
    return GetField<uint8_t>(VT_TRANSPOSE_A, 0) != 0;
  }
  bool transpose_b() const {
    return GetField<uint8_t>(VT_TRANSPOSE_B, 0) != 0;
  }
  tt::target::ttnn::MatmulProgramConfig matmul_program_config_type() const {
    return static_cast<tt::target::ttnn::MatmulProgramConfig>(GetField<uint8_t>(VT_MATMUL_PROGRAM_CONFIG_TYPE, 0));
  }
  const void *matmul_program_config() const {
    return GetPointer<const void *>(VT_MATMUL_PROGRAM_CONFIG);
  }
  template<typename T> const T *matmul_program_config_as() const;
  const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *matmul_program_config_as_MatmulMultiCoreReuseProgramConfig() const {
    return matmul_program_config_type() == tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig ? static_cast<const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *>(matmul_program_config()) : nullptr;
  }
  const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *matmul_program_config_as_MatmulMultiCoreReuseMultiCastProgramConfig() const {
    return matmul_program_config_type() == tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig ? static_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *>(matmul_program_config()) : nullptr;
  }
  const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *matmul_program_config_as_MatmulMultiCoreReuseMultiCast1DProgramConfig() const {
    return matmul_program_config_type() == tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig ? static_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *>(matmul_program_config()) : nullptr;
  }
  const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *matmul_program_config_as_MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig() const {
    return matmul_program_config_type() == tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig ? static_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *>(matmul_program_config()) : nullptr;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_A) &&
           verifier.VerifyTable(a()) &&
           VerifyOffset(verifier, VT_B) &&
           verifier.VerifyTable(b()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_A, 1) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_B, 1) &&
           VerifyField<uint8_t>(verifier, VT_MATMUL_PROGRAM_CONFIG_TYPE, 1) &&
           VerifyOffset(verifier, VT_MATMUL_PROGRAM_CONFIG) &&
           VerifyMatmulProgramConfig(verifier, matmul_program_config(), matmul_program_config_type()) &&
           verifier.EndTable();
  }
};

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseProgramConfig();
}

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseMultiCastProgramConfig();
}

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseMultiCast1DProgramConfig();
}

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig();
}

struct MatmulOpBuilder {
  typedef MatmulOp Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_a(::flatbuffers::Offset<tt::target::ttnn::TensorRef> a) {
    fbb_.AddOffset(MatmulOp::VT_A, a);
  }
  void add_b(::flatbuffers::Offset<tt::target::ttnn::TensorRef> b) {
    fbb_.AddOffset(MatmulOp::VT_B, b);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(MatmulOp::VT_OUT, out);
  }
  void add_transpose_a(bool transpose_a) {
    fbb_.AddElement<uint8_t>(MatmulOp::VT_TRANSPOSE_A, static_cast<uint8_t>(transpose_a), 0);
  }
  void add_transpose_b(bool transpose_b) {
    fbb_.AddElement<uint8_t>(MatmulOp::VT_TRANSPOSE_B, static_cast<uint8_t>(transpose_b), 0);
  }
  void add_matmul_program_config_type(tt::target::ttnn::MatmulProgramConfig matmul_program_config_type) {
    fbb_.AddElement<uint8_t>(MatmulOp::VT_MATMUL_PROGRAM_CONFIG_TYPE, static_cast<uint8_t>(matmul_program_config_type), 0);
  }
  void add_matmul_program_config(::flatbuffers::Offset<void> matmul_program_config) {
    fbb_.AddOffset(MatmulOp::VT_MATMUL_PROGRAM_CONFIG, matmul_program_config);
  }
  explicit MatmulOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulOp>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulOp> CreateMatmulOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> a = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> b = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    bool transpose_a = false,
    bool transpose_b = false,
    tt::target::ttnn::MatmulProgramConfig matmul_program_config_type = tt::target::ttnn::MatmulProgramConfig::NONE,
    ::flatbuffers::Offset<void> matmul_program_config = 0) {
  MatmulOpBuilder builder_(_fbb);
  builder_.add_matmul_program_config(matmul_program_config);
  builder_.add_out(out);
  builder_.add_b(b);
  builder_.add_a(a);
  builder_.add_matmul_program_config_type(matmul_program_config_type);
  builder_.add_transpose_b(transpose_b);
  builder_.add_transpose_a(transpose_a);
  return builder_.Finish();
}

struct MatmulOp::Traits {
  using type = MatmulOp;
  static auto constexpr Create = CreateMatmulOp;
};
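
// Usage sketch (hand-written illustration): matmul_program_config is a
// FlatBuffers union, so MatmulOp stores a type tag plus a type-erased offset.
// Pack both together via Offset<T>::Union(); a, b, and out are assumed
// TensorRef offsets built elsewhere in the same buffer.
//
//   auto cfg = CreateMatmulMultiCoreReuseProgramConfig(fbb, &grid, 2, 1, 1, 4, 4);
//   auto op = CreateMatmulOp(
//       fbb, a, b, out, /*transpose_a=*/false, /*transpose_b=*/false,
//       MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig, cfg.Union());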

struct LinearOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef LinearOpBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.LinearOp";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_A = 4,
    VT_B = 6,
    VT_BIAS = 8,
    VT_OUT = 10,
    VT_TRANSPOSE_A = 12,
    VT_TRANSPOSE_B = 14
  };
  const tt::target::ttnn::TensorRef *a() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_A);
  }
  const tt::target::ttnn::TensorRef *b() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_B);
  }
  const tt::target::ttnn::TensorRef *bias() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_BIAS);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  bool transpose_a() const {
    return GetField<uint8_t>(VT_TRANSPOSE_A, 0) != 0;
  }
  bool transpose_b() const {
    return GetField<uint8_t>(VT_TRANSPOSE_B, 0) != 0;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_A) &&
           verifier.VerifyTable(a()) &&
           VerifyOffset(verifier, VT_B) &&
           verifier.VerifyTable(b()) &&
           VerifyOffset(verifier, VT_BIAS) &&
           verifier.VerifyTable(bias()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_A, 1) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_B, 1) &&
           verifier.EndTable();
  }
};

struct LinearOpBuilder {
  typedef LinearOp Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_a(::flatbuffers::Offset<tt::target::ttnn::TensorRef> a) {
    fbb_.AddOffset(LinearOp::VT_A, a);
  }
  void add_b(::flatbuffers::Offset<tt::target::ttnn::TensorRef> b) {
    fbb_.AddOffset(LinearOp::VT_B, b);
  }
  void add_bias(::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias) {
    fbb_.AddOffset(LinearOp::VT_BIAS, bias);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(LinearOp::VT_OUT, out);
  }
  void add_transpose_a(bool transpose_a) {
    fbb_.AddElement<uint8_t>(LinearOp::VT_TRANSPOSE_A, static_cast<uint8_t>(transpose_a), 0);
  }
  void add_transpose_b(bool transpose_b) {
    fbb_.AddElement<uint8_t>(LinearOp::VT_TRANSPOSE_B, static_cast<uint8_t>(transpose_b), 0);
  }
  explicit LinearOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<LinearOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<LinearOp>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<LinearOp> CreateLinearOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> a = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> b = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    bool transpose_a = false,
    bool transpose_b = false) {
  LinearOpBuilder builder_(_fbb);
  builder_.add_out(out);
  builder_.add_bias(bias);
  builder_.add_b(b);
  builder_.add_a(a);
  builder_.add_transpose_b(transpose_b);
  builder_.add_transpose_a(transpose_a);
  return builder_.Finish();
}

struct LinearOp::Traits {
  using type = LinearOp;
  static auto constexpr Create = CreateLinearOp;
};

inline bool VerifyMatmulProgramConfig(::flatbuffers::Verifier &verifier, const void *obj, MatmulProgramConfig type) {
  switch (type) {
    case MatmulProgramConfig::NONE: {
      return true;
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    default: return true;
  }
}

inline bool VerifyMatmulProgramConfigVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<MatmulProgramConfig> *types) {
  if (!values || !types) return !values && !types;
  if (values->size() != types->size()) return false;
  for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
    if (!VerifyMatmulProgramConfig(
        verifier, values->Get(i), types->GetEnum<MatmulProgramConfig>(i))) {
      return false;
    }
  }
  return true;
}
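
// Usage sketch (hand-written illustration): after verifying a buffer, read the
// union back through the typed accessor, which returns nullptr when the stored
// tag does not match the requested table type; op is assumed to be a
// const MatmulOp* obtained from an already-verified buffer.
//
//   if (auto *reuse = op->matmul_program_config_as<
//           tt::target::ttnn::MatmulMultiCoreReuseProgramConfig>()) {
//     uint64_t m = reuse->per_core_m();  // safe: tag matched
//   }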

}  // namespace ttnn
}  // namespace target
}  // namespace tt

#endif  // FLATBUFFERS_GENERATED_MATMUL_TT_TARGET_TTNN_H_