TT-MLIR
matmul_generated.h
// automatically generated by the FlatBuffers compiler, do not modify


#ifndef FLATBUFFERS_GENERATED_MATMUL_TT_TARGET_TTNN_H_
#define FLATBUFFERS_GENERATED_MATMUL_TT_TARGET_TTNN_H_

#include "flatbuffers/flatbuffers.h"

// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 24 &&
              FLATBUFFERS_VERSION_MINOR == 3 &&
              FLATBUFFERS_VERSION_REVISION == 25,
             "Non-compatible flatbuffers version included");

#include "ttmlir/Target/Common/types_generated.h"
#include "ttmlir/Target/TTNN/types_generated.h"

namespace tt {
namespace target {
namespace ttnn {

struct MatmulMultiCoreReuseProgramConfig;
struct MatmulMultiCoreReuseProgramConfigBuilder;

struct MatmulMultiCoreReuseMultiCastProgramConfig;
struct MatmulMultiCoreReuseMultiCastProgramConfigBuilder;

struct MatmulMultiCoreReuseMultiCast1DProgramConfig;
struct MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder;

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig;
struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder;

struct MatmulOp;
struct MatmulOpBuilder;

struct LinearOp;
struct LinearOpBuilder;

enum class MatmulProgramConfig : uint8_t {
  NONE = 0,
  MatmulMultiCoreReuseProgramConfig = 1,
  MatmulMultiCoreReuseMultiCastProgramConfig = 2,
  MatmulMultiCoreReuseMultiCast1DProgramConfig = 3,
  MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig = 4,
  MIN = NONE,
  MAX = MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig
};

inline const MatmulProgramConfig (&EnumValuesMatmulProgramConfig())[5] {
  static const MatmulProgramConfig values[] = {
    MatmulProgramConfig::NONE,
    MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig,
    MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig,
    MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig,
    MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig
  };
  return values;
}

inline const char * const *EnumNamesMatmulProgramConfig() {
  static const char * const names[6] = {
    "NONE",
    "MatmulMultiCoreReuseProgramConfig",
    "MatmulMultiCoreReuseMultiCastProgramConfig",
    "MatmulMultiCoreReuseMultiCast1DProgramConfig",
    "MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig",
    nullptr
  };
  return names;
}

inline const char *EnumNameMatmulProgramConfig(MatmulProgramConfig e) {
  if (::flatbuffers::IsOutRange(e, MatmulProgramConfig::NONE, MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesMatmulProgramConfig()[index];
}
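
// Usage sketch (not part of the generated header): the three helpers above
// give the value list, the name table, and a value-to-name lookup for the
// MatmulProgramConfig union tag. A minimal example using only this file:
inline const char *ExampleMatmulProgramConfigName() {
  // Yields "MatmulMultiCoreReuseProgramConfig".
  return EnumNameMatmulProgramConfig(
      MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig);
}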

template<typename T> struct MatmulProgramConfigTraits {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::NONE;
};

template<> struct MatmulProgramConfigTraits<tt::target::ttnn::MatmulMultiCoreReuseProgramConfig> {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig;
};

template<> struct MatmulProgramConfigTraits<tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig> {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig;
};

template<> struct MatmulProgramConfigTraits<tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig> {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig;
};

template<> struct MatmulProgramConfigTraits<tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig;
};

bool VerifyMatmulProgramConfig(::flatbuffers::Verifier &verifier, const void *obj, MatmulProgramConfig type);
bool VerifyMatmulProgramConfigVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<MatmulProgramConfig> *types);

struct MatmulMultiCoreReuseProgramConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulMultiCoreReuseProgramConfigBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulMultiCoreReuseProgramConfig";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_COMPUTE_WITH_STORAGE_GRID_SIZE = 4,
    VT_IN0_BLOCK_W = 6,
    VT_OUT_SUBBLOCK_H = 8,
    VT_OUT_SUBBLOCK_W = 10,
    VT_PER_CORE_M = 12,
    VT_PER_CORE_N = 14
  };
  const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size() const {
    return GetStruct<const tt::target::ttnn::CoreCoord *>(VT_COMPUTE_WITH_STORAGE_GRID_SIZE);
  }
  uint64_t in0_block_w() const {
    return GetField<uint64_t>(VT_IN0_BLOCK_W, 0);
  }
  uint64_t out_subblock_h() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_H, 0);
  }
  uint64_t out_subblock_w() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_W, 0);
  }
  uint64_t per_core_m() const {
    return GetField<uint64_t>(VT_PER_CORE_M, 0);
  }
  uint64_t per_core_n() const {
    return GetField<uint64_t>(VT_PER_CORE_N, 0);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<tt::target::ttnn::CoreCoord>(verifier, VT_COMPUTE_WITH_STORAGE_GRID_SIZE, 8) &&
           VerifyField<uint64_t>(verifier, VT_IN0_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_M, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_N, 8) &&
           verifier.EndTable();
  }
};

struct MatmulMultiCoreReuseProgramConfigBuilder {
  typedef MatmulMultiCoreReuseProgramConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_compute_with_storage_grid_size(const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size) {
    fbb_.AddStruct(MatmulMultiCoreReuseProgramConfig::VT_COMPUTE_WITH_STORAGE_GRID_SIZE, compute_with_storage_grid_size);
  }
  void add_in0_block_w(uint64_t in0_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_IN0_BLOCK_W, in0_block_w, 0);
  }
  void add_out_subblock_h(uint64_t out_subblock_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_OUT_SUBBLOCK_H, out_subblock_h, 0);
  }
  void add_out_subblock_w(uint64_t out_subblock_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_OUT_SUBBLOCK_W, out_subblock_w, 0);
  }
  void add_per_core_m(uint64_t per_core_m) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_PER_CORE_M, per_core_m, 0);
  }
  void add_per_core_n(uint64_t per_core_n) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_PER_CORE_N, per_core_n, 0);
  }
  explicit MatmulMultiCoreReuseProgramConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig> CreateMatmulMultiCoreReuseProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size = nullptr,
    uint64_t in0_block_w = 0,
    uint64_t out_subblock_h = 0,
    uint64_t out_subblock_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0) {
  MatmulMultiCoreReuseProgramConfigBuilder builder_(_fbb);
  builder_.add_per_core_n(per_core_n);
  builder_.add_per_core_m(per_core_m);
  builder_.add_out_subblock_w(out_subblock_w);
  builder_.add_out_subblock_h(out_subblock_h);
  builder_.add_in0_block_w(in0_block_w);
  builder_.add_compute_with_storage_grid_size(compute_with_storage_grid_size);
  return builder_.Finish();
}

struct MatmulMultiCoreReuseProgramConfig::Traits {
  using type = MatmulMultiCoreReuseProgramConfig;
  static auto constexpr Create = CreateMatmulMultiCoreReuseProgramConfig;
};
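
// Usage sketch (not part of the generated header): the Create wrapper above
// drives the builder in a single call. The block and per-core dimensions are
// illustrative placeholders; the grid-size struct is left null for brevity.
inline ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig>
ExampleReuseProgramConfig(::flatbuffers::FlatBufferBuilder &fbb) {
  return CreateMatmulMultiCoreReuseProgramConfig(
      fbb,
      /*compute_with_storage_grid_size=*/nullptr,
      /*in0_block_w=*/2,
      /*out_subblock_h=*/4,
      /*out_subblock_w=*/2,
      /*per_core_m=*/8,
      /*per_core_n=*/8);
}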

struct MatmulMultiCoreReuseMultiCastProgramConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulMultiCoreReuseMultiCastProgramConfigBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulMultiCoreReuseMultiCastProgramConfig";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_COMPUTE_WITH_STORAGE_GRID_SIZE = 4,
    VT_IN0_BLOCK_W = 6,
    VT_OUT_SUBBLOCK_H = 8,
    VT_OUT_SUBBLOCK_W = 10,
    VT_OUT_BLOCK_H = 12,
    VT_OUT_BLOCK_W = 14,
    VT_PER_CORE_M = 16,
    VT_PER_CORE_N = 18,
    VT_TRANSPOSE_MCAST = 20,
    VT_FUSED_ACTIVATION = 22,
    VT_FUSE_BATCH = 24
  };
  const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size() const {
    return GetStruct<const tt::target::ttnn::CoreCoord *>(VT_COMPUTE_WITH_STORAGE_GRID_SIZE);
  }
  uint64_t in0_block_w() const {
    return GetField<uint64_t>(VT_IN0_BLOCK_W, 0);
  }
  uint64_t out_subblock_h() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_H, 0);
  }
  uint64_t out_subblock_w() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_W, 0);
  }
  uint64_t out_block_h() const {
    return GetField<uint64_t>(VT_OUT_BLOCK_H, 0);
  }
  uint64_t out_block_w() const {
    return GetField<uint64_t>(VT_OUT_BLOCK_W, 0);
  }
  uint64_t per_core_m() const {
    return GetField<uint64_t>(VT_PER_CORE_M, 0);
  }
  uint64_t per_core_n() const {
    return GetField<uint64_t>(VT_PER_CORE_N, 0);
  }
  bool transpose_mcast() const {
    return GetField<uint8_t>(VT_TRANSPOSE_MCAST, 0) != 0;
  }
  const tt::target::ttnn::UnaryWithParam *fused_activation() const {
    return GetPointer<const tt::target::ttnn::UnaryWithParam *>(VT_FUSED_ACTIVATION);
  }
  bool fuse_batch() const {
    return GetField<uint8_t>(VT_FUSE_BATCH, 0) != 0;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<tt::target::ttnn::CoreCoord>(verifier, VT_COMPUTE_WITH_STORAGE_GRID_SIZE, 8) &&
           VerifyField<uint64_t>(verifier, VT_IN0_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_BLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_M, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_N, 8) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_MCAST, 1) &&
           VerifyOffset(verifier, VT_FUSED_ACTIVATION) &&
           verifier.VerifyTable(fused_activation()) &&
           VerifyField<uint8_t>(verifier, VT_FUSE_BATCH, 1) &&
           verifier.EndTable();
  }
};

struct MatmulMultiCoreReuseMultiCastProgramConfigBuilder {
  typedef MatmulMultiCoreReuseMultiCastProgramConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_compute_with_storage_grid_size(const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size) {
    fbb_.AddStruct(MatmulMultiCoreReuseMultiCastProgramConfig::VT_COMPUTE_WITH_STORAGE_GRID_SIZE, compute_with_storage_grid_size);
  }
  void add_in0_block_w(uint64_t in0_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_IN0_BLOCK_W, in0_block_w, 0);
  }
  void add_out_subblock_h(uint64_t out_subblock_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_OUT_SUBBLOCK_H, out_subblock_h, 0);
  }
  void add_out_subblock_w(uint64_t out_subblock_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_OUT_SUBBLOCK_W, out_subblock_w, 0);
  }
  void add_out_block_h(uint64_t out_block_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_OUT_BLOCK_H, out_block_h, 0);
  }
  void add_out_block_w(uint64_t out_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_OUT_BLOCK_W, out_block_w, 0);
  }
  void add_per_core_m(uint64_t per_core_m) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_PER_CORE_M, per_core_m, 0);
  }
  void add_per_core_n(uint64_t per_core_n) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_PER_CORE_N, per_core_n, 0);
  }
  void add_transpose_mcast(bool transpose_mcast) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_TRANSPOSE_MCAST, static_cast<uint8_t>(transpose_mcast), 0);
  }
  void add_fused_activation(::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCastProgramConfig::VT_FUSED_ACTIVATION, fused_activation);
  }
  void add_fuse_batch(bool fuse_batch) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_FUSE_BATCH, static_cast<uint8_t>(fuse_batch), 0);
  }
  explicit MatmulMultiCoreReuseMultiCastProgramConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig> CreateMatmulMultiCoreReuseMultiCastProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size = nullptr,
    uint64_t in0_block_w = 0,
    uint64_t out_subblock_h = 0,
    uint64_t out_subblock_w = 0,
    uint64_t out_block_h = 0,
    uint64_t out_block_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0,
    bool transpose_mcast = false,
    ::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation = 0,
    bool fuse_batch = false) {
  MatmulMultiCoreReuseMultiCastProgramConfigBuilder builder_(_fbb);
  builder_.add_per_core_n(per_core_n);
  builder_.add_per_core_m(per_core_m);
  builder_.add_out_block_w(out_block_w);
  builder_.add_out_block_h(out_block_h);
  builder_.add_out_subblock_w(out_subblock_w);
  builder_.add_out_subblock_h(out_subblock_h);
  builder_.add_in0_block_w(in0_block_w);
  builder_.add_fused_activation(fused_activation);
  builder_.add_compute_with_storage_grid_size(compute_with_storage_grid_size);
  builder_.add_fuse_batch(fuse_batch);
  builder_.add_transpose_mcast(transpose_mcast);
  return builder_.Finish();
}

struct MatmulMultiCoreReuseMultiCastProgramConfig::Traits {
  using type = MatmulMultiCoreReuseMultiCastProgramConfig;
  static auto constexpr Create = CreateMatmulMultiCoreReuseMultiCastProgramConfig;
};
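
// Usage sketch (not part of the generated header): bool fields such as
// transpose_mcast and fuse_batch travel as uint8_t, while fused_activation is
// a nested UnaryWithParam table whose offset must be finished before this
// call; 0 leaves it absent. All numeric values are placeholders.
inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig>
ExampleMultiCastProgramConfig(::flatbuffers::FlatBufferBuilder &fbb) {
  return CreateMatmulMultiCoreReuseMultiCastProgramConfig(
      fbb,
      /*compute_with_storage_grid_size=*/nullptr,
      /*in0_block_w=*/2,
      /*out_subblock_h=*/4,
      /*out_subblock_w=*/2,
      /*out_block_h=*/4,
      /*out_block_w=*/2,
      /*per_core_m=*/8,
      /*per_core_n=*/8,
      /*transpose_mcast=*/false,
      /*fused_activation=*/0,
      /*fuse_batch=*/true);
}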

struct MatmulMultiCoreReuseMultiCast1DProgramConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulMultiCoreReuseMultiCast1DProgramConfig";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_COMPUTE_WITH_STORAGE_GRID_SIZE = 4,
    VT_IN0_BLOCK_W = 6,
    VT_OUT_SUBBLOCK_H = 8,
    VT_OUT_SUBBLOCK_W = 10,
    VT_OUT_BLOCK_H = 12,
    VT_OUT_BLOCK_W = 14,
    VT_PER_CORE_M = 16,
    VT_PER_CORE_N = 18,
    VT_FUSE_BATCH = 20,
    VT_FUSED_ACTIVATION = 22,
    VT_MCAST_IN0 = 24,
    VT_GATHER_IN0 = 26,
    VT_HOP_CORES = 28,
    VT_NUM_GLOBAL_CB_RECEIVERS = 30,
    VT_UNTILIZE_OUT = 32
  };
  const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size() const {
    return GetStruct<const tt::target::ttnn::CoreCoord *>(VT_COMPUTE_WITH_STORAGE_GRID_SIZE);
  }
  uint64_t in0_block_w() const {
    return GetField<uint64_t>(VT_IN0_BLOCK_W, 0);
  }
  uint64_t out_subblock_h() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_H, 0);
  }
  uint64_t out_subblock_w() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_W, 0);
  }
  uint64_t out_block_h() const {
    return GetField<uint64_t>(VT_OUT_BLOCK_H, 0);
  }
  uint64_t out_block_w() const {
    return GetField<uint64_t>(VT_OUT_BLOCK_W, 0);
  }
  uint64_t per_core_m() const {
    return GetField<uint64_t>(VT_PER_CORE_M, 0);
  }
  uint64_t per_core_n() const {
    return GetField<uint64_t>(VT_PER_CORE_N, 0);
  }
  bool fuse_batch() const {
    return GetField<uint8_t>(VT_FUSE_BATCH, 0) != 0;
  }
  const tt::target::ttnn::UnaryWithParam *fused_activation() const {
    return GetPointer<const tt::target::ttnn::UnaryWithParam *>(VT_FUSED_ACTIVATION);
  }
  bool mcast_in0() const {
    return GetField<uint8_t>(VT_MCAST_IN0, 0) != 0;
  }
  bool gather_in0() const {
    return GetField<uint8_t>(VT_GATHER_IN0, 0) != 0;
  }
  const tt::target::ttnn::CoreRangeSet *hop_cores() const {
    return GetPointer<const tt::target::ttnn::CoreRangeSet *>(VT_HOP_CORES);
  }
  uint64_t num_global_cb_receivers() const {
    return GetField<uint64_t>(VT_NUM_GLOBAL_CB_RECEIVERS, 0);
  }
  bool untilize_out() const {
    return GetField<uint8_t>(VT_UNTILIZE_OUT, 0) != 0;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<tt::target::ttnn::CoreCoord>(verifier, VT_COMPUTE_WITH_STORAGE_GRID_SIZE, 8) &&
           VerifyField<uint64_t>(verifier, VT_IN0_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_BLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_M, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_N, 8) &&
           VerifyField<uint8_t>(verifier, VT_FUSE_BATCH, 1) &&
           VerifyOffset(verifier, VT_FUSED_ACTIVATION) &&
           verifier.VerifyTable(fused_activation()) &&
           VerifyField<uint8_t>(verifier, VT_MCAST_IN0, 1) &&
           VerifyField<uint8_t>(verifier, VT_GATHER_IN0, 1) &&
           VerifyOffset(verifier, VT_HOP_CORES) &&
           verifier.VerifyTable(hop_cores()) &&
           VerifyField<uint64_t>(verifier, VT_NUM_GLOBAL_CB_RECEIVERS, 8) &&
           VerifyField<uint8_t>(verifier, VT_UNTILIZE_OUT, 1) &&
           verifier.EndTable();
  }
};

struct MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder {
  typedef MatmulMultiCoreReuseMultiCast1DProgramConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_compute_with_storage_grid_size(const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size) {
    fbb_.AddStruct(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_COMPUTE_WITH_STORAGE_GRID_SIZE, compute_with_storage_grid_size);
  }
  void add_in0_block_w(uint64_t in0_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_IN0_BLOCK_W, in0_block_w, 0);
  }
  void add_out_subblock_h(uint64_t out_subblock_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_OUT_SUBBLOCK_H, out_subblock_h, 0);
  }
  void add_out_subblock_w(uint64_t out_subblock_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_OUT_SUBBLOCK_W, out_subblock_w, 0);
  }
  void add_out_block_h(uint64_t out_block_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_OUT_BLOCK_H, out_block_h, 0);
  }
  void add_out_block_w(uint64_t out_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_OUT_BLOCK_W, out_block_w, 0);
  }
  void add_per_core_m(uint64_t per_core_m) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_PER_CORE_M, per_core_m, 0);
  }
  void add_per_core_n(uint64_t per_core_n) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_PER_CORE_N, per_core_n, 0);
  }
  void add_fuse_batch(bool fuse_batch) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_FUSE_BATCH, static_cast<uint8_t>(fuse_batch), 0);
  }
  void add_fused_activation(::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_FUSED_ACTIVATION, fused_activation);
  }
  void add_mcast_in0(bool mcast_in0) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_MCAST_IN0, static_cast<uint8_t>(mcast_in0), 0);
  }
  void add_gather_in0(bool gather_in0) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_GATHER_IN0, static_cast<uint8_t>(gather_in0), 0);
  }
  void add_hop_cores(::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> hop_cores) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_HOP_CORES, hop_cores);
  }
  void add_num_global_cb_receivers(uint64_t num_global_cb_receivers) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_NUM_GLOBAL_CB_RECEIVERS, num_global_cb_receivers, 0);
  }
  void add_untilize_out(bool untilize_out) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_UNTILIZE_OUT, static_cast<uint8_t>(untilize_out), 0);
  }
  explicit MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig> CreateMatmulMultiCoreReuseMultiCast1DProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size = nullptr,
    uint64_t in0_block_w = 0,
    uint64_t out_subblock_h = 0,
    uint64_t out_subblock_w = 0,
    uint64_t out_block_h = 0,
    uint64_t out_block_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0,
    bool fuse_batch = false,
    ::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation = 0,
    bool mcast_in0 = false,
    bool gather_in0 = false,
    ::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> hop_cores = 0,
    uint64_t num_global_cb_receivers = 0,
    bool untilize_out = false) {
  MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder builder_(_fbb);
  builder_.add_num_global_cb_receivers(num_global_cb_receivers);
  builder_.add_per_core_n(per_core_n);
  builder_.add_per_core_m(per_core_m);
  builder_.add_out_block_w(out_block_w);
  builder_.add_out_block_h(out_block_h);
  builder_.add_out_subblock_w(out_subblock_w);
  builder_.add_out_subblock_h(out_subblock_h);
  builder_.add_in0_block_w(in0_block_w);
  builder_.add_hop_cores(hop_cores);
  builder_.add_fused_activation(fused_activation);
  builder_.add_compute_with_storage_grid_size(compute_with_storage_grid_size);
  builder_.add_untilize_out(untilize_out);
  builder_.add_gather_in0(gather_in0);
  builder_.add_mcast_in0(mcast_in0);
  builder_.add_fuse_batch(fuse_batch);
  return builder_.Finish();
}

struct MatmulMultiCoreReuseMultiCast1DProgramConfig::Traits {
  using type = MatmulMultiCoreReuseMultiCast1DProgramConfig;
  static auto constexpr Create = CreateMatmulMultiCoreReuseMultiCast1DProgramConfig;
};
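
// Usage sketch (not part of the generated header): nested objects such as
// fused_activation and hop_cores are offsets that must be created on the
// FlatBufferBuilder before this table is started; passing 0 marks them
// absent, as done here. Numeric values are placeholders.
inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig>
Example1DProgramConfig(::flatbuffers::FlatBufferBuilder &fbb) {
  return CreateMatmulMultiCoreReuseMultiCast1DProgramConfig(
      fbb,
      /*compute_with_storage_grid_size=*/nullptr,
      /*in0_block_w=*/2,
      /*out_subblock_h=*/1,
      /*out_subblock_w=*/4,
      /*out_block_h=*/1,
      /*out_block_w=*/4,
      /*per_core_m=*/1,
      /*per_core_n=*/4,
      /*fuse_batch=*/true,
      /*fused_activation=*/0,
      /*mcast_in0=*/true,
      /*gather_in0=*/false,
      /*hop_cores=*/0,
      /*num_global_cb_receivers=*/1,
      /*untilize_out=*/false);
}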

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_IN0_BLOCK_W = 4,
    VT_PER_CORE_M = 6,
    VT_PER_CORE_N = 8,
    VT_FUSED_ACTIVATION = 10
  };
  uint64_t in0_block_w() const {
    return GetField<uint64_t>(VT_IN0_BLOCK_W, 0);
  }
  uint64_t per_core_m() const {
    return GetField<uint64_t>(VT_PER_CORE_M, 0);
  }
  uint64_t per_core_n() const {
    return GetField<uint64_t>(VT_PER_CORE_N, 0);
  }
  const tt::target::ttnn::UnaryWithParam *fused_activation() const {
    return GetPointer<const tt::target::ttnn::UnaryWithParam *>(VT_FUSED_ACTIVATION);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint64_t>(verifier, VT_IN0_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_M, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_N, 8) &&
           VerifyOffset(verifier, VT_FUSED_ACTIVATION) &&
           verifier.VerifyTable(fused_activation()) &&
           verifier.EndTable();
  }
};

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder {
  typedef MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_in0_block_w(uint64_t in0_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::VT_IN0_BLOCK_W, in0_block_w, 0);
  }
  void add_per_core_m(uint64_t per_core_m) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::VT_PER_CORE_M, per_core_m, 0);
  }
  void add_per_core_n(uint64_t per_core_n) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::VT_PER_CORE_N, per_core_n, 0);
  }
  void add_fused_activation(::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::VT_FUSED_ACTIVATION, fused_activation);
  }
  explicit MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    uint64_t in0_block_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0,
    ::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation = 0) {
  MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder builder_(_fbb);
  builder_.add_per_core_n(per_core_n);
  builder_.add_per_core_m(per_core_m);
  builder_.add_in0_block_w(in0_block_w);
  builder_.add_fused_activation(fused_activation);
  return builder_.Finish();
}

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::Traits {
  using type = MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig;
  static auto constexpr Create = CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig;
};
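
// Usage sketch (not part of the generated header): the DRAM-sharded variant
// carries no grid-size struct; only block shape plus an optional activation
// are stored. Values are placeholders.
inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig>
ExampleDRAMShardedProgramConfig(::flatbuffers::FlatBufferBuilder &fbb) {
  return CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig(
      fbb, /*in0_block_w=*/1, /*per_core_m=*/1, /*per_core_n=*/4,
      /*fused_activation=*/0);
}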

struct MatmulOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulOpBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulOp";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_A = 4,
    VT_B = 6,
    VT_OUT = 8,
    VT_TRANSPOSE_A = 10,
    VT_TRANSPOSE_B = 12,
    VT_MATMUL_PROGRAM_CONFIG_TYPE = 14,
    VT_MATMUL_PROGRAM_CONFIG = 16
  };
  const tt::target::ttnn::TensorRef *a() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_A);
  }
  const tt::target::ttnn::TensorRef *b() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_B);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  bool transpose_a() const {
    return GetField<uint8_t>(VT_TRANSPOSE_A, 0) != 0;
  }
  bool transpose_b() const {
    return GetField<uint8_t>(VT_TRANSPOSE_B, 0) != 0;
  }
  tt::target::ttnn::MatmulProgramConfig matmul_program_config_type() const {
    return static_cast<tt::target::ttnn::MatmulProgramConfig>(GetField<uint8_t>(VT_MATMUL_PROGRAM_CONFIG_TYPE, 0));
  }
  const void *matmul_program_config() const {
    return GetPointer<const void *>(VT_MATMUL_PROGRAM_CONFIG);
  }
  template<typename T> const T *matmul_program_config_as() const;
  const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *matmul_program_config_as_MatmulMultiCoreReuseProgramConfig() const {
    return matmul_program_config_type() == tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig ? static_cast<const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *>(matmul_program_config()) : nullptr;
  }
  const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *matmul_program_config_as_MatmulMultiCoreReuseMultiCastProgramConfig() const {
    return matmul_program_config_type() == tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig ? static_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *>(matmul_program_config()) : nullptr;
  }
  const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *matmul_program_config_as_MatmulMultiCoreReuseMultiCast1DProgramConfig() const {
    return matmul_program_config_type() == tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig ? static_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *>(matmul_program_config()) : nullptr;
  }
  const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *matmul_program_config_as_MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig() const {
    return matmul_program_config_type() == tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig ? static_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *>(matmul_program_config()) : nullptr;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_A) &&
           verifier.VerifyTable(a()) &&
           VerifyOffset(verifier, VT_B) &&
           verifier.VerifyTable(b()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_A, 1) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_B, 1) &&
           VerifyField<uint8_t>(verifier, VT_MATMUL_PROGRAM_CONFIG_TYPE, 1) &&
           VerifyOffset(verifier, VT_MATMUL_PROGRAM_CONFIG) &&
           VerifyMatmulProgramConfig(verifier, matmul_program_config(), matmul_program_config_type()) &&
           verifier.EndTable();
  }
};

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseProgramConfig();
}

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseMultiCastProgramConfig();
}

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseMultiCast1DProgramConfig();
}

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig();
}
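
// Usage sketch (not part of the generated header): reading the union back.
// matmul_program_config_as<T>() returns nullptr unless the stored tag matches
// T, so the null check doubles as the type dispatch.
inline uint64_t ExampleReadIn0BlockW(const MatmulOp *op) {
  if (const auto *cfg = op->matmul_program_config_as<
          tt::target::ttnn::MatmulMultiCoreReuseProgramConfig>()) {
    return cfg->in0_block_w();
  }
  return 0;  // tag is NONE or one of the other config variants
}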

struct MatmulOpBuilder {
  typedef MatmulOp Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_a(::flatbuffers::Offset<tt::target::ttnn::TensorRef> a) {
    fbb_.AddOffset(MatmulOp::VT_A, a);
  }
  void add_b(::flatbuffers::Offset<tt::target::ttnn::TensorRef> b) {
    fbb_.AddOffset(MatmulOp::VT_B, b);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(MatmulOp::VT_OUT, out);
  }
  void add_transpose_a(bool transpose_a) {
    fbb_.AddElement<uint8_t>(MatmulOp::VT_TRANSPOSE_A, static_cast<uint8_t>(transpose_a), 0);
  }
  void add_transpose_b(bool transpose_b) {
    fbb_.AddElement<uint8_t>(MatmulOp::VT_TRANSPOSE_B, static_cast<uint8_t>(transpose_b), 0);
  }
  void add_matmul_program_config_type(tt::target::ttnn::MatmulProgramConfig matmul_program_config_type) {
    fbb_.AddElement<uint8_t>(MatmulOp::VT_MATMUL_PROGRAM_CONFIG_TYPE, static_cast<uint8_t>(matmul_program_config_type), 0);
  }
  void add_matmul_program_config(::flatbuffers::Offset<void> matmul_program_config) {
    fbb_.AddOffset(MatmulOp::VT_MATMUL_PROGRAM_CONFIG, matmul_program_config);
  }
  explicit MatmulOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulOp>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulOp> CreateMatmulOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> a = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> b = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    bool transpose_a = false,
    bool transpose_b = false,
    tt::target::ttnn::MatmulProgramConfig matmul_program_config_type = tt::target::ttnn::MatmulProgramConfig::NONE,
    ::flatbuffers::Offset<void> matmul_program_config = 0) {
  MatmulOpBuilder builder_(_fbb);
  builder_.add_matmul_program_config(matmul_program_config);
  builder_.add_out(out);
  builder_.add_b(b);
  builder_.add_a(a);
  builder_.add_matmul_program_config_type(matmul_program_config_type);
  builder_.add_transpose_b(transpose_b);
  builder_.add_transpose_a(transpose_a);
  return builder_.Finish();
}

struct MatmulOp::Traits {
  using type = MatmulOp;
  static auto constexpr Create = CreateMatmulOp;
};
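
// Usage sketch (not part of the generated header): a union member is written
// as a tag plus a type-erased offset obtained via .Union(). The tensor refs
// are left as 0 (absent) placeholders; real callers pass TensorRef offsets
// built earlier into the same FlatBufferBuilder.
inline ::flatbuffers::Offset<MatmulOp> ExampleMatmulOp(
    ::flatbuffers::FlatBufferBuilder &fbb) {
  auto cfg = CreateMatmulMultiCoreReuseProgramConfig(
      fbb, nullptr, /*in0_block_w=*/2, /*out_subblock_h=*/4,
      /*out_subblock_w=*/2, /*per_core_m=*/8, /*per_core_n=*/8);
  return CreateMatmulOp(
      fbb, /*a=*/0, /*b=*/0, /*out=*/0,
      /*transpose_a=*/false, /*transpose_b=*/false,
      tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig,
      cfg.Union());
}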

struct LinearOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef LinearOpBuilder Builder;
  struct Traits;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.LinearOp";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_A = 4,
    VT_B = 6,
    VT_BIAS = 8,
    VT_OUT = 10,
    VT_TRANSPOSE_A = 12,
    VT_TRANSPOSE_B = 14
  };
  const tt::target::ttnn::TensorRef *a() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_A);
  }
  const tt::target::ttnn::TensorRef *b() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_B);
  }
  const tt::target::ttnn::TensorRef *bias() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_BIAS);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  bool transpose_a() const {
    return GetField<uint8_t>(VT_TRANSPOSE_A, 0) != 0;
  }
  bool transpose_b() const {
    return GetField<uint8_t>(VT_TRANSPOSE_B, 0) != 0;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_A) &&
           verifier.VerifyTable(a()) &&
           VerifyOffset(verifier, VT_B) &&
           verifier.VerifyTable(b()) &&
           VerifyOffset(verifier, VT_BIAS) &&
           verifier.VerifyTable(bias()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_A, 1) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_B, 1) &&
           verifier.EndTable();
  }
};

struct LinearOpBuilder {
  typedef LinearOp Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_a(::flatbuffers::Offset<tt::target::ttnn::TensorRef> a) {
    fbb_.AddOffset(LinearOp::VT_A, a);
  }
  void add_b(::flatbuffers::Offset<tt::target::ttnn::TensorRef> b) {
    fbb_.AddOffset(LinearOp::VT_B, b);
  }
  void add_bias(::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias) {
    fbb_.AddOffset(LinearOp::VT_BIAS, bias);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(LinearOp::VT_OUT, out);
  }
  void add_transpose_a(bool transpose_a) {
    fbb_.AddElement<uint8_t>(LinearOp::VT_TRANSPOSE_A, static_cast<uint8_t>(transpose_a), 0);
  }
  void add_transpose_b(bool transpose_b) {
    fbb_.AddElement<uint8_t>(LinearOp::VT_TRANSPOSE_B, static_cast<uint8_t>(transpose_b), 0);
  }
  explicit LinearOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<LinearOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<LinearOp>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<LinearOp> CreateLinearOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> a = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> b = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    bool transpose_a = false,
    bool transpose_b = false) {
  LinearOpBuilder builder_(_fbb);
  builder_.add_out(out);
  builder_.add_bias(bias);
  builder_.add_b(b);
  builder_.add_a(a);
  builder_.add_transpose_b(transpose_b);
  builder_.add_transpose_a(transpose_a);
  return builder_.Finish();
}

struct LinearOp::Traits {
  using type = LinearOp;
  static auto constexpr Create = CreateLinearOp;
};
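
// Usage sketch (not part of the generated header): LinearOp is MatmulOp plus
// an optional bias tensor; a 0 bias offset means "no bias". The TensorRef
// offsets are assumed to be supplied by the caller.
inline ::flatbuffers::Offset<LinearOp> ExampleLinearOp(
    ::flatbuffers::FlatBufferBuilder &fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> a,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> b,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
  return CreateLinearOp(fbb, a, b, /*bias=*/0, out,
                        /*transpose_a=*/false, /*transpose_b=*/true);
}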

inline bool VerifyMatmulProgramConfig(::flatbuffers::Verifier &verifier, const void *obj, MatmulProgramConfig type) {
  switch (type) {
    case MatmulProgramConfig::NONE: {
      return true;
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    default: return true;
  }
}

inline bool VerifyMatmulProgramConfigVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<MatmulProgramConfig> *types) {
  if (!values || !types) return !values && !types;
  if (values->size() != types->size()) return false;
  for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
    if (!VerifyMatmulProgramConfig(
        verifier, values->Get(i), types->GetEnum<MatmulProgramConfig>(i))) {
      return false;
    }
  }
  return true;
}
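
// Usage sketch (not part of the generated header): verifying an untrusted
// buffer whose root is assumed to be a MatmulOp before touching it.
// Verifier::VerifyBuffer walks the whole table, and MatmulOp::Verify in turn
// calls VerifyMatmulProgramConfig for the union member.
inline const MatmulOp *ExampleVerifiedMatmulOp(const uint8_t *buf, size_t size) {
  ::flatbuffers::Verifier verifier(buf, size);
  if (!verifier.VerifyBuffer<MatmulOp>(nullptr)) {
    return nullptr;
  }
  return ::flatbuffers::GetRoot<MatmulOp>(buf);
}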

} // namespace ttnn
} // namespace target
} // namespace tt

#endif  // FLATBUFFERS_GENERATED_MATMUL_TT_TARGET_TTNN_H_