TT-MLIR
matmul_generated.h
// automatically generated by the FlatBuffers compiler, do not modify


#ifndef FLATBUFFERS_GENERATED_MATMUL_TT_TARGET_TTNN_H_
#define FLATBUFFERS_GENERATED_MATMUL_TT_TARGET_TTNN_H_

#include "flatbuffers/flatbuffers.h"

// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 24 &&
              FLATBUFFERS_VERSION_MINOR == 3 &&
              FLATBUFFERS_VERSION_REVISION == 25,
              "Non-compatible flatbuffers version included");


namespace tt {
namespace target {
namespace ttnn {

struct MatmulMultiCoreReuseProgramConfig;
struct MatmulMultiCoreReuseProgramConfigBuilder;

struct MatmulMultiCoreReuseMultiCastProgramConfig;
struct MatmulMultiCoreReuseMultiCastProgramConfigBuilder;

struct MatmulMultiCoreReuseMultiCast1DProgramConfig;
struct MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder;

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig;
struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder;

struct MatmulOp;
struct MatmulOpBuilder;

struct LinearOp;
struct LinearOpBuilder;

enum class MatmulProgramConfig : uint8_t {
  NONE = 0,
  MatmulMultiCoreReuseProgramConfig = 1,
  MatmulMultiCoreReuseMultiCastProgramConfig = 2,
  MatmulMultiCoreReuseMultiCast1DProgramConfig = 3,
  MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig = 4,
  MIN = NONE,
  MAX = MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig
};

inline const MatmulProgramConfig (&EnumValuesMatmulProgramConfig())[5] {
  static const MatmulProgramConfig values[] = {
    MatmulProgramConfig::NONE,
    MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig,
    MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig,
    MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig,
    MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig
  };
  return values;
}

inline const char * const *EnumNamesMatmulProgramConfig() {
  static const char * const names[6] = {
    "NONE",
    "MatmulMultiCoreReuseProgramConfig",
    "MatmulMultiCoreReuseMultiCastProgramConfig",
    "MatmulMultiCoreReuseMultiCast1DProgramConfig",
    "MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig",
    nullptr
  };
  return names;
}

inline const char *EnumNameMatmulProgramConfig(MatmulProgramConfig e) {
  if (::flatbuffers::IsOutRange(e, MatmulProgramConfig::NONE, MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig)) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesMatmulProgramConfig()[index];
}
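
// Usage sketch (not part of the generated header): mapping a union tag back
// to its schema name, e.g. for logging or error messages; out-of-range tags
// yield an empty string.
//
//   const char *name = EnumNameMatmulProgramConfig(
//       MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig);
//   // name == "MatmulMultiCoreReuseProgramConfig"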

template<typename T> struct MatmulProgramConfigTraits {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::NONE;
};

template<> struct MatmulProgramConfigTraits<tt::target::ttnn::MatmulMultiCoreReuseProgramConfig> {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig;
};

template<> struct MatmulProgramConfigTraits<tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig> {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig;
};

template<> struct MatmulProgramConfigTraits<tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig> {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig;
};

template<> struct MatmulProgramConfigTraits<tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> {
  static const MatmulProgramConfig enum_value = MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig;
};

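// Usage sketch (not part of the generated header): the traits template maps a
// union member type back to its tag at compile time, which is what generic
// builder code and the typed accessors below rely on.
//
//   static_assert(MatmulProgramConfigTraits<
//                     tt::target::ttnn::MatmulMultiCoreReuseProgramConfig>::enum_value
//                 == MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig, "");
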
bool VerifyMatmulProgramConfig(::flatbuffers::Verifier &verifier, const void *obj, MatmulProgramConfig type);
bool VerifyMatmulProgramConfigVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<MatmulProgramConfig> *types);

struct MatmulMultiCoreReuseProgramConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulMultiCoreReuseProgramConfigBuilder Builder;
  struct Traits;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_COMPUTE_WITH_STORAGE_GRID_SIZE = 4,
    VT_IN0_BLOCK_W = 6,
    VT_OUT_SUBBLOCK_H = 8,
    VT_OUT_SUBBLOCK_W = 10,
    VT_PER_CORE_M = 12,
    VT_PER_CORE_N = 14
  };
  const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size() const {
    return GetStruct<const tt::target::ttnn::CoreCoord *>(VT_COMPUTE_WITH_STORAGE_GRID_SIZE);
  }
  uint64_t in0_block_w() const {
    return GetField<uint64_t>(VT_IN0_BLOCK_W, 0);
  }
  uint64_t out_subblock_h() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_H, 0);
  }
  uint64_t out_subblock_w() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_W, 0);
  }
  uint64_t per_core_m() const {
    return GetField<uint64_t>(VT_PER_CORE_M, 0);
  }
  uint64_t per_core_n() const {
    return GetField<uint64_t>(VT_PER_CORE_N, 0);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<tt::target::ttnn::CoreCoord>(verifier, VT_COMPUTE_WITH_STORAGE_GRID_SIZE, 8) &&
           VerifyField<uint64_t>(verifier, VT_IN0_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_M, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_N, 8) &&
           verifier.EndTable();
  }
};

struct MatmulMultiCoreReuseProgramConfigBuilder {
  typedef MatmulMultiCoreReuseProgramConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_compute_with_storage_grid_size(const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size) {
    fbb_.AddStruct(MatmulMultiCoreReuseProgramConfig::VT_COMPUTE_WITH_STORAGE_GRID_SIZE, compute_with_storage_grid_size);
  }
  void add_in0_block_w(uint64_t in0_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_IN0_BLOCK_W, in0_block_w, 0);
  }
  void add_out_subblock_h(uint64_t out_subblock_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_OUT_SUBBLOCK_H, out_subblock_h, 0);
  }
  void add_out_subblock_w(uint64_t out_subblock_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_OUT_SUBBLOCK_W, out_subblock_w, 0);
  }
  void add_per_core_m(uint64_t per_core_m) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_PER_CORE_M, per_core_m, 0);
  }
  void add_per_core_n(uint64_t per_core_n) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseProgramConfig::VT_PER_CORE_N, per_core_n, 0);
  }
  explicit MatmulMultiCoreReuseProgramConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig> CreateMatmulMultiCoreReuseProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size = nullptr,
    uint64_t in0_block_w = 0,
    uint64_t out_subblock_h = 0,
    uint64_t out_subblock_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0) {
  MatmulMultiCoreReuseProgramConfigBuilder builder_(_fbb);
  builder_.add_per_core_n(per_core_n);
  builder_.add_per_core_m(per_core_m);
  builder_.add_out_subblock_w(out_subblock_w);
  builder_.add_out_subblock_h(out_subblock_h);
  builder_.add_in0_block_w(in0_block_w);
  builder_.add_compute_with_storage_grid_size(compute_with_storage_grid_size);
  return builder_.Finish();
}

struct MatmulMultiCoreReuseProgramConfig::Traits {
  using type = MatmulMultiCoreReuseProgramConfig;
  static auto constexpr Create = CreateMatmulMultiCoreReuseProgramConfig;
};

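// Usage sketch (not part of the generated header): building a
// MatmulMultiCoreReuseProgramConfig with the helper above. The CoreCoord
// constructor taking (x, y) grid dimensions is an assumption here; check the
// ttnn types schema for its actual fields.
//
//   ::flatbuffers::FlatBufferBuilder fbb;
//   tt::target::ttnn::CoreCoord grid(/*x=*/8, /*y=*/8);  // assumed ctor
//   auto cfg = CreateMatmulMultiCoreReuseProgramConfig(
//       fbb, &grid, /*in0_block_w=*/4, /*out_subblock_h=*/2,
//       /*out_subblock_w=*/2, /*per_core_m=*/8, /*per_core_n=*/8);
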
struct MatmulMultiCoreReuseMultiCastProgramConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulMultiCoreReuseMultiCastProgramConfigBuilder Builder;
  struct Traits;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_COMPUTE_WITH_STORAGE_GRID_SIZE = 4,
    VT_IN0_BLOCK_W = 6,
    VT_OUT_SUBBLOCK_H = 8,
    VT_OUT_SUBBLOCK_W = 10,
    VT_OUT_BLOCK_H = 12,
    VT_OUT_BLOCK_W = 14,
    VT_PER_CORE_M = 16,
    VT_PER_CORE_N = 18,
    VT_TRANSPOSE_MCAST = 20,
    VT_FUSED_ACTIVATION = 22,
    VT_FUSE_BATCH = 24
  };
  const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size() const {
    return GetStruct<const tt::target::ttnn::CoreCoord *>(VT_COMPUTE_WITH_STORAGE_GRID_SIZE);
  }
  uint64_t in0_block_w() const {
    return GetField<uint64_t>(VT_IN0_BLOCK_W, 0);
  }
  uint64_t out_subblock_h() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_H, 0);
  }
  uint64_t out_subblock_w() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_W, 0);
  }
  uint64_t out_block_h() const {
    return GetField<uint64_t>(VT_OUT_BLOCK_H, 0);
  }
  uint64_t out_block_w() const {
    return GetField<uint64_t>(VT_OUT_BLOCK_W, 0);
  }
  uint64_t per_core_m() const {
    return GetField<uint64_t>(VT_PER_CORE_M, 0);
  }
  uint64_t per_core_n() const {
    return GetField<uint64_t>(VT_PER_CORE_N, 0);
  }
  bool transpose_mcast() const {
    return GetField<uint8_t>(VT_TRANSPOSE_MCAST, 0) != 0;
  }
  const tt::target::ttnn::UnaryWithParam *fused_activation() const {
    return GetPointer<const tt::target::ttnn::UnaryWithParam *>(VT_FUSED_ACTIVATION);
  }
  bool fuse_batch() const {
    return GetField<uint8_t>(VT_FUSE_BATCH, 0) != 0;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<tt::target::ttnn::CoreCoord>(verifier, VT_COMPUTE_WITH_STORAGE_GRID_SIZE, 8) &&
           VerifyField<uint64_t>(verifier, VT_IN0_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_BLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_M, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_N, 8) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_MCAST, 1) &&
           VerifyOffset(verifier, VT_FUSED_ACTIVATION) &&
           verifier.VerifyTable(fused_activation()) &&
           VerifyField<uint8_t>(verifier, VT_FUSE_BATCH, 1) &&
           verifier.EndTable();
  }
};

struct MatmulMultiCoreReuseMultiCastProgramConfigBuilder {
  typedef MatmulMultiCoreReuseMultiCastProgramConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_compute_with_storage_grid_size(const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size) {
    fbb_.AddStruct(MatmulMultiCoreReuseMultiCastProgramConfig::VT_COMPUTE_WITH_STORAGE_GRID_SIZE, compute_with_storage_grid_size);
  }
  void add_in0_block_w(uint64_t in0_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_IN0_BLOCK_W, in0_block_w, 0);
  }
  void add_out_subblock_h(uint64_t out_subblock_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_OUT_SUBBLOCK_H, out_subblock_h, 0);
  }
  void add_out_subblock_w(uint64_t out_subblock_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_OUT_SUBBLOCK_W, out_subblock_w, 0);
  }
  void add_out_block_h(uint64_t out_block_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_OUT_BLOCK_H, out_block_h, 0);
  }
  void add_out_block_w(uint64_t out_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_OUT_BLOCK_W, out_block_w, 0);
  }
  void add_per_core_m(uint64_t per_core_m) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_PER_CORE_M, per_core_m, 0);
  }
  void add_per_core_n(uint64_t per_core_n) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_PER_CORE_N, per_core_n, 0);
  }
  void add_transpose_mcast(bool transpose_mcast) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_TRANSPOSE_MCAST, static_cast<uint8_t>(transpose_mcast), 0);
  }
  void add_fused_activation(::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCastProgramConfig::VT_FUSED_ACTIVATION, fused_activation);
  }
  void add_fuse_batch(bool fuse_batch) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCastProgramConfig::VT_FUSE_BATCH, static_cast<uint8_t>(fuse_batch), 0);
  }
  explicit MatmulMultiCoreReuseMultiCastProgramConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig> CreateMatmulMultiCoreReuseMultiCastProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size = nullptr,
    uint64_t in0_block_w = 0,
    uint64_t out_subblock_h = 0,
    uint64_t out_subblock_w = 0,
    uint64_t out_block_h = 0,
    uint64_t out_block_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0,
    bool transpose_mcast = false,
    ::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation = 0,
    bool fuse_batch = false) {
  MatmulMultiCoreReuseMultiCastProgramConfigBuilder builder_(_fbb);
  builder_.add_per_core_n(per_core_n);
  builder_.add_per_core_m(per_core_m);
  builder_.add_out_block_w(out_block_w);
  builder_.add_out_block_h(out_block_h);
  builder_.add_out_subblock_w(out_subblock_w);
  builder_.add_out_subblock_h(out_subblock_h);
  builder_.add_in0_block_w(in0_block_w);
  builder_.add_fused_activation(fused_activation);
  builder_.add_compute_with_storage_grid_size(compute_with_storage_grid_size);
  builder_.add_fuse_batch(fuse_batch);
  builder_.add_transpose_mcast(transpose_mcast);
  return builder_.Finish();
}

struct MatmulMultiCoreReuseMultiCastProgramConfig::Traits {
  using type = MatmulMultiCoreReuseMultiCastProgramConfig;
  static auto constexpr Create = CreateMatmulMultiCoreReuseMultiCastProgramConfig;
};

struct MatmulMultiCoreReuseMultiCast1DProgramConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder Builder;
  struct Traits;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_COMPUTE_WITH_STORAGE_GRID_SIZE = 4,
    VT_IN0_BLOCK_W = 6,
    VT_OUT_SUBBLOCK_H = 8,
    VT_OUT_SUBBLOCK_W = 10,
    VT_OUT_BLOCK_H = 12,
    VT_OUT_BLOCK_W = 14,
    VT_PER_CORE_M = 16,
    VT_PER_CORE_N = 18,
    VT_FUSE_BATCH = 20,
    VT_FUSED_ACTIVATION = 22,
    VT_MCAST_IN0 = 24,
    VT_GATHER_IN0 = 26,
    VT_HOP_CORES = 28,
    VT_NUM_GLOBAL_CB_RECEIVERS = 30
  };
  const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size() const {
    return GetStruct<const tt::target::ttnn::CoreCoord *>(VT_COMPUTE_WITH_STORAGE_GRID_SIZE);
  }
  uint64_t in0_block_w() const {
    return GetField<uint64_t>(VT_IN0_BLOCK_W, 0);
  }
  uint64_t out_subblock_h() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_H, 0);
  }
  uint64_t out_subblock_w() const {
    return GetField<uint64_t>(VT_OUT_SUBBLOCK_W, 0);
  }
  uint64_t out_block_h() const {
    return GetField<uint64_t>(VT_OUT_BLOCK_H, 0);
  }
  uint64_t out_block_w() const {
    return GetField<uint64_t>(VT_OUT_BLOCK_W, 0);
  }
  uint64_t per_core_m() const {
    return GetField<uint64_t>(VT_PER_CORE_M, 0);
  }
  uint64_t per_core_n() const {
    return GetField<uint64_t>(VT_PER_CORE_N, 0);
  }
  bool fuse_batch() const {
    return GetField<uint8_t>(VT_FUSE_BATCH, 0) != 0;
  }
  const tt::target::ttnn::UnaryWithParam *fused_activation() const {
    return GetPointer<const tt::target::ttnn::UnaryWithParam *>(VT_FUSED_ACTIVATION);
  }
  bool mcast_in0() const {
    return GetField<uint8_t>(VT_MCAST_IN0, 0) != 0;
  }
  bool gather_in0() const {
    return GetField<uint8_t>(VT_GATHER_IN0, 0) != 0;
  }
  const tt::target::ttnn::CoreRangeSet *hop_cores() const {
    return GetPointer<const tt::target::ttnn::CoreRangeSet *>(VT_HOP_CORES);
  }
  uint64_t num_global_cb_receivers() const {
    return GetField<uint64_t>(VT_NUM_GLOBAL_CB_RECEIVERS, 0);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<tt::target::ttnn::CoreCoord>(verifier, VT_COMPUTE_WITH_STORAGE_GRID_SIZE, 8) &&
           VerifyField<uint64_t>(verifier, VT_IN0_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_SUBBLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_BLOCK_H, 8) &&
           VerifyField<uint64_t>(verifier, VT_OUT_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_M, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_N, 8) &&
           VerifyField<uint8_t>(verifier, VT_FUSE_BATCH, 1) &&
           VerifyOffset(verifier, VT_FUSED_ACTIVATION) &&
           verifier.VerifyTable(fused_activation()) &&
           VerifyField<uint8_t>(verifier, VT_MCAST_IN0, 1) &&
           VerifyField<uint8_t>(verifier, VT_GATHER_IN0, 1) &&
           VerifyOffset(verifier, VT_HOP_CORES) &&
           verifier.VerifyTable(hop_cores()) &&
           VerifyField<uint64_t>(verifier, VT_NUM_GLOBAL_CB_RECEIVERS, 8) &&
           verifier.EndTable();
  }
};

struct MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder {
  typedef MatmulMultiCoreReuseMultiCast1DProgramConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_compute_with_storage_grid_size(const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size) {
    fbb_.AddStruct(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_COMPUTE_WITH_STORAGE_GRID_SIZE, compute_with_storage_grid_size);
  }
  void add_in0_block_w(uint64_t in0_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_IN0_BLOCK_W, in0_block_w, 0);
  }
  void add_out_subblock_h(uint64_t out_subblock_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_OUT_SUBBLOCK_H, out_subblock_h, 0);
  }
  void add_out_subblock_w(uint64_t out_subblock_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_OUT_SUBBLOCK_W, out_subblock_w, 0);
  }
  void add_out_block_h(uint64_t out_block_h) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_OUT_BLOCK_H, out_block_h, 0);
  }
  void add_out_block_w(uint64_t out_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_OUT_BLOCK_W, out_block_w, 0);
  }
  void add_per_core_m(uint64_t per_core_m) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_PER_CORE_M, per_core_m, 0);
  }
  void add_per_core_n(uint64_t per_core_n) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_PER_CORE_N, per_core_n, 0);
  }
  void add_fuse_batch(bool fuse_batch) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_FUSE_BATCH, static_cast<uint8_t>(fuse_batch), 0);
  }
  void add_fused_activation(::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_FUSED_ACTIVATION, fused_activation);
  }
  void add_mcast_in0(bool mcast_in0) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_MCAST_IN0, static_cast<uint8_t>(mcast_in0), 0);
  }
  void add_gather_in0(bool gather_in0) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_GATHER_IN0, static_cast<uint8_t>(gather_in0), 0);
  }
  void add_hop_cores(::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> hop_cores) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_HOP_CORES, hop_cores);
  }
  void add_num_global_cb_receivers(uint64_t num_global_cb_receivers) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_NUM_GLOBAL_CB_RECEIVERS, num_global_cb_receivers, 0);
  }
  explicit MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig> CreateMatmulMultiCoreReuseMultiCast1DProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size = nullptr,
    uint64_t in0_block_w = 0,
    uint64_t out_subblock_h = 0,
    uint64_t out_subblock_w = 0,
    uint64_t out_block_h = 0,
    uint64_t out_block_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0,
    bool fuse_batch = false,
    ::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation = 0,
    bool mcast_in0 = false,
    bool gather_in0 = false,
    ::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> hop_cores = 0,
    uint64_t num_global_cb_receivers = 0) {
  MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder builder_(_fbb);
  builder_.add_num_global_cb_receivers(num_global_cb_receivers);
  builder_.add_per_core_n(per_core_n);
  builder_.add_per_core_m(per_core_m);
  builder_.add_out_block_w(out_block_w);
  builder_.add_out_block_h(out_block_h);
  builder_.add_out_subblock_w(out_subblock_w);
  builder_.add_out_subblock_h(out_subblock_h);
  builder_.add_in0_block_w(in0_block_w);
  builder_.add_hop_cores(hop_cores);
  builder_.add_fused_activation(fused_activation);
  builder_.add_compute_with_storage_grid_size(compute_with_storage_grid_size);
  builder_.add_gather_in0(gather_in0);
  builder_.add_mcast_in0(mcast_in0);
  builder_.add_fuse_batch(fuse_batch);
  return builder_.Finish();
}

struct MatmulMultiCoreReuseMultiCast1DProgramConfig::Traits {
  using type = MatmulMultiCoreReuseMultiCast1DProgramConfig;
  static auto constexpr Create = CreateMatmulMultiCoreReuseMultiCast1DProgramConfig;
};

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder Builder;
  struct Traits;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_IN0_BLOCK_W = 4,
    VT_PER_CORE_M = 6,
    VT_PER_CORE_N = 8,
    VT_FUSED_ACTIVATION = 10
  };
  uint64_t in0_block_w() const {
    return GetField<uint64_t>(VT_IN0_BLOCK_W, 0);
  }
  uint64_t per_core_m() const {
    return GetField<uint64_t>(VT_PER_CORE_M, 0);
  }
  uint64_t per_core_n() const {
    return GetField<uint64_t>(VT_PER_CORE_N, 0);
  }
  const tt::target::ttnn::UnaryWithParam *fused_activation() const {
    return GetPointer<const tt::target::ttnn::UnaryWithParam *>(VT_FUSED_ACTIVATION);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint64_t>(verifier, VT_IN0_BLOCK_W, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_M, 8) &&
           VerifyField<uint64_t>(verifier, VT_PER_CORE_N, 8) &&
           VerifyOffset(verifier, VT_FUSED_ACTIVATION) &&
           verifier.VerifyTable(fused_activation()) &&
           verifier.EndTable();
  }
};

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder {
  typedef MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_in0_block_w(uint64_t in0_block_w) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::VT_IN0_BLOCK_W, in0_block_w, 0);
  }
  void add_per_core_m(uint64_t per_core_m) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::VT_PER_CORE_M, per_core_m, 0);
  }
  void add_per_core_n(uint64_t per_core_n) {
    fbb_.AddElement<uint64_t>(MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::VT_PER_CORE_N, per_core_n, 0);
  }
  void add_fused_activation(::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::VT_FUSED_ACTIVATION, fused_activation);
  }
  explicit MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    uint64_t in0_block_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0,
    ::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation = 0) {
  MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder builder_(_fbb);
  builder_.add_per_core_n(per_core_n);
  builder_.add_per_core_m(per_core_m);
  builder_.add_in0_block_w(in0_block_w);
  builder_.add_fused_activation(fused_activation);
  return builder_.Finish();
}

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::Traits {
  using type = MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig;
  static auto constexpr Create = CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig;
};

struct MatmulOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef MatmulOpBuilder Builder;
  struct Traits;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_A = 4,
    VT_B = 6,
    VT_OUT = 8,
    VT_TRANSPOSE_A = 10,
    VT_TRANSPOSE_B = 12,
    VT_MATMUL_PROGRAM_CONFIG_TYPE = 14,
    VT_MATMUL_PROGRAM_CONFIG = 16
  };
  const tt::target::ttnn::TensorRef *a() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_A);
  }
  const tt::target::ttnn::TensorRef *b() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_B);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  bool transpose_a() const {
    return GetField<uint8_t>(VT_TRANSPOSE_A, 0) != 0;
  }
  bool transpose_b() const {
    return GetField<uint8_t>(VT_TRANSPOSE_B, 0) != 0;
  }
  tt::target::ttnn::MatmulProgramConfig matmul_program_config_type() const {
    return static_cast<tt::target::ttnn::MatmulProgramConfig>(GetField<uint8_t>(VT_MATMUL_PROGRAM_CONFIG_TYPE, 0));
  }
  const void *matmul_program_config() const {
    return GetPointer<const void *>(VT_MATMUL_PROGRAM_CONFIG);
  }
  template<typename T> const T *matmul_program_config_as() const;
  const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *matmul_program_config_as_MatmulMultiCoreReuseProgramConfig() const {
    return matmul_program_config_type() == tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig ? static_cast<const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *>(matmul_program_config()) : nullptr;
  }
  const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *matmul_program_config_as_MatmulMultiCoreReuseMultiCastProgramConfig() const {
    return matmul_program_config_type() == tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig ? static_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *>(matmul_program_config()) : nullptr;
  }
  const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *matmul_program_config_as_MatmulMultiCoreReuseMultiCast1DProgramConfig() const {
    return matmul_program_config_type() == tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig ? static_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *>(matmul_program_config()) : nullptr;
  }
  const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *matmul_program_config_as_MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig() const {
    return matmul_program_config_type() == tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig ? static_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *>(matmul_program_config()) : nullptr;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_A) &&
           verifier.VerifyTable(a()) &&
           VerifyOffset(verifier, VT_B) &&
           verifier.VerifyTable(b()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_A, 1) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_B, 1) &&
           VerifyField<uint8_t>(verifier, VT_MATMUL_PROGRAM_CONFIG_TYPE, 1) &&
           VerifyOffset(verifier, VT_MATMUL_PROGRAM_CONFIG) &&
           VerifyMatmulProgramConfig(verifier, matmul_program_config(), matmul_program_config_type()) &&
           verifier.EndTable();
  }
};

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseProgramConfig();
}

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseMultiCastProgramConfig();
}

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseMultiCast1DProgramConfig();
}

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig();
}

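// Usage sketch (not part of the generated header): reading the union back from
// a MatmulOp. The typed accessor returns nullptr when the stored tag does not
// match the requested type, so the check and the cast happen together.
//
//   if (auto *cfg = op->matmul_program_config_as<
//           tt::target::ttnn::MatmulMultiCoreReuseProgramConfig>()) {
//     uint64_t block_w = cfg->in0_block_w();
//   }
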
struct MatmulOpBuilder {
  typedef MatmulOp Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_a(::flatbuffers::Offset<tt::target::ttnn::TensorRef> a) {
    fbb_.AddOffset(MatmulOp::VT_A, a);
  }
  void add_b(::flatbuffers::Offset<tt::target::ttnn::TensorRef> b) {
    fbb_.AddOffset(MatmulOp::VT_B, b);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(MatmulOp::VT_OUT, out);
  }
  void add_transpose_a(bool transpose_a) {
    fbb_.AddElement<uint8_t>(MatmulOp::VT_TRANSPOSE_A, static_cast<uint8_t>(transpose_a), 0);
  }
  void add_transpose_b(bool transpose_b) {
    fbb_.AddElement<uint8_t>(MatmulOp::VT_TRANSPOSE_B, static_cast<uint8_t>(transpose_b), 0);
  }
  void add_matmul_program_config_type(tt::target::ttnn::MatmulProgramConfig matmul_program_config_type) {
    fbb_.AddElement<uint8_t>(MatmulOp::VT_MATMUL_PROGRAM_CONFIG_TYPE, static_cast<uint8_t>(matmul_program_config_type), 0);
  }
  void add_matmul_program_config(::flatbuffers::Offset<void> matmul_program_config) {
    fbb_.AddOffset(MatmulOp::VT_MATMUL_PROGRAM_CONFIG, matmul_program_config);
  }
  explicit MatmulOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<MatmulOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulOp>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<MatmulOp> CreateMatmulOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> a = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> b = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    bool transpose_a = false,
    bool transpose_b = false,
    tt::target::ttnn::MatmulProgramConfig matmul_program_config_type = tt::target::ttnn::MatmulProgramConfig::NONE,
    ::flatbuffers::Offset<void> matmul_program_config = 0) {
  MatmulOpBuilder builder_(_fbb);
  builder_.add_matmul_program_config(matmul_program_config);
  builder_.add_out(out);
  builder_.add_b(b);
  builder_.add_a(a);
  builder_.add_matmul_program_config_type(matmul_program_config_type);
  builder_.add_transpose_b(transpose_b);
  builder_.add_transpose_a(transpose_a);
  return builder_.Finish();
}

struct MatmulOp::Traits {
  using type = MatmulOp;
  static auto constexpr Create = CreateMatmulOp;
};

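// Usage sketch (not part of the generated header): attaching a program config
// to a MatmulOp. The union takes both the tag and the type-erased offset
// (via .Union()). The TensorRef offsets a, b, and out, and the grid struct,
// are assumed to have been built earlier from the tensor/types schemas.
//
//   auto cfg = CreateMatmulMultiCoreReuseProgramConfig(
//       fbb, &grid, /*in0_block_w=*/4, /*out_subblock_h=*/2,
//       /*out_subblock_w=*/2, /*per_core_m=*/8, /*per_core_n=*/8);
//   auto op = CreateMatmulOp(
//       fbb, a, b, out, /*transpose_a=*/false, /*transpose_b=*/false,
//       MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig, cfg.Union());
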
struct LinearOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef LinearOpBuilder Builder;
  struct Traits;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_A = 4,
    VT_B = 6,
    VT_BIAS = 8,
    VT_OUT = 10,
    VT_TRANSPOSE_A = 12,
    VT_TRANSPOSE_B = 14
  };
  const tt::target::ttnn::TensorRef *a() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_A);
  }
  const tt::target::ttnn::TensorRef *b() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_B);
  }
  const tt::target::ttnn::TensorRef *bias() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_BIAS);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  bool transpose_a() const {
    return GetField<uint8_t>(VT_TRANSPOSE_A, 0) != 0;
  }
  bool transpose_b() const {
    return GetField<uint8_t>(VT_TRANSPOSE_B, 0) != 0;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_A) &&
           verifier.VerifyTable(a()) &&
           VerifyOffset(verifier, VT_B) &&
           verifier.VerifyTable(b()) &&
           VerifyOffset(verifier, VT_BIAS) &&
           verifier.VerifyTable(bias()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_A, 1) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSE_B, 1) &&
           verifier.EndTable();
  }
};

struct LinearOpBuilder {
  typedef LinearOp Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_a(::flatbuffers::Offset<tt::target::ttnn::TensorRef> a) {
    fbb_.AddOffset(LinearOp::VT_A, a);
  }
  void add_b(::flatbuffers::Offset<tt::target::ttnn::TensorRef> b) {
    fbb_.AddOffset(LinearOp::VT_B, b);
  }
  void add_bias(::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias) {
    fbb_.AddOffset(LinearOp::VT_BIAS, bias);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(LinearOp::VT_OUT, out);
  }
  void add_transpose_a(bool transpose_a) {
    fbb_.AddElement<uint8_t>(LinearOp::VT_TRANSPOSE_A, static_cast<uint8_t>(transpose_a), 0);
  }
  void add_transpose_b(bool transpose_b) {
    fbb_.AddElement<uint8_t>(LinearOp::VT_TRANSPOSE_B, static_cast<uint8_t>(transpose_b), 0);
  }
  explicit LinearOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<LinearOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<LinearOp>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<LinearOp> CreateLinearOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> a = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> b = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    bool transpose_a = false,
    bool transpose_b = false) {
  LinearOpBuilder builder_(_fbb);
  builder_.add_out(out);
  builder_.add_bias(bias);
  builder_.add_b(b);
  builder_.add_a(a);
  builder_.add_transpose_b(transpose_b);
  builder_.add_transpose_a(transpose_a);
  return builder_.Finish();
}

struct LinearOp::Traits {
  using type = LinearOp;
  static auto constexpr Create = CreateLinearOp;
};

inline bool VerifyMatmulProgramConfig(::flatbuffers::Verifier &verifier, const void *obj, MatmulProgramConfig type) {
  switch (type) {
    case MatmulProgramConfig::NONE: {
      return true;
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    default: return true;
  }
}

inline bool VerifyMatmulProgramConfigVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<MatmulProgramConfig> *types) {
  if (!values || !types) return !values && !types;
  if (values->size() != types->size()) return false;
  for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
    if (!VerifyMatmulProgramConfig(
        verifier, values->Get(i), types->GetEnum<MatmulProgramConfig>(i))) {
      return false;
    }
  }
  return true;
}
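
// Usage sketch (not part of the generated header): verifying an untrusted
// buffer whose root is assumed to be a MatmulOp before reading any fields.
// Verifier, VerifyBuffer, and GetRoot come from flatbuffers.h.
//
//   ::flatbuffers::Verifier verifier(buf, size);
//   if (verifier.VerifyBuffer<MatmulOp>(nullptr)) {
//     auto *op = ::flatbuffers::GetRoot<MatmulOp>(buf);
//     // op->a(), op->b(), op->out() are now safe to access.
//   }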

}  // namespace ttnn
}  // namespace target
}  // namespace tt

#endif  // FLATBUFFERS_GENERATED_MATMUL_TT_TARGET_TTNN_H_