#ifndef FLATBUFFERS_GENERATED_MATMUL_TT_TARGET_TTNN_H_
#define FLATBUFFERS_GENERATED_MATMUL_TT_TARGET_TTNN_H_

#include "flatbuffers/flatbuffers.h"

static_assert(FLATBUFFERS_VERSION_MAJOR == 24 &&
              FLATBUFFERS_VERSION_MINOR == 3 &&
              FLATBUFFERS_VERSION_REVISION == 25,
              "Non-compatible flatbuffers version included");
struct MatmulMultiCoreReuseProgramConfigBuilder;

struct MatmulMultiCoreReuseMultiCastProgramConfigBuilder;

struct MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder;

struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder;

struct MatmulOpBuilder;

struct LinearOpBuilder;
// EnumNamesMatmulProgramConfig() (excerpt)
  static const char * const names[6] = {
    "MatmulMultiCoreReuseProgramConfig",
    "MatmulMultiCoreReuseMultiCastProgramConfig",
    "MatmulMultiCoreReuseMultiCast1DProgramConfig",
    "MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig",

// EnumNameMatmulProgramConfig() (excerpt)
  const size_t index = static_cast<size_t>(e);
bool VerifyMatmulProgramConfigVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<MatmulProgramConfig> *types);
// struct MatmulMultiCoreReuseProgramConfig (excerpt)
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulMultiCoreReuseProgramConfig";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
  // ...
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           // ...

// struct MatmulMultiCoreReuseProgramConfigBuilder (excerpt)
  ::flatbuffers::FlatBufferBuilder &fbb_;
  // ...
  ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig>(end);
    return o;
  }

inline ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig> CreateMatmulMultiCoreReuseProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size = nullptr,
    uint64_t in0_block_w = 0,
    uint64_t out_subblock_h = 0,
    uint64_t out_subblock_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0) {
  // ...
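// Usage sketch (not part of the generated header): building a
// MatmulMultiCoreReuseProgramConfig table with the helper above. Assumes
// tt::target::ttnn::CoreCoord is the generated struct with an (x, y)
// constructor; the 8x8 grid and the block sizes are illustrative values only.
//
//   ::flatbuffers::FlatBufferBuilder fbb;
//   tt::target::ttnn::CoreCoord grid(/*x=*/8, /*y=*/8);
//   auto cfg = tt::target::ttnn::CreateMatmulMultiCoreReuseProgramConfig(
//       fbb, &grid, /*in0_block_w=*/2, /*out_subblock_h=*/1,
//       /*out_subblock_w=*/1, /*per_core_m=*/4, /*per_core_n=*/4);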
// struct MatmulMultiCoreReuseMultiCastProgramConfig (excerpt)
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulMultiCoreReuseMultiCastProgramConfig";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
  // ...
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           // ...
           verifier.VerifyTable(fused_activation()) &&
           // ...

// struct MatmulMultiCoreReuseMultiCastProgramConfigBuilder (excerpt)
  ::flatbuffers::FlatBufferBuilder &fbb_;
  // ...
  ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig>(end);
    return o;
  }

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig> CreateMatmulMultiCoreReuseMultiCastProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size = nullptr,
    uint64_t in0_block_w = 0,
    uint64_t out_subblock_h = 0,
    uint64_t out_subblock_w = 0,
    uint64_t out_block_h = 0,
    uint64_t out_block_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0,
    bool transpose_mcast = false,
    ::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation = 0,
    bool fuse_batch = false) {
  // ...
// struct MatmulMultiCoreReuseMultiCast1DProgramConfig (excerpt)
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulMultiCoreReuseMultiCast1DProgramConfig";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
  // ...
  const tt::target::ttnn::CoreRangeSet *hop_cores() const {
    return GetPointer<const tt::target::ttnn::CoreRangeSet *>(VT_HOP_CORES);
  }
  // ...
  bool untilize_out() const {
    return GetField<uint8_t>(VT_UNTILIZE_OUT, 0) != 0;
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           // ...
           verifier.VerifyTable(fused_activation()) &&
           // ...
           verifier.VerifyTable(hop_cores()) &&
           // ...
           VerifyField<uint8_t>(verifier, VT_UNTILIZE_OUT, 1) &&
           // ...

// struct MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder (excerpt)
  ::flatbuffers::FlatBufferBuilder &fbb_;
  // ...
  void add_hop_cores(::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> hop_cores) {
    fbb_.AddOffset(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_HOP_CORES, hop_cores);
  }
  // ...
  void add_untilize_out(bool untilize_out) {
    fbb_.AddElement<uint8_t>(MatmulMultiCoreReuseMultiCast1DProgramConfig::VT_UNTILIZE_OUT, static_cast<uint8_t>(untilize_out), 0);
  }
  ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig>(end);
    return o;
  }

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig> CreateMatmulMultiCoreReuseMultiCast1DProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size = nullptr,
    uint64_t in0_block_w = 0,
    uint64_t out_subblock_h = 0,
    uint64_t out_subblock_w = 0,
    uint64_t out_block_h = 0,
    uint64_t out_block_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0,
    bool fuse_batch = false,
    ::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation = 0,
    bool mcast_in0 = false,
    bool gather_in0 = false,
    ::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> hop_cores = 0,
    uint64_t num_global_cb_receivers = 0,
    bool untilize_out = false) {
  // ...
// struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig (excerpt)
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
  // ...
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           // ...
           verifier.VerifyTable(fused_activation()) &&
           // ...

// struct MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder (excerpt)
  ::flatbuffers::FlatBufferBuilder &fbb_;
  // ...
  ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig>(end);
    return o;
  }

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    uint64_t in0_block_w = 0,
    uint64_t per_core_m = 0,
    uint64_t per_core_n = 0,
    ::flatbuffers::Offset<tt::target::ttnn::UnaryWithParam> fused_activation = 0) {
  // ...
// struct MatmulOp (excerpt)
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.MatmulOp";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    // ...
    VT_MATMUL_PROGRAM_CONFIG = 16
  };
  const tt::target::ttnn::TensorRef *a() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_A);
  }
  const tt::target::ttnn::TensorRef *b() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_B);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  // ...
  const void *matmul_program_config() const {
    return GetPointer<const void *>(VT_MATMUL_PROGRAM_CONFIG);
  }
  // ...
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_A) &&
           verifier.VerifyTable(a()) &&
           VerifyOffset(verifier, VT_B) &&
           verifier.VerifyTable(b()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           // ...
           VerifyOffset(verifier, VT_MATMUL_PROGRAM_CONFIG) &&
           // ...

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseProgramConfig();
}

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseMultiCastProgramConfig();
}

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseMultiCast1DProgramConfig();
}

template<> inline const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *MatmulOp::matmul_program_config_as<tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig>() const {
  return matmul_program_config_as_MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig();
}

// struct MatmulOpBuilder (excerpt)
  ::flatbuffers::FlatBufferBuilder &fbb_;
  // ...
  void add_a(::flatbuffers::Offset<tt::target::ttnn::TensorRef> a) {
    fbb_.AddOffset(MatmulOp::VT_A, a);
  }
  void add_b(::flatbuffers::Offset<tt::target::ttnn::TensorRef> b) {
    fbb_.AddOffset(MatmulOp::VT_B, b);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(MatmulOp::VT_OUT, out);
  }
  // ...
  void add_matmul_program_config(::flatbuffers::Offset<void> matmul_program_config) {
    fbb_.AddOffset(MatmulOp::VT_MATMUL_PROGRAM_CONFIG, matmul_program_config);
  }
  // ...
  ::flatbuffers::Offset<MatmulOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<MatmulOp>(end);
    return o;
  }

inline ::flatbuffers::Offset<MatmulOp> CreateMatmulOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> a = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> b = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    bool transpose_a = false,
    bool transpose_b = false,
    tt::target::ttnn::MatmulProgramConfig matmul_program_config_type = tt::target::ttnn::MatmulProgramConfig::NONE,
    ::flatbuffers::Offset<void> matmul_program_config = 0) {
  // ...
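// Usage sketch (not part of the generated header): attaching a program
// config to a MatmulOp. The union is stored as a (type enum, Offset<void>)
// pair, so the offset returned by CreateMatmul*ProgramConfig is passed via
// .Union() together with the matching MatmulProgramConfig tag. The tensor
// offsets a/b/out are assumed to have been created elsewhere with the same
// FlatBufferBuilder.
//
//   auto op = tt::target::ttnn::CreateMatmulOp(
//       fbb, a, b, out, /*transpose_a=*/false, /*transpose_b=*/false,
//       tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig,
//       cfg.Union());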
// struct LinearOp (excerpt)
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.LinearOp";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
  // ...
  const tt::target::ttnn::TensorRef *a() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_A);
  }
  const tt::target::ttnn::TensorRef *b() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_B);
  }
  const tt::target::ttnn::TensorRef *bias() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_BIAS);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  // ...
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_A) &&
           verifier.VerifyTable(a()) &&
           VerifyOffset(verifier, VT_B) &&
           verifier.VerifyTable(b()) &&
           VerifyOffset(verifier, VT_BIAS) &&
           verifier.VerifyTable(bias()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           // ...

// struct LinearOpBuilder (excerpt)
  ::flatbuffers::FlatBufferBuilder &fbb_;
  // ...
  void add_a(::flatbuffers::Offset<tt::target::ttnn::TensorRef> a) {
    fbb_.AddOffset(LinearOp::VT_A, a);
  }
  void add_b(::flatbuffers::Offset<tt::target::ttnn::TensorRef> b) {
    fbb_.AddOffset(LinearOp::VT_B, b);
  }
  void add_bias(::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias) {
    fbb_.AddOffset(LinearOp::VT_BIAS, bias);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(LinearOp::VT_OUT, out);
  }
  // ...
  ::flatbuffers::Offset<LinearOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<LinearOp>(end);
    return o;
  }

inline ::flatbuffers::Offset<LinearOp> CreateLinearOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> a = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> b = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    bool transpose_a = false,
    bool transpose_b = false) {
  // ...
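// Usage sketch (not part of the generated header): a LinearOp row is the
// three tensor refs plus an optional bias; both transpose flags default to
// false. The offsets a/b/bias/out are assumed to have been created elsewhere.
//
//   auto linear = tt::target::ttnn::CreateLinearOp(
//       fbb, a, b, bias, out, /*transpose_a=*/false, /*transpose_b=*/true);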
// VerifyMatmulProgramConfig (excerpt)
    case MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseMultiCastProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseMultiCast1DProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig: {
      auto ptr = reinterpret_cast<const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig *>(obj);
      return verifier.VerifyTable(ptr);
    }
    default: return true;
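// Usage sketch (not part of the generated header): when reading a MatmulOp by
// hand, the union payload is resolved by pairing the void pointer with its
// type tag before casting, mirroring the switch above. The `op` pointer is
// assumed to come from an already-verified buffer.
//
//   if (op->matmul_program_config_type() ==
//       tt::target::ttnn::MatmulProgramConfig::MatmulMultiCoreReuseProgramConfig) {
//     const auto *cfg = op->matmul_program_config_as<
//         tt::target::ttnn::MatmulMultiCoreReuseProgramConfig>();
//     uint64_t in0_block_w = cfg->in0_block_w();
//   }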
inline bool VerifyMatmulProgramConfigVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<MatmulProgramConfig> *types) {
  if (!values || !types) return !values && !types;
  if (values->size() != types->size()) return false;
  for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
    if (!VerifyMatmulProgramConfig(verifier, values->Get(i), types->GetEnum<MatmulProgramConfig>(i))) {
      return false;
    }
  }
  return true;
}