TT-MLIR
Classes | Namespaces | Enumerations | Functions | Variables
matmul_generated.h File Reference
#include "flatbuffers/flatbuffers.h"
#include "ttmlir/Target/Common/types_generated.h"
#include "ttmlir/Target/TTNN/operations/eltwise_generated.h"
#include "ttmlir/Target/TTNN/types_generated.h"

Go to the source code of this file.

Classes

struct  tt::target::ttnn::MatmulProgramConfigTraits< T >
 
struct  tt::target::ttnn::MatmulProgramConfigTraits< tt::target::ttnn::MatmulMultiCoreReuseProgramConfig >
 
struct  tt::target::ttnn::MatmulProgramConfigTraits< tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig >
 
struct  tt::target::ttnn::MatmulProgramConfigTraits< tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig >
 
struct  tt::target::ttnn::MatmulProgramConfigTraits< tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig >
 
struct  tt::target::ttnn::FLATBUFFERS_FINAL_CLASS
 (NOTE: Doxygen artifact — FLATBUFFERS_FINAL_CLASS is a macro, not a struct name. The actual generated table structs (presumably MatmulMultiCoreReuseProgramConfig and related config/op tables) were obscured by macro expansion; verify against the source header.)
 
struct  tt::target::ttnn::MatmulMultiCoreReuseProgramConfigBuilder
 
struct  tt::target::ttnn::MatmulMultiCoreReuseProgramConfig::Traits
 
struct  tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfigBuilder
 
struct  tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig::Traits
 
struct  tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder
 
struct  tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig::Traits
 
struct  tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder
 
struct  tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig::Traits
 
struct  tt::target::ttnn::MatmulOpBuilder
 
struct  tt::target::ttnn::MatmulOp::Traits
 
struct  tt::target::ttnn::LinearOpBuilder
 
struct  tt::target::ttnn::LinearOp::Traits
 

Namespaces

 tt
 
 tt::target
 
 tt::target::ttnn
 

Enumerations

enum class  tt::target::ttnn::MatmulProgramConfig : uint8_t {
  tt::target::ttnn::NONE = 0 , tt::target::ttnn::MatmulMultiCoreReuseProgramConfig = 1 , tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig = 2 , tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig = 3 ,
  tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig = 4 , tt::target::ttnn::MIN = NONE , tt::target::ttnn::MAX = MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig
}
 

Functions

const MatmulProgramConfig(& tt::target::ttnn::EnumValuesMatmulProgramConfig ())[5]
 
const char *const * tt::target::ttnn::EnumNamesMatmulProgramConfig ()
 
const char * tt::target::ttnn::EnumNameMatmulProgramConfig (MatmulProgramConfig e)
 
bool tt::target::ttnn::VerifyMatmulProgramConfig (::flatbuffers::Verifier &verifier, const void *obj, MatmulProgramConfig type)
 
bool tt::target::ttnn::VerifyMatmulProgramConfigVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< MatmulProgramConfig > *types)
 
inline ::flatbuffers::Offset< MatmulMultiCoreReuseProgramConfig > tt::target::ttnn::CreateMatmulMultiCoreReuseProgramConfig (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size=nullptr, uint64_t in0_block_w=0, uint64_t out_subblock_h=0, uint64_t out_subblock_w=0, uint64_t per_core_m=0, uint64_t per_core_n=0)
 
inline ::flatbuffers::Offset< MatmulMultiCoreReuseMultiCastProgramConfig > tt::target::ttnn::CreateMatmulMultiCoreReuseMultiCastProgramConfig (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size=nullptr, uint64_t in0_block_w=0, uint64_t out_subblock_h=0, uint64_t out_subblock_w=0, uint64_t out_block_h=0, uint64_t out_block_w=0, uint64_t per_core_m=0, uint64_t per_core_n=0, bool transpose_mcast=false, ::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam > fused_activation=0, bool fuse_batch=false)
 
inline ::flatbuffers::Offset< MatmulMultiCoreReuseMultiCast1DProgramConfig > tt::target::ttnn::CreateMatmulMultiCoreReuseMultiCast1DProgramConfig (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size=nullptr, uint64_t in0_block_w=0, uint64_t out_subblock_h=0, uint64_t out_subblock_w=0, uint64_t out_block_h=0, uint64_t out_block_w=0, uint64_t per_core_m=0, uint64_t per_core_n=0, bool fuse_batch=false, ::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam > fused_activation=0, bool mcast_in0=false, bool gather_in0=false, ::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > hop_cores=0, uint64_t num_global_cb_receivers=0)
 
inline ::flatbuffers::Offset< MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig > tt::target::ttnn::CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig (::flatbuffers::FlatBufferBuilder &_fbb, uint64_t in0_block_w=0, uint64_t per_core_m=0, uint64_t per_core_n=0, ::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam > fused_activation=0)
 
template<>
const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig * tt::target::ttnn::MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseProgramConfig > () const
 
template<>
const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig * tt::target::ttnn::MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig > () const
 
template<>
const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig * tt::target::ttnn::MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig > () const
 
template<>
const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig * tt::target::ttnn::MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig > () const
 
inline ::flatbuffers::Offset< MatmulOp > tt::target::ttnn::CreateMatmulOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > a=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > b=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, bool transpose_a=false, bool transpose_b=false, tt::target::ttnn::MatmulProgramConfig matmul_program_config_type=tt::target::ttnn::MatmulProgramConfig::NONE, ::flatbuffers::Offset< void > matmul_program_config=0)
 
inline ::flatbuffers::Offset< LinearOp > tt::target::ttnn::CreateLinearOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > a=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > b=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, bool transpose_a=false, bool transpose_b=false)
 

Variables
(NOTE: the VT_* vtable-offset constants below belong to several distinct FlatBuffers tables that Doxygen has merged into one flat list — duplicate values such as VT_PER_CORE_M = 12 / VT_OUT_BLOCK_H = 12 and VT_TRANSPOSE_MCAST = 20 / VT_FUSE_BATCH = 20 are offsets in different tables, not conflicts. Consult the header for each struct's own constants.)
 VT_COMPUTE_WITH_STORAGE_GRID_SIZE = 4
 
 VT_IN0_BLOCK_W = 6
 
 VT_OUT_SUBBLOCK_H = 8
 
 VT_OUT_SUBBLOCK_W = 10
 
 VT_PER_CORE_M = 12
 
 VT_OUT_BLOCK_H = 12
 
 VT_OUT_BLOCK_W = 14
 
 VT_PER_CORE_N = 18
 
 VT_TRANSPOSE_MCAST = 20
 
 VT_FUSED_ACTIVATION = 22
 
 VT_FUSE_BATCH = 20
 
 VT_MCAST_IN0 = 24
 
 VT_GATHER_IN0 = 26
 
 VT_HOP_CORES = 28
 
 VT_A = 4
 
 VT_B = 6
 
 VT_OUT = 8
 
 VT_TRANSPOSE_A = 10
 
 VT_TRANSPOSE_B = 12
 
 VT_MATMUL_PROGRAM_CONFIG_TYPE = 14
 
 VT_BIAS = 8
 

Variable Documentation

◆ VT_A

VT_A = 4

◆ VT_B

VT_B = 6

◆ VT_BIAS

VT_BIAS = 8

◆ VT_COMPUTE_WITH_STORAGE_GRID_SIZE

VT_COMPUTE_WITH_STORAGE_GRID_SIZE = 4

◆ VT_FUSE_BATCH

VT_FUSE_BATCH = 20

◆ VT_FUSED_ACTIVATION

VT_FUSED_ACTIVATION = 22

◆ VT_GATHER_IN0

VT_GATHER_IN0 = 26

◆ VT_HOP_CORES

VT_HOP_CORES = 28

◆ VT_IN0_BLOCK_W

VT_IN0_BLOCK_W = 6

◆ VT_MATMUL_PROGRAM_CONFIG_TYPE

VT_MATMUL_PROGRAM_CONFIG_TYPE = 14

◆ VT_MCAST_IN0

VT_MCAST_IN0 = 24

◆ VT_OUT

VT_OUT = 8

◆ VT_OUT_BLOCK_H

VT_OUT_BLOCK_H = 12

◆ VT_OUT_BLOCK_W

VT_OUT_BLOCK_W = 14

◆ VT_OUT_SUBBLOCK_H

VT_OUT_SUBBLOCK_H = 8

◆ VT_OUT_SUBBLOCK_W

VT_OUT_SUBBLOCK_W = 10

◆ VT_PER_CORE_M

VT_PER_CORE_M = 12

◆ VT_PER_CORE_N

VT_PER_CORE_N = 18

◆ VT_TRANSPOSE_A

VT_TRANSPOSE_A = 10

◆ VT_TRANSPOSE_B

VT_TRANSPOSE_B = 12

◆ VT_TRANSPOSE_MCAST

VT_TRANSPOSE_MCAST = 20