TT-MLIR
Classes | Enumerations | Functions
tt::target::ttnn Namespace Reference

Classes

struct  TTNNBinaryBinarySchema
 
struct  FLATBUFFERS_FINAL_CLASS  (Doxygen artifact: the FlatBuffers FLATBUFFERS_FINAL_CLASS macro was parsed as a struct name; the actual struct name was lost in documentation generation)
 
struct  TTNNBinaryBuilder
 
struct  AllGatherOpBuilder
 
struct  CollectivePermuteOpBuilder
 
struct  MeshShardOpBuilder
 
struct  ReduceScatterOpBuilder
 
struct  Conv2dConfigBuilder
 
struct  PrepareConv2dWeightsOpBuilder
 
struct  Conv2dOpBuilder
 
struct  ConvTranspose2dOpBuilder
 
struct  CpuOpBuilder
 
struct  ArangeOpBuilder
 
struct  ConstantOpBuilder
 
struct  ConstructTensorOpBuilder
 
struct  EmptyOpBuilder
 
struct  FullOpBuilder
 
struct  NamedFullOpBuilder
 
struct  ConcatOpBuilder
 
struct  PadOpBuilder
 
struct  PermuteOpBuilder
 
struct  RepeatInterleaveOpBuilder
 
struct  RepeatOpBuilder
 
struct  ReshapeOpBuilder
 
struct  SliceOpBuilder
 
struct  TransposeOpBuilder
 
struct  DeallocateOpBuilder
 
struct  EltwiseQuantizationOpParamsTraits
 
struct  EltwiseQuantizationOpParamsTraits< tt::target::ttnn::QuantizeDequantizeOpParams >
 
struct  EltwiseQuantizationOpParamsTraits< tt::target::ttnn::RequantizeOpParams >
 
struct  EltwiseUnaryOpParamsTraits
 
struct  EltwiseUnaryOpParamsTraits< tt::target::ttnn::EltwiseOpWithFloatParams >
 
struct  EltwiseUnaryCompositeOpParamsTraits
 
struct  EltwiseUnaryCompositeOpParamsTraits< tt::target::ttnn::ClampScalarOpParams >
 
struct  EltwiseUnaryCompositeOpParamsTraits< tt::target::ttnn::ClampTensorOpParams >
 
struct  EltwiseOpWithFloatParamsBuilder
 
struct  EltwiseBinaryOpBuilder
 
struct  EltwiseBinaryCompositeOpBuilder
 
struct  QuantizeDequantizeOpParamsBuilder
 
struct  RequantizeOpParamsBuilder
 
struct  EltwiseQuantizationOpBuilder
 
struct  EltwiseTernaryWhereOpBuilder
 
struct  EltwiseUnaryOpBuilder
 
struct  ClampScalarOpParamsBuilder
 
struct  ClampTensorOpParamsBuilder
 
struct  EltwiseUnaryCompositeOpBuilder
 
struct  UnaryWithParamBuilder
 
struct  EmbeddingOpBuilder
 
struct  EmbeddingBackwardOpBuilder
 
struct  GetDeviceOpBuilder
 
struct  FillCacheOpBuilder
 
struct  UpdateCacheOpBuilder
 
struct  FromDeviceOpBuilder
 
struct  ToDeviceOpBuilder
 
struct  ToDTypeOpBuilder
 
struct  ToLayoutOpBuilder
 
struct  ToMemoryConfigOpBuilder
 
struct  TypecastOpBuilder
 
struct  MatmulProgramConfigTraits
 
struct  MatmulProgramConfigTraits< tt::target::ttnn::MatmulMultiCoreReuseProgramConfig >
 
struct  MatmulProgramConfigTraits< tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig >
 
struct  MatmulProgramConfigTraits< tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig >
 
struct  MatmulProgramConfigTraits< tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig >
 
struct  MatmulMultiCoreReuseProgramConfigBuilder
 
struct  MatmulMultiCoreReuseMultiCastProgramConfigBuilder
 
struct  MatmulMultiCoreReuseMultiCast1DProgramConfigBuilder
 
struct  MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfigBuilder
 
struct  MatmulOpBuilder
 
struct  LinearOpBuilder
 
struct  MorehCumSumOpBuilder
 
struct  Scale2DTraits
 
struct  Scale2DTraits< tt::target::ttnn::UniformScale2D >
 
struct  Scale2DTraits< tt::target::ttnn::NonUniformScale2D >
 
struct  MaxPool2dOpBuilder
 
struct  UniformScale2DBuilder
 
struct  NonUniformScale2DBuilder
 
struct  UpsampleOpBuilder
 
struct  ReductionOpBuilder
 
struct  ReductionArgMaxOpBuilder
 
struct  ReductionProdOpBuilder
 
struct  SoftmaxOpBuilder
 
struct  OpTypeTraits
 
struct  OpTypeTraits< tt::target::ttnn::AllGatherOp >
 
struct  OpTypeTraits< tt::target::ttnn::CollectivePermuteOp >
 
struct  OpTypeTraits< tt::target::ttnn::MeshShardOp >
 
struct  OpTypeTraits< tt::target::ttnn::ReduceScatterOp >
 
struct  OpTypeTraits< tt::target::ttnn::GetDeviceOp >
 
struct  OpTypeTraits< tt::target::ttnn::PrepareConv2dWeightsOp >
 
struct  OpTypeTraits< tt::target::ttnn::Conv2dOp >
 
struct  OpTypeTraits< tt::target::ttnn::ConvTranspose2dOp >
 
struct  OpTypeTraits< tt::target::ttnn::CpuOp >
 
struct  OpTypeTraits< tt::target::ttnn::ArangeOp >
 
struct  OpTypeTraits< tt::target::ttnn::ConstantOp >
 
struct  OpTypeTraits< tt::target::ttnn::ConstructTensorOp >
 
struct  OpTypeTraits< tt::target::ttnn::EmptyOp >
 
struct  OpTypeTraits< tt::target::ttnn::FullOp >
 
struct  OpTypeTraits< tt::target::ttnn::NamedFullOp >
 
struct  OpTypeTraits< tt::target::ttnn::ConcatOp >
 
struct  OpTypeTraits< tt::target::ttnn::PadOp >
 
struct  OpTypeTraits< tt::target::ttnn::PermuteOp >
 
struct  OpTypeTraits< tt::target::ttnn::RepeatInterleaveOp >
 
struct  OpTypeTraits< tt::target::ttnn::RepeatOp >
 
struct  OpTypeTraits< tt::target::ttnn::ReshapeOp >
 
struct  OpTypeTraits< tt::target::ttnn::SliceOp >
 
struct  OpTypeTraits< tt::target::ttnn::TransposeOp >
 
struct  OpTypeTraits< tt::target::ttnn::DeallocateOp >
 
struct  OpTypeTraits< tt::target::ttnn::EltwiseBinaryOp >
 
struct  OpTypeTraits< tt::target::ttnn::EltwiseBinaryCompositeOp >
 
struct  OpTypeTraits< tt::target::ttnn::EltwiseTernaryWhereOp >
 
struct  OpTypeTraits< tt::target::ttnn::EltwiseQuantizationOp >
 
struct  OpTypeTraits< tt::target::ttnn::EltwiseUnaryOp >
 
struct  OpTypeTraits< tt::target::ttnn::EltwiseUnaryCompositeOp >
 
struct  OpTypeTraits< tt::target::ttnn::EmbeddingBackwardOp >
 
struct  OpTypeTraits< tt::target::ttnn::EmbeddingOp >
 
struct  OpTypeTraits< tt::target::ttnn::FillCacheOp >
 
struct  OpTypeTraits< tt::target::ttnn::UpdateCacheOp >
 
struct  OpTypeTraits< tt::target::ttnn::FromDeviceOp >
 
struct  OpTypeTraits< tt::target::ttnn::ToDeviceOp >
 
struct  OpTypeTraits< tt::target::ttnn::ToDTypeOp >
 
struct  OpTypeTraits< tt::target::ttnn::ToLayoutOp >
 
struct  OpTypeTraits< tt::target::ttnn::ToMemoryConfigOp >
 
struct  OpTypeTraits< tt::target::ttnn::TypecastOp >
 
struct  OpTypeTraits< tt::target::ttnn::LinearOp >
 
struct  OpTypeTraits< tt::target::ttnn::MatmulOp >
 
struct  OpTypeTraits< tt::target::ttnn::MorehCumSumOp >
 
struct  OpTypeTraits< tt::target::ttnn::SoftmaxOp >
 
struct  OpTypeTraits< tt::target::ttnn::MaxPool2dOp >
 
struct  OpTypeTraits< tt::target::ttnn::UpsampleOp >
 
struct  OpTypeTraits< tt::target::ttnn::ReductionArgMaxOp >
 
struct  OpTypeTraits< tt::target::ttnn::ReductionOp >
 
struct  OpTypeTraits< tt::target::ttnn::ReductionProdOp >
 
struct  OperationBuilder
 
struct  ProgramBuilder
 
struct  DistributedTensorConfigTraits
 
struct  DistributedTensorConfigTraits< tt::target::ttnn::ReplicateTensor >
 
struct  DistributedTensorConfigTraits< tt::target::ttnn::ShardTensor >
 
struct  DistributedTensorConfigTraits< tt::target::ttnn::ShardTensor2D >
 
struct  DistributedTensorConfigTraits< tt::target::ttnn::AllGatherTensor >
 
struct  CoreRangeSetBuilder
 
struct  ReplicateTensorBuilder
 
struct  ShardTensorBuilder
 
struct  ShardTensor2DBuilder
 
struct  AllGatherTensorBuilder
 
struct  DistributionStrategyBuilder
 
struct  ShardSpecBuilder
 
struct  MemoryConfigBuilder
 
struct  MemoryDescBuilder
 
struct  LayoutDescBuilder
 
struct  TensorDescBuilder
 
struct  TensorRefBuilder
 

Enumerations

enum class  NamedFullOpType : uint32_t { Zeros = 0 , Ones = 1 , MIN = Zeros , MAX = Ones }
 
enum class  EltwiseBinaryOpType : uint32_t {
  Add = 0 , Multiply = 1 , Subtract = 2 , Equal = 3 ,
  NotEqual = 4 , GreaterEqual = 5 , GreaterThan = 6 , LessEqual = 7 ,
  LessThan = 8 , Divide = 9 , LogicalAnd = 10 , LogicalOr = 11 ,
  LogicalXor = 12 , MIN = Add , MAX = LogicalXor
}
 
enum class  EltwiseBinaryCompositeOpType : uint32_t {
  Maximum = 0 , Minimum = 1 , Remainder = 2 , Scatter = 3 ,
  Pow = 4 , Atan2 = 5 , BitwiseAnd = 6 , BitwiseOr = 7 ,
  BitwiseXor = 8 , MIN = Maximum , MAX = BitwiseXor
}
 
enum class  EltwiseQuantizationOpType : uint32_t {
  Quantize = 0 , Dequantize = 1 , Requantize = 2 , MIN = Quantize ,
  MAX = Requantize
}
 
enum class  EltwiseQuantizationOpParams : uint8_t {
  NONE = 0 , QuantizeDequantizeOpParams = 1 , RequantizeOpParams = 2 , MIN = NONE ,
  MAX = RequantizeOpParams
}
 
enum class  EltwiseUnaryOpType : uint32_t {
  Abs = 0 , Ceil = 1 , Cos = 2 , Floor = 3 ,
  Gelu = 4 , IsFinite = 5 , LogicalNot = 6 , Neg = 7 ,
  Relu = 8 , Sqrt = 9 , Rsqrt = 10 , Sigmoid = 11 ,
  Sin = 12 , Reciprocal = 13 , Sign = 14 , Tan = 15 ,
  Tanh = 16 , Atan = 17 , Exp = 18 , Log = 19 ,
  Expm1 = 20 , LeakyRelu = 21 , BitwiseNot = 22 , MIN = Abs ,
  MAX = BitwiseNot
}
 
enum class  EltwiseUnaryOpParams : uint8_t { NONE = 0 , EltwiseOpWithFloatParams = 1 , MIN = NONE , MAX = EltwiseOpWithFloatParams }
 
enum class  EltwiseUnaryCompositeOpType : uint32_t {
  Cbrt = 0 , ClampScalar = 1 , ClampTensor = 2 , Log1p = 3 ,
  MIN = Cbrt , MAX = Log1p
}
 
enum class  EltwiseUnaryCompositeOpParams : uint8_t {
  NONE = 0 , ClampScalarOpParams = 1 , ClampTensorOpParams = 2 , MIN = NONE ,
  MAX = ClampTensorOpParams
}
 
enum class  UnaryOpType : uint32_t {
  Exp = 0 , Recip = 1 , Gelu = 2 , Relu = 3 ,
  Sqrt = 4 , Sigmoid = 5 , Log = 6 , Tanh = 7 ,
  Log2 = 8 , Log10 = 9 , Sin = 10 , Cos = 11 ,
  Abs = 12 , AbsInt32 = 13 , Sign = 14 , Square = 15 ,
  Eqz = 16 , Nez = 17 , Gtz = 18 , Ltz = 19 ,
  Gez = 20 , Lez = 21 , ReluMax = 22 , ReluMin = 23 ,
  Power = 24 , LeakyRelu = 25 , Elu = 26 , Exp2 = 27 ,
  Heaviside = 28 , Expm1 = 29 , Signbit = 30 , Asin = 31 ,
  Acos = 32 , Rsqrt = 33 , Relu6 = 34 , Atan = 35 ,
  Erf = 36 , Erfc = 37 , Isinf = 38 , Isposinf = 39 ,
  Isneginf = 40 , Isnan = 41 , LogicalNotUnary = 42 , Isfinite = 43 ,
  Erfinv = 44 , I0 = 45 , I1 = 46 , Tan = 47 ,
  Rsub = 48 , Rdiv = 49 , Silu = 50 , Softplus = 51 ,
  Identity = 52 , Neg = 53 , AddUnarySfpu = 54 , SubUnarySfpu = 55 ,
  MulUnarySfpu = 56 , DivUnarySfpu = 57 , IdentityUint32 = 58 , UnaryNe = 59 ,
  UnaryGt = 60 , UnaryLt = 61 , TiledProd = 62 , Typecast = 63 ,
  BitwiseXor = 64 , BitwiseNot = 65 , BitwiseAnd = 66 , BitwiseOr = 67 ,
  RightShift = 68 , Floor = 69 , FloorFloat32 = 70 , Ceil = 71 ,
  CeilFloat32 = 72 , LeftShift = 73 , Remainder = 74 , Fmod = 75 ,
  Dropout = 76 , Fill = 77 , PreluSfpu = 78 , ZeroPoint = 79 ,
  MIN = Exp , MAX = ZeroPoint
}
 
enum class  MatmulProgramConfig : uint8_t {
  NONE = 0 , MatmulMultiCoreReuseProgramConfig = 1 , MatmulMultiCoreReuseMultiCastProgramConfig = 2 , MatmulMultiCoreReuseMultiCast1DProgramConfig = 3 ,
  MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig = 4 , MIN = NONE , MAX = MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig
}
 
enum class  Scale2D : uint8_t {
  NONE = 0 , UniformScale2D = 1 , NonUniformScale2D = 2 , MIN = NONE ,
  MAX = NonUniformScale2D
}
 
enum class  ReductionOpType : uint32_t {
  Sum = 0 , Mean = 1 , Max = 2 , Min = 3 ,
  MIN = Sum , MAX = Min
}
 
enum class  OpType : uint8_t {
  NONE = 0 , AllGatherOp = 1 , CollectivePermuteOp = 2 , MeshShardOp = 3 ,
  ReduceScatterOp = 4 , GetDeviceOp = 5 , PrepareConv2dWeightsOp = 6 , Conv2dOp = 7 ,
  ConvTranspose2dOp = 8 , CpuOp = 9 , ArangeOp = 10 , ConstantOp = 11 ,
  ConstructTensorOp = 12 , EmptyOp = 13 , FullOp = 14 , NamedFullOp = 15 ,
  ConcatOp = 16 , PadOp = 17 , PermuteOp = 18 , RepeatInterleaveOp = 19 ,
  RepeatOp = 20 , ReshapeOp = 21 , SliceOp = 22 , TransposeOp = 23 ,
  DeallocateOp = 24 , EltwiseBinaryOp = 25 , EltwiseBinaryCompositeOp = 26 , EltwiseTernaryWhereOp = 27 ,
  EltwiseQuantizationOp = 28 , EltwiseUnaryOp = 29 , EltwiseUnaryCompositeOp = 30 , EmbeddingBackwardOp = 31 ,
  EmbeddingOp = 32 , FillCacheOp = 33 , UpdateCacheOp = 34 , FromDeviceOp = 35 ,
  ToDeviceOp = 36 , ToDTypeOp = 37 , ToLayoutOp = 38 , ToMemoryConfigOp = 39 ,
  TypecastOp = 40 , LinearOp = 41 , MatmulOp = 42 , MorehCumSumOp = 43 ,
  SoftmaxOp = 44 , MaxPool2dOp = 45 , UpsampleOp = 46 , ReductionArgMaxOp = 47 ,
  ReductionOp = 48 , ReductionProdOp = 49 , MIN = NONE , MAX = ReductionProdOp
}
 
enum class  TensorMemoryLayout : uint16_t {
  Interleaved = 0 , SingleBank = 1 , HeightSharded = 2 , WidthSharded = 3 ,
  BlockSharded = 4 , MIN = Interleaved , MAX = BlockSharded
}
 
enum class  StorageType : uint16_t {
  Owned = 0 , Device = 1 , Borrowed = 2 , MultiDevice = 3 ,
  MultiDeviceHost = 4 , MIN = Owned , MAX = MultiDeviceHost
}
 
enum class  MeshShardDirection : uint32_t { FullToShardShape = 0 , ShardToFullShape = 1 , MIN = FullToShardShape , MAX = ShardToFullShape }
 
enum class  MeshShardType : uint32_t {
  Identity = 0 , Replicate = 1 , Maximal = 2 , Devices = 3 ,
  MIN = Identity , MAX = Devices
}
 
enum class  DistributedTensorConfig : uint8_t {
  NONE = 0 , ReplicateTensor = 1 , ShardTensor = 2 , ShardTensor2D = 3 ,
  AllGatherTensor = 4 , MIN = NONE , MAX = AllGatherTensor
}
 

Functions

inline ::flatbuffers::Offset< TTNNBinary > CreateTTNNBinary (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::Version *version=nullptr, ::flatbuffers::Offset<::flatbuffers::String > ttmlir_git_hash=0, ::flatbuffers::Offset< tt::target::SystemDesc > system_desc=0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::Program >>> programs=0)
 
inline ::flatbuffers::Offset< TTNNBinary > CreateTTNNBinaryDirect (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::Version *version=nullptr, const char *ttmlir_git_hash=nullptr, ::flatbuffers::Offset< tt::target::SystemDesc > system_desc=0, const std::vector<::flatbuffers::Offset< tt::target::ttnn::Program >> *programs=nullptr)
 
const tt::target::ttnn::TTNNBinary * GetTTNNBinary (const void *buf)
 
const tt::target::ttnn::TTNNBinary * GetSizePrefixedTTNNBinary (const void *buf)
 
const char * TTNNBinaryIdentifier ()
 
bool TTNNBinaryBufferHasIdentifier (const void *buf)
 
bool SizePrefixedTTNNBinaryBufferHasIdentifier (const void *buf)
 
bool VerifyTTNNBinaryBuffer (::flatbuffers::Verifier &verifier)
 
bool VerifySizePrefixedTTNNBinaryBuffer (::flatbuffers::Verifier &verifier)
 
const char * TTNNBinaryExtension ()
 
void FinishTTNNBinaryBuffer (::flatbuffers::FlatBufferBuilder &fbb, ::flatbuffers::Offset< tt::target::ttnn::TTNNBinary > root)
 
void FinishSizePrefixedTTNNBinaryBuffer (::flatbuffers::FlatBufferBuilder &fbb, ::flatbuffers::Offset< tt::target::ttnn::TTNNBinary > root)
 
inline ::flatbuffers::Offset< AllGatherOp > CreateAllGatherOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, int32_t all_gather_dim=0, uint32_t cluster_axis=0, uint32_t num_links=0)
 
inline ::flatbuffers::Offset< CollectivePermuteOp > CreateCollectivePermuteOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> source_target_pairs=0)
 
inline ::flatbuffers::Offset< CollectivePermuteOp > CreateCollectivePermuteOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, const std::vector< int64_t > *source_target_pairs=nullptr)
 
inline ::flatbuffers::Offset< MeshShardOp > CreateMeshShardOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, tt::target::ttnn::MeshShardDirection shard_direction=tt::target::ttnn::MeshShardDirection::FullToShardShape, tt::target::ttnn::MeshShardType shard_type=tt::target::ttnn::MeshShardType::Identity, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> shard_shape=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> shard_dims=0)
 
inline ::flatbuffers::Offset< MeshShardOp > CreateMeshShardOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, tt::target::ttnn::MeshShardDirection shard_direction=tt::target::ttnn::MeshShardDirection::FullToShardShape, tt::target::ttnn::MeshShardType shard_type=tt::target::ttnn::MeshShardType::Identity, const std::vector< int64_t > *shard_shape=nullptr, const std::vector< int64_t > *shard_dims=nullptr)
 
inline ::flatbuffers::Offset< ReduceScatterOp > CreateReduceScatterOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, int32_t scatter_dim=0, uint32_t reduce_type=0, uint32_t cluster_axis=0, uint32_t num_links=0)
 
inline ::flatbuffers::Offset< Conv2dConfig > CreateConv2dConfig (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::DataType weights_dtype=tt::target::DataType::Float32, ::flatbuffers::Offset<::flatbuffers::String > activation=0, uint32_t input_channels_alignment=0, bool deallocate_activation=false, bool reallocate_halo_output=false, uint32_t act_block_h_override=0, uint32_t act_block_w_div=0, bool reshard_if_not_optimal=false, bool override_sharding_config=false, ::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout > shard_layout=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > core_grid=0, bool transpose_shards=false, tt::target::TensorLayout output_layout=tt::target::TensorLayout::RowMajor, bool preprocess_weights_on_device=false, bool always_preprocess_weights=false, bool enable_act_double_buffer=false, bool enable_weights_double_buffer=false, bool enable_split_reader=false, bool enable_subblock_padding=false)
 
inline ::flatbuffers::Offset< Conv2dConfig > CreateConv2dConfigDirect (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::DataType weights_dtype=tt::target::DataType::Float32, const char *activation=nullptr, uint32_t input_channels_alignment=0, bool deallocate_activation=false, bool reallocate_halo_output=false, uint32_t act_block_h_override=0, uint32_t act_block_w_div=0, bool reshard_if_not_optimal=false, bool override_sharding_config=false, ::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout > shard_layout=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > core_grid=0, bool transpose_shards=false, tt::target::TensorLayout output_layout=tt::target::TensorLayout::RowMajor, bool preprocess_weights_on_device=false, bool always_preprocess_weights=false, bool enable_act_double_buffer=false, bool enable_weights_double_buffer=false, bool enable_split_reader=false, bool enable_subblock_padding=false)
 
inline ::flatbuffers::Offset< PrepareConv2dWeightsOp > CreatePrepareConv2dWeightsOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight_tensor=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > input_memory_config=0, tt::target::TensorLayout input_tensor_layout=tt::target::TensorLayout::RowMajor, ::flatbuffers::Offset<::flatbuffers::String > weights_format=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, bool has_bias=false, uint32_t groups=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0)
 
inline ::flatbuffers::Offset< PrepareConv2dWeightsOp > CreatePrepareConv2dWeightsOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight_tensor=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > input_memory_config=0, tt::target::TensorLayout input_tensor_layout=tt::target::TensorLayout::RowMajor, const char *weights_format=nullptr, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *dilation=nullptr, bool has_bias=false, uint32_t groups=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0)
 
inline ::flatbuffers::Offset< Conv2dOp > CreateConv2dOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, uint32_t groups=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0)
 
inline ::flatbuffers::Offset< Conv2dOp > CreateConv2dOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *dilation=nullptr, uint32_t groups=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0)
 
inline ::flatbuffers::Offset< ConvTranspose2dOp > CreateConvTranspose2dOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> output_padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, uint32_t groups=0)
 
inline ::flatbuffers::Offset< ConvTranspose2dOp > CreateConvTranspose2dOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *output_padding=nullptr, const std::vector< int32_t > *dilation=nullptr, uint32_t groups=0)
 
inline ::flatbuffers::Offset< CpuOp > CreateCpuOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>> ins=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::String > func_name=0, uint32_t dylib_id=0)
 
inline ::flatbuffers::Offset< CpuOp > CreateCpuOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> *ins=nullptr, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const char *func_name=nullptr, uint32_t dylib_id=0)
 
const NamedFullOpType(& EnumValuesNamedFullOpType ())[2]
 
const char *const * EnumNamesNamedFullOpType ()
 
const char * EnumNameNamedFullOpType (NamedFullOpType e)
 
inline ::flatbuffers::Offset< ArangeOp > CreateArangeOp (::flatbuffers::FlatBufferBuilder &_fbb, float start=0.0f, float end=0.0f, float step=0.0f, ::flatbuffers::Optional< tt::target::DataType > dtype=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< ConstantOp > CreateConstantOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::Vector< uint8_t >> data=0)
 
inline ::flatbuffers::Offset< ConstantOp > CreateConstantOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const std::vector< uint8_t > *data=nullptr)
 
inline ::flatbuffers::Offset< ConstructTensorOp > CreateConstructTensorOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> shape=0, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::TensorLayout layout=tt::target::TensorLayout::RowMajor, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< ConstructTensorOp > CreateConstructTensorOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< int64_t > *shape=nullptr, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::TensorLayout layout=tt::target::TensorLayout::RowMajor, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< EmptyOp > CreateEmptyOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> shape=0, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::TensorLayout layout=tt::target::TensorLayout::RowMajor, uint32_t num_shards=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy > strategy=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< EmptyOp > CreateEmptyOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< int64_t > *shape=nullptr, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::TensorLayout layout=tt::target::TensorLayout::RowMajor, uint32_t num_shards=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy > strategy=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< FullOp > CreateFullOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, float fill_value=0.0f, uint32_t num_shards=0, ::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy > strategy=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< NamedFullOp > CreateNamedFullOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::NamedFullOpType type=tt::target::ttnn::NamedFullOpType::Zeros, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> shape=0, ::flatbuffers::Optional< tt::target::DataType > dtype=::flatbuffers::nullopt, ::flatbuffers::Optional< tt::target::TensorLayout > layout=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< NamedFullOp > CreateNamedFullOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::NamedFullOpType type=tt::target::ttnn::NamedFullOpType::Zeros, const std::vector< int64_t > *shape=nullptr, ::flatbuffers::Optional< tt::target::DataType > dtype=::flatbuffers::nullopt, ::flatbuffers::Optional< tt::target::TensorLayout > layout=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< ConcatOp > CreateConcatOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>> inputs=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, int32_t dim=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0)
 
inline ::flatbuffers::Offset< ConcatOp > CreateConcatOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> *inputs=nullptr, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, int32_t dim=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0)
 
inline ::flatbuffers::Offset< PadOp > CreatePadOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::Vector< uint32_t >> padding=0, float value=0.0f, bool use_multicore=false, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0)
 
inline ::flatbuffers::Offset< PadOp > CreatePadOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const std::vector< uint32_t > *padding=nullptr, float value=0.0f, bool use_multicore=false, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0)
 
inline ::flatbuffers::Offset< PermuteOp > CreatePermuteOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> permutation=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, float pad_value=0.0f, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< PermuteOp > CreatePermuteOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, const std::vector< int64_t > *permutation=nullptr, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, float pad_value=0.0f, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< RepeatInterleaveOp > CreateRepeatInterleaveOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, uint32_t repeats=0, int32_t dim=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0)
 
inline ::flatbuffers::Offset< RepeatOp > CreateRepeatOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> repeat_dims=0)
 
inline ::flatbuffers::Offset< RepeatOp > CreateRepeatOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const std::vector< int64_t > *repeat_dims=nullptr)
 
inline ::flatbuffers::Offset< ReshapeOp > CreateReshapeOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> shape=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0)
 
inline ::flatbuffers::Offset< ReshapeOp > CreateReshapeOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const std::vector< int32_t > *shape=nullptr, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0)
 
inline ::flatbuffers::Offset< SliceOp > CreateSliceOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> begins=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> ends=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> step=0)
 
inline ::flatbuffers::Offset< SliceOp > CreateSliceOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const std::vector< int64_t > *begins=nullptr, const std::vector< int64_t > *ends=nullptr, const std::vector< int64_t > *step=nullptr)
 
inline ::flatbuffers::Offset< TransposeOp > CreateTransposeOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, int32_t dim0=0, int32_t dim1=0)
 
inline ::flatbuffers::Offset< DeallocateOp > CreateDeallocateOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, bool force=false)
 
const EltwiseBinaryOpType(& EnumValuesEltwiseBinaryOpType ())[13]
 
const char *const * EnumNamesEltwiseBinaryOpType ()
 
const char * EnumNameEltwiseBinaryOpType (EltwiseBinaryOpType e)
 
const EltwiseBinaryCompositeOpType(& EnumValuesEltwiseBinaryCompositeOpType ())[9]
 
const char *const * EnumNamesEltwiseBinaryCompositeOpType ()
 
const char * EnumNameEltwiseBinaryCompositeOpType (EltwiseBinaryCompositeOpType e)
 
const EltwiseQuantizationOpType(& EnumValuesEltwiseQuantizationOpType ())[3]
 
const char *const * EnumNamesEltwiseQuantizationOpType ()
 
const char * EnumNameEltwiseQuantizationOpType (EltwiseQuantizationOpType e)
 
const EltwiseQuantizationOpParams(& EnumValuesEltwiseQuantizationOpParams ())[3]
 
const char *const * EnumNamesEltwiseQuantizationOpParams ()
 
const char * EnumNameEltwiseQuantizationOpParams (EltwiseQuantizationOpParams e)
 
bool VerifyEltwiseQuantizationOpParams (::flatbuffers::Verifier &verifier, const void *obj, EltwiseQuantizationOpParams type)
 
bool VerifyEltwiseQuantizationOpParamsVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< EltwiseQuantizationOpParams > *types)
 
const EltwiseUnaryOpType(& EnumValuesEltwiseUnaryOpType ())[23]
 
const char *const * EnumNamesEltwiseUnaryOpType ()
 
const char * EnumNameEltwiseUnaryOpType (EltwiseUnaryOpType e)
 
const EltwiseUnaryOpParams(& EnumValuesEltwiseUnaryOpParams ())[2]
 
const char *const * EnumNamesEltwiseUnaryOpParams ()
 
const char * EnumNameEltwiseUnaryOpParams (EltwiseUnaryOpParams e)
 
bool VerifyEltwiseUnaryOpParams (::flatbuffers::Verifier &verifier, const void *obj, EltwiseUnaryOpParams type)
 
bool VerifyEltwiseUnaryOpParamsVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< EltwiseUnaryOpParams > *types)
 
const EltwiseUnaryCompositeOpType(& EnumValuesEltwiseUnaryCompositeOpType ())[4]
 
const char *const * EnumNamesEltwiseUnaryCompositeOpType ()
 
const char * EnumNameEltwiseUnaryCompositeOpType (EltwiseUnaryCompositeOpType e)
 
const EltwiseUnaryCompositeOpParams(& EnumValuesEltwiseUnaryCompositeOpParams ())[3]
 
const char *const * EnumNamesEltwiseUnaryCompositeOpParams ()
 
const char * EnumNameEltwiseUnaryCompositeOpParams (EltwiseUnaryCompositeOpParams e)
 
bool VerifyEltwiseUnaryCompositeOpParams (::flatbuffers::Verifier &verifier, const void *obj, EltwiseUnaryCompositeOpParams type)
 
bool VerifyEltwiseUnaryCompositeOpParamsVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< EltwiseUnaryCompositeOpParams > *types)
 
const UnaryOpType(& EnumValuesUnaryOpType ())[80]
 
const char *const * EnumNamesUnaryOpType ()
 
const char * EnumNameUnaryOpType (UnaryOpType e)
 
inline ::flatbuffers::Offset< EltwiseOpWithFloatParams > CreateEltwiseOpWithFloatParams (::flatbuffers::FlatBufferBuilder &_fbb, float parameter=0.0f)
 
inline ::flatbuffers::Offset< EltwiseBinaryOp > CreateEltwiseBinaryOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::EltwiseBinaryOpType type=tt::target::ttnn::EltwiseBinaryOpType::Add, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > lhs=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > rhs=0, ::flatbuffers::Optional< tt::target::DataType > output_dtype=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< EltwiseBinaryCompositeOp > CreateEltwiseBinaryCompositeOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::EltwiseBinaryCompositeOpType type=tt::target::ttnn::EltwiseBinaryCompositeOpType::Maximum, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > lhs=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > rhs=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< QuantizeDequantizeOpParams > CreateQuantizeDequantizeOpParams (::flatbuffers::FlatBufferBuilder &_fbb, float scale=0.0f, int32_t zero_point=0)
 
inline ::flatbuffers::Offset< RequantizeOpParams > CreateRequantizeOpParams (::flatbuffers::FlatBufferBuilder &_fbb, float in_scale=0.0f, int32_t in_zero_point=0, float out_scale=0.0f, int32_t out_zero_point=0)
 
template<>
const tt::target::ttnn::QuantizeDequantizeOpParams * EltwiseQuantizationOp::params_as< tt::target::ttnn::QuantizeDequantizeOpParams > () const
 
template<>
const tt::target::ttnn::RequantizeOpParams * EltwiseQuantizationOp::params_as< tt::target::ttnn::RequantizeOpParams > () const
 
inline ::flatbuffers::Offset< EltwiseQuantizationOp > CreateEltwiseQuantizationOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::EltwiseQuantizationOpType type=tt::target::ttnn::EltwiseQuantizationOpType::Quantize, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Optional< int32_t > axis=::flatbuffers::nullopt, ::flatbuffers::Optional< tt::target::DataType > output_dtype=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, tt::target::ttnn::EltwiseQuantizationOpParams params_type=tt::target::ttnn::EltwiseQuantizationOpParams::NONE, ::flatbuffers::Offset< void > params=0)
 
inline ::flatbuffers::Offset< EltwiseTernaryWhereOp > CreateEltwiseTernaryWhereOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > first=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > second=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > third=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
template<>
const tt::target::ttnn::EltwiseOpWithFloatParams * EltwiseUnaryOp::params_as< tt::target::ttnn::EltwiseOpWithFloatParams > () const
 
inline ::flatbuffers::Offset< EltwiseUnaryOp > CreateEltwiseUnaryOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::EltwiseUnaryOpType type=tt::target::ttnn::EltwiseUnaryOpType::Abs, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, tt::target::ttnn::EltwiseUnaryOpParams params_type=tt::target::ttnn::EltwiseUnaryOpParams::NONE, ::flatbuffers::Offset< void > params=0)
 
inline ::flatbuffers::Offset< ClampScalarOpParams > CreateClampScalarOpParams (::flatbuffers::FlatBufferBuilder &_fbb, float min=0.0f, float max=0.0f)
 
inline ::flatbuffers::Offset< ClampTensorOpParams > CreateClampTensorOpParams (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > min=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > max=0)
 
template<>
const tt::target::ttnn::ClampScalarOpParams * EltwiseUnaryCompositeOp::params_as< tt::target::ttnn::ClampScalarOpParams > () const
 
template<>
const tt::target::ttnn::ClampTensorOpParams * EltwiseUnaryCompositeOp::params_as< tt::target::ttnn::ClampTensorOpParams > () const
 
inline ::flatbuffers::Offset< EltwiseUnaryCompositeOp > CreateEltwiseUnaryCompositeOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::EltwiseUnaryCompositeOpType type=tt::target::ttnn::EltwiseUnaryCompositeOpType::Cbrt, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, tt::target::ttnn::EltwiseUnaryCompositeOpParams params_type=tt::target::ttnn::EltwiseUnaryCompositeOpParams::NONE, ::flatbuffers::Offset< void > params=0)
 
inline ::flatbuffers::Offset< UnaryWithParam > CreateUnaryWithParam (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::UnaryOpType op_type=tt::target::ttnn::UnaryOpType::Exp, ::flatbuffers::Offset<::flatbuffers::Vector< double >> params=0)
 
inline ::flatbuffers::Offset< UnaryWithParam > CreateUnaryWithParamDirect (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::UnaryOpType op_type=tt::target::ttnn::UnaryOpType::Exp, const std::vector< double > *params=nullptr)
 
inline ::flatbuffers::Offset< EmbeddingOp > CreateEmbeddingOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< EmbeddingBackwardOp > CreateEmbeddingBackwardOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in_grad=0, ::flatbuffers::Optional< tt::target::DataType > dtype=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< GetDeviceOp > CreateGetDeviceOp (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::Dim2d *mesh=nullptr, const tt::target::Dim2d *offset=nullptr, ::flatbuffers::Offset<::flatbuffers::Vector< uint32_t >> chip_ids=0, ::flatbuffers::Offset< tt::target::DeviceRef > out=0)
 
inline ::flatbuffers::Offset< GetDeviceOp > CreateGetDeviceOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::Dim2d *mesh=nullptr, const tt::target::Dim2d *offset=nullptr, const std::vector< uint32_t > *chip_ids=nullptr, ::flatbuffers::Offset< tt::target::DeviceRef > out=0)
 
inline ::flatbuffers::Offset< FillCacheOp > CreateFillCacheOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > cache=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, uint32_t batch_offset=0)
 
inline ::flatbuffers::Offset< UpdateCacheOp > CreateUpdateCacheOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > cache=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > update_index=0, uint32_t batch_offset=0)
 
inline ::flatbuffers::Offset< FromDeviceOp > CreateFromDeviceOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< ToDeviceOp > CreateToDeviceOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< ToDTypeOp > CreateToDTypeOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, tt::target::DataType dtype=tt::target::DataType::Float32, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< ToLayoutOp > CreateToLayoutOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, tt::target::TensorLayout layout=tt::target::TensorLayout::RowMajor, ::flatbuffers::Optional< tt::target::DataType > dtype=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< ToMemoryConfigOp > CreateToMemoryConfigOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in0=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< TypecastOp > CreateTypecastOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, tt::target::DataType dtype=tt::target::DataType::Float32, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
const MatmulProgramConfig(& EnumValuesMatmulProgramConfig ())[5]
 
const char *const * EnumNamesMatmulProgramConfig ()
 
const char * EnumNameMatmulProgramConfig (MatmulProgramConfig e)
 
bool VerifyMatmulProgramConfig (::flatbuffers::Verifier &verifier, const void *obj, MatmulProgramConfig type)
 
bool VerifyMatmulProgramConfigVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< MatmulProgramConfig > *types)
 
inline ::flatbuffers::Offset< MatmulMultiCoreReuseProgramConfig > CreateMatmulMultiCoreReuseProgramConfig (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size=nullptr, uint64_t in0_block_w=0, uint64_t out_subblock_h=0, uint64_t out_subblock_w=0, uint64_t per_core_m=0, uint64_t per_core_n=0)
 
inline ::flatbuffers::Offset< MatmulMultiCoreReuseMultiCastProgramConfig > CreateMatmulMultiCoreReuseMultiCastProgramConfig (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size=nullptr, uint64_t in0_block_w=0, uint64_t out_subblock_h=0, uint64_t out_subblock_w=0, uint64_t out_block_h=0, uint64_t out_block_w=0, uint64_t per_core_m=0, uint64_t per_core_n=0, bool transpose_mcast=false, ::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam > fused_activation=0, bool fuse_batch=false)
 
inline ::flatbuffers::Offset< MatmulMultiCoreReuseMultiCast1DProgramConfig > CreateMatmulMultiCoreReuseMultiCast1DProgramConfig (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size=nullptr, uint64_t in0_block_w=0, uint64_t out_subblock_h=0, uint64_t out_subblock_w=0, uint64_t out_block_h=0, uint64_t out_block_w=0, uint64_t per_core_m=0, uint64_t per_core_n=0, bool fuse_batch=false, ::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam > fused_activation=0, bool mcast_in0=false, bool gather_in0=false, ::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > hop_cores=0, uint64_t num_global_cb_receivers=0)
 
inline ::flatbuffers::Offset< MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig > CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig (::flatbuffers::FlatBufferBuilder &_fbb, uint64_t in0_block_w=0, uint64_t per_core_m=0, uint64_t per_core_n=0, ::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam > fused_activation=0)
 
template<>
const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig * MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseProgramConfig > () const
 
template<>
const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig * MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig > () const
 
template<>
const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig * MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig > () const
 
template<>
const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig * MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig > () const
 
inline ::flatbuffers::Offset< MatmulOp > CreateMatmulOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > a=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > b=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, bool transpose_a=false, bool transpose_b=false, tt::target::ttnn::MatmulProgramConfig matmul_program_config_type=tt::target::ttnn::MatmulProgramConfig::NONE, ::flatbuffers::Offset< void > matmul_program_config=0)
 
inline ::flatbuffers::Offset< LinearOp > CreateLinearOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > a=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > b=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, bool transpose_a=false, bool transpose_b=false)
 
inline ::flatbuffers::Offset< MorehCumSumOp > CreateMorehCumSumOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, int64_t dim=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0)
 
const Scale2D(& EnumValuesScale2D ())[3]
 
const char *const * EnumNamesScale2D ()
 
const char * EnumNameScale2D (Scale2D e)
 
bool VerifyScale2D (::flatbuffers::Verifier &verifier, const void *obj, Scale2D type)
 
bool VerifyScale2DVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< Scale2D > *types)
 
inline ::flatbuffers::Offset< MaxPool2dOp > CreateMaxPool2dOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, uint32_t channels=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, bool ceil_mode=false)
 
inline ::flatbuffers::Offset< MaxPool2dOp > CreateMaxPool2dOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, uint32_t channels=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *dilation=nullptr, bool ceil_mode=false)
 
inline ::flatbuffers::Offset< UniformScale2D > CreateUniformScale2D (::flatbuffers::FlatBufferBuilder &_fbb, int32_t scale=0)
 
inline ::flatbuffers::Offset< NonUniformScale2D > CreateNonUniformScale2D (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> scale=0)
 
inline ::flatbuffers::Offset< NonUniformScale2D > CreateNonUniformScale2DDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< int32_t > *scale=nullptr)
 
template<>
const tt::target::ttnn::UniformScale2D * UpsampleOp::scale_factor_as< tt::target::ttnn::UniformScale2D > () const
 
template<>
const tt::target::ttnn::NonUniformScale2D * UpsampleOp::scale_factor_as< tt::target::ttnn::NonUniformScale2D > () const
 
inline ::flatbuffers::Offset< UpsampleOp > CreateUpsampleOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, tt::target::ttnn::Scale2D scale_factor_type=tt::target::ttnn::Scale2D::NONE, ::flatbuffers::Offset< void > scale_factor=0, ::flatbuffers::Offset<::flatbuffers::String > mode=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
inline ::flatbuffers::Offset< UpsampleOp > CreateUpsampleOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, tt::target::ttnn::Scale2D scale_factor_type=tt::target::ttnn::Scale2D::NONE, ::flatbuffers::Offset< void > scale_factor=0, const char *mode=nullptr, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
 
const ReductionOpType(& EnumValuesReductionOpType ())[4]
 
const char *const * EnumNamesReductionOpType ()
 
const char * EnumNameReductionOpType (ReductionOpType e)
 
inline ::flatbuffers::Offset< ReductionOp > CreateReductionOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::ReductionOpType type=tt::target::ttnn::ReductionOpType::Sum, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dim_arg=0, bool keep_dim=false)
 
inline ::flatbuffers::Offset< ReductionOp > CreateReductionOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::ReductionOpType type=tt::target::ttnn::ReductionOpType::Sum, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const std::vector< int32_t > *dim_arg=nullptr, bool keep_dim=false)
 
inline ::flatbuffers::Offset< ReductionArgMaxOp > CreateReductionArgMaxOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Optional< int32_t > dim=::flatbuffers::nullopt, bool use_multicore=false, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0)
 
inline ::flatbuffers::Offset< ReductionProdOp > CreateReductionProdOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, bool all_dimensions=false, int64_t dim_arg=0, bool keep_dim=false, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0)
 
inline ::flatbuffers::Offset< SoftmaxOp > CreateSoftmaxOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, int32_t dimension=0)
 
const OpType(& EnumValuesOpType ())[50]
 
const char *const * EnumNamesOpType ()
 
const char * EnumNameOpType (OpType e)
 
bool VerifyOpType (::flatbuffers::Verifier &verifier, const void *obj, OpType type)
 
bool VerifyOpTypeVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< OpType > *types)
 
template<>
const tt::target::ttnn::AllGatherOp * Operation::type_as< tt::target::ttnn::AllGatherOp > () const
 
template<>
const tt::target::ttnn::CollectivePermuteOp * Operation::type_as< tt::target::ttnn::CollectivePermuteOp > () const
 
template<>
const tt::target::ttnn::MeshShardOp * Operation::type_as< tt::target::ttnn::MeshShardOp > () const
 
template<>
const tt::target::ttnn::ReduceScatterOp * Operation::type_as< tt::target::ttnn::ReduceScatterOp > () const
 
template<>
const tt::target::ttnn::GetDeviceOp * Operation::type_as< tt::target::ttnn::GetDeviceOp > () const
 
template<>
const tt::target::ttnn::PrepareConv2dWeightsOp * Operation::type_as< tt::target::ttnn::PrepareConv2dWeightsOp > () const
 
template<>
const tt::target::ttnn::Conv2dOp * Operation::type_as< tt::target::ttnn::Conv2dOp > () const
 
template<>
const tt::target::ttnn::ConvTranspose2dOp * Operation::type_as< tt::target::ttnn::ConvTranspose2dOp > () const
 
template<>
const tt::target::ttnn::CpuOp * Operation::type_as< tt::target::ttnn::CpuOp > () const
 
template<>
const tt::target::ttnn::ArangeOp * Operation::type_as< tt::target::ttnn::ArangeOp > () const
 
template<>
const tt::target::ttnn::ConstantOp * Operation::type_as< tt::target::ttnn::ConstantOp > () const
 
template<>
const tt::target::ttnn::ConstructTensorOp * Operation::type_as< tt::target::ttnn::ConstructTensorOp > () const
 
template<>
const tt::target::ttnn::EmptyOp * Operation::type_as< tt::target::ttnn::EmptyOp > () const
 
template<>
const tt::target::ttnn::FullOp * Operation::type_as< tt::target::ttnn::FullOp > () const
 
template<>
const tt::target::ttnn::NamedFullOp * Operation::type_as< tt::target::ttnn::NamedFullOp > () const
 
template<>
const tt::target::ttnn::ConcatOp * Operation::type_as< tt::target::ttnn::ConcatOp > () const
 
template<>
const tt::target::ttnn::PadOp * Operation::type_as< tt::target::ttnn::PadOp > () const
 
template<>
const tt::target::ttnn::PermuteOp * Operation::type_as< tt::target::ttnn::PermuteOp > () const
 
template<>
const tt::target::ttnn::RepeatInterleaveOp * Operation::type_as< tt::target::ttnn::RepeatInterleaveOp > () const
 
template<>
const tt::target::ttnn::RepeatOp * Operation::type_as< tt::target::ttnn::RepeatOp > () const
 
template<>
const tt::target::ttnn::ReshapeOp * Operation::type_as< tt::target::ttnn::ReshapeOp > () const
 
template<>
const tt::target::ttnn::SliceOp * Operation::type_as< tt::target::ttnn::SliceOp > () const
 
template<>
const tt::target::ttnn::TransposeOp * Operation::type_as< tt::target::ttnn::TransposeOp > () const
 
template<>
const tt::target::ttnn::DeallocateOp * Operation::type_as< tt::target::ttnn::DeallocateOp > () const
 
template<>
const tt::target::ttnn::EltwiseBinaryOp * Operation::type_as< tt::target::ttnn::EltwiseBinaryOp > () const
 
template<>
const tt::target::ttnn::EltwiseBinaryCompositeOp * Operation::type_as< tt::target::ttnn::EltwiseBinaryCompositeOp > () const
 
template<>
const tt::target::ttnn::EltwiseTernaryWhereOp * Operation::type_as< tt::target::ttnn::EltwiseTernaryWhereOp > () const
 
template<>
const tt::target::ttnn::EltwiseQuantizationOp * Operation::type_as< tt::target::ttnn::EltwiseQuantizationOp > () const
 
template<>
const tt::target::ttnn::EltwiseUnaryOp * Operation::type_as< tt::target::ttnn::EltwiseUnaryOp > () const
 
template<>
const tt::target::ttnn::EltwiseUnaryCompositeOp * Operation::type_as< tt::target::ttnn::EltwiseUnaryCompositeOp > () const
 
template<>
const tt::target::ttnn::EmbeddingBackwardOp * Operation::type_as< tt::target::ttnn::EmbeddingBackwardOp > () const
 
template<>
const tt::target::ttnn::EmbeddingOp * Operation::type_as< tt::target::ttnn::EmbeddingOp > () const
 
template<>
const tt::target::ttnn::FillCacheOp * Operation::type_as< tt::target::ttnn::FillCacheOp > () const
 
template<>
const tt::target::ttnn::UpdateCacheOp * Operation::type_as< tt::target::ttnn::UpdateCacheOp > () const
 
template<>
const tt::target::ttnn::FromDeviceOp * Operation::type_as< tt::target::ttnn::FromDeviceOp > () const
 
template<>
const tt::target::ttnn::ToDeviceOp * Operation::type_as< tt::target::ttnn::ToDeviceOp > () const
 
template<>
const tt::target::ttnn::ToDTypeOp * Operation::type_as< tt::target::ttnn::ToDTypeOp > () const
 
template<>
const tt::target::ttnn::ToLayoutOp * Operation::type_as< tt::target::ttnn::ToLayoutOp > () const
 
template<>
const tt::target::ttnn::ToMemoryConfigOp * Operation::type_as< tt::target::ttnn::ToMemoryConfigOp > () const
 
template<>
const tt::target::ttnn::TypecastOp * Operation::type_as< tt::target::ttnn::TypecastOp > () const
 
template<>
const tt::target::ttnn::LinearOp * Operation::type_as< tt::target::ttnn::LinearOp > () const
 
template<>
const tt::target::ttnn::MatmulOp * Operation::type_as< tt::target::ttnn::MatmulOp > () const
 
template<>
const tt::target::ttnn::MorehCumSumOp * Operation::type_as< tt::target::ttnn::MorehCumSumOp > () const
 
template<>
const tt::target::ttnn::SoftmaxOp * Operation::type_as< tt::target::ttnn::SoftmaxOp > () const
 
template<>
const tt::target::ttnn::MaxPool2dOp * Operation::type_as< tt::target::ttnn::MaxPool2dOp > () const
 
template<>
const tt::target::ttnn::UpsampleOp * Operation::type_as< tt::target::ttnn::UpsampleOp > () const
 
template<>
const tt::target::ttnn::ReductionArgMaxOp * Operation::type_as< tt::target::ttnn::ReductionArgMaxOp > () const
 
template<>
const tt::target::ttnn::ReductionOp * Operation::type_as< tt::target::ttnn::ReductionOp > () const
 
template<>
const tt::target::ttnn::ReductionProdOp * Operation::type_as< tt::target::ttnn::ReductionProdOp > () const
 
inline ::flatbuffers::Offset< Operation > CreateOperation (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::OpType type_type=tt::target::ttnn::OpType::NONE, ::flatbuffers::Offset< void > type=0, ::flatbuffers::Offset<::flatbuffers::String > debug_info=0, ::flatbuffers::Offset<::flatbuffers::String > loc_info=0)
 
inline ::flatbuffers::Offset< Operation > CreateOperationDirect (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::OpType type_type=tt::target::ttnn::OpType::NONE, ::flatbuffers::Offset< void > type=0, const char *debug_info=nullptr, const char *loc_info=nullptr)
 
inline ::flatbuffers::Offset< Program > CreateProgram (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::String > name=0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>> inputs=0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>> outputs=0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::Operation >>> operations=0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::DynamicLib >>> dylibs=0, ::flatbuffers::Offset< tt::target::DebugInfo > debug_info=0)
 
inline ::flatbuffers::Offset< Program > CreateProgramDirect (::flatbuffers::FlatBufferBuilder &_fbb, const char *name=nullptr, const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> *inputs=nullptr, const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> *outputs=nullptr, const std::vector<::flatbuffers::Offset< tt::target::ttnn::Operation >> *operations=nullptr, const std::vector<::flatbuffers::Offset< tt::target::DynamicLib >> *dylibs=nullptr, ::flatbuffers::Offset< tt::target::DebugInfo > debug_info=0)
 
const TensorMemoryLayout(& EnumValuesTensorMemoryLayout ())[5]
 
const char *const * EnumNamesTensorMemoryLayout ()
 
const char * EnumNameTensorMemoryLayout (TensorMemoryLayout e)
 
const StorageType(& EnumValuesStorageType ())[5]
 
const char *const * EnumNamesStorageType ()
 
const char * EnumNameStorageType (StorageType e)
 
const MeshShardDirection(& EnumValuesMeshShardDirection ())[2]
 
const char *const * EnumNamesMeshShardDirection ()
 
const char * EnumNameMeshShardDirection (MeshShardDirection e)
 
const MeshShardType(& EnumValuesMeshShardType ())[4]
 
const char *const * EnumNamesMeshShardType ()
 
const char * EnumNameMeshShardType (MeshShardType e)
 
const DistributedTensorConfig(& EnumValuesDistributedTensorConfig ())[5]
 
const char *const * EnumNamesDistributedTensorConfig ()
 
const char * EnumNameDistributedTensorConfig (DistributedTensorConfig e)
 
bool VerifyDistributedTensorConfig (::flatbuffers::Verifier &verifier, const void *obj, DistributedTensorConfig type)
 
bool VerifyDistributedTensorConfigVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< DistributedTensorConfig > *types)
 
 FLATBUFFERS_MANUALLY_ALIGNED_STRUCT (8) CoreCoord FLATBUFFERS_FINAL_CLASS
 
 FLATBUFFERS_STRUCT_END (CoreCoord, 16)
 
 FLATBUFFERS_STRUCT_END (CoreRange, 32)
 
inline ::flatbuffers::Offset< CoreRangeSet > CreateCoreRangeSet (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< const tt::target::ttnn::CoreRange * >> core_ranges=0)
 
inline ::flatbuffers::Offset< CoreRangeSet > CreateCoreRangeSetDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< tt::target::ttnn::CoreRange > *core_ranges=nullptr)
 
inline ::flatbuffers::Offset< ReplicateTensor > CreateReplicateTensor (::flatbuffers::FlatBufferBuilder &_fbb, uint32_t replication_factor=0)
 
inline ::flatbuffers::Offset< ShardTensor > CreateShardTensor (::flatbuffers::FlatBufferBuilder &_fbb, uint32_t shard_dim=0)
 
inline ::flatbuffers::Offset< ShardTensor2D > CreateShardTensor2D (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::Dim2d *shard_mesh=nullptr)
 
inline ::flatbuffers::Offset< AllGatherTensor > CreateAllGatherTensor (::flatbuffers::FlatBufferBuilder &_fbb)
 
template<>
const tt::target::ttnn::ReplicateTensor * DistributionStrategy::strategy_as< tt::target::ttnn::ReplicateTensor > () const
 
template<>
const tt::target::ttnn::ShardTensor * DistributionStrategy::strategy_as< tt::target::ttnn::ShardTensor > () const
 
template<>
const tt::target::ttnn::ShardTensor2D * DistributionStrategy::strategy_as< tt::target::ttnn::ShardTensor2D > () const
 
template<>
const tt::target::ttnn::AllGatherTensor * DistributionStrategy::strategy_as< tt::target::ttnn::AllGatherTensor > () const
 
inline ::flatbuffers::Offset< DistributionStrategy > CreateDistributionStrategy (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::DistributedTensorConfig strategy_type=tt::target::ttnn::DistributedTensorConfig::NONE, ::flatbuffers::Offset< void > strategy=0)
 
inline ::flatbuffers::Offset< ShardSpec > CreateShardSpec (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< const tt::target::Dim2dRange * >> grid=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> shard_shape=0)
 
inline ::flatbuffers::Offset< ShardSpec > CreateShardSpecDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< tt::target::Dim2dRange > *grid=nullptr, const std::vector< int32_t > *shard_shape=nullptr)
 
inline ::flatbuffers::Offset< MemoryConfig > CreateMemoryConfig (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::TensorMemoryLayout tensor_memory_layout=tt::target::ttnn::TensorMemoryLayout::Interleaved, tt::target::BufferType buffer_type=tt::target::BufferType::DRAM, ::flatbuffers::Offset< tt::target::ttnn::ShardSpec > shard_spec=0)
 
inline ::flatbuffers::Offset< MemoryDesc > CreateMemoryDesc (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::StorageType storage_type=tt::target::ttnn::StorageType::Owned, const tt::target::Dim2d *tile_shape=nullptr, tt::target::DataType data_type=tt::target::DataType::Float32, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, uint64_t size=0)
 
inline ::flatbuffers::Offset< LayoutDesc > CreateLayoutDesc (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::OOBVal oob_val=tt::target::OOBVal::Undef, ::flatbuffers::Offset< tt::target::ttnn::MemoryDesc > memory_desc=0, ::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy > strategy=0)
 
inline ::flatbuffers::Offset< TensorDesc > CreateTensorDesc (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> shape=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> mesh_shape=0, ::flatbuffers::Offset< tt::target::ttnn::LayoutDesc > layout=0)
 
inline ::flatbuffers::Offset< TensorDesc > CreateTensorDescDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< int32_t > *shape=nullptr, const std::vector< int32_t > *mesh_shape=nullptr, ::flatbuffers::Offset< tt::target::ttnn::LayoutDesc > layout=0)
 
inline ::flatbuffers::Offset< TensorRef > CreateTensorRef (::flatbuffers::FlatBufferBuilder &_fbb, uint32_t global_id=0, uint64_t size=0, ::flatbuffers::Offset< tt::target::ttnn::TensorDesc > desc=0)
 

Enumeration Type Documentation

◆ DistributedTensorConfig

Enumerator
NONE 
ReplicateTensor 
ShardTensor 
ShardTensor2D 
AllGatherTensor 
MIN 
MAX 

◆ EltwiseBinaryCompositeOpType

Enumerator
Maximum 
Minimum 
Remainder 
Scatter 
Pow 
Atan2 
BitwiseAnd 
BitwiseOr 
BitwiseXor 
MIN 
MAX 

◆ EltwiseBinaryOpType

enum tt::target::ttnn::EltwiseBinaryOpType : uint32_t
strong
Enumerator
Add 
Multiply 
Subtract 
Equal 
NotEqual 
GreaterEqual 
GreaterThan 
LessEqual 
LessThan 
Divide 
LogicalAnd 
LogicalOr 
LogicalXor 
MIN 
MAX 

◆ EltwiseQuantizationOpParams

Enumerator
NONE 
QuantizeDequantizeOpParams 
RequantizeOpParams 
MIN 
MAX 

◆ EltwiseQuantizationOpType

Enumerator
Quantize 
Dequantize 
Requantize 
MIN 
MAX 

◆ EltwiseUnaryCompositeOpParams

Enumerator
NONE 
ClampScalarOpParams 
ClampTensorOpParams 
MIN 
MAX 

◆ EltwiseUnaryCompositeOpType

Enumerator
Cbrt 
ClampScalar 
ClampTensor 
Log1p 
MIN 
MAX 

◆ EltwiseUnaryOpParams

Enumerator
NONE 
EltwiseOpWithFloatParams 
MIN 
MAX 

◆ EltwiseUnaryOpType

enum tt::target::ttnn::EltwiseUnaryOpType : uint32_t
strong
Enumerator
Abs 
Ceil 
Cos 
Floor 
Gelu 
IsFinite 
LogicalNot 
Neg 
Relu 
Sqrt 
Rsqrt 
Sigmoid 
Sin 
Reciprocal 
Sign 
Tan 
Tanh 
Atan 
Exp 
Log 
Expm1 
LeakyRelu 
BitwiseNot 
MIN 
MAX 

◆ MatmulProgramConfig

Enumerator
NONE 
MatmulMultiCoreReuseProgramConfig 
MatmulMultiCoreReuseMultiCastProgramConfig 
MatmulMultiCoreReuseMultiCast1DProgramConfig 
MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig 
MIN 
MAX 

◆ MeshShardDirection

enum tt::target::ttnn::MeshShardDirection : uint32_t
strong
Enumerator
FullToShardShape 
ShardToFullShape 
MIN 
MAX 

◆ MeshShardType

enum tt::target::ttnn::MeshShardType : uint32_t
strong
Enumerator
Identity 
Replicate 
Maximal 
Devices 
MIN 
MAX 

◆ NamedFullOpType

enum tt::target::ttnn::NamedFullOpType : uint32_t
strong
Enumerator
Zeros 
Ones 
MIN 
MAX 

◆ OpType

enum tt::target::ttnn::OpType : uint8_t
strong
Enumerator
NONE 
AllGatherOp 
CollectivePermuteOp 
MeshShardOp 
ReduceScatterOp 
GetDeviceOp 
PrepareConv2dWeightsOp 
Conv2dOp 
ConvTranspose2dOp 
CpuOp 
ArangeOp 
ConstantOp 
ConstructTensorOp 
EmptyOp 
FullOp 
NamedFullOp 
ConcatOp 
PadOp 
PermuteOp 
RepeatInterleaveOp 
RepeatOp 
ReshapeOp 
SliceOp 
TransposeOp 
DeallocateOp 
EltwiseBinaryOp 
EltwiseBinaryCompositeOp 
EltwiseTernaryWhereOp 
EltwiseQuantizationOp 
EltwiseUnaryOp 
EltwiseUnaryCompositeOp 
EmbeddingBackwardOp 
EmbeddingOp 
FillCacheOp 
UpdateCacheOp 
FromDeviceOp 
ToDeviceOp 
ToDTypeOp 
ToLayoutOp 
ToMemoryConfigOp 
TypecastOp 
LinearOp 
MatmulOp 
MorehCumSumOp 
SoftmaxOp 
MaxPool2dOp 
UpsampleOp 
ReductionArgMaxOp 
ReductionOp 
ReductionProdOp 
MIN 
MAX 

◆ ReductionOpType

enum tt::target::ttnn::ReductionOpType : uint32_t
strong
Enumerator
Sum 
Mean 
Max 
Min 
MIN 
MAX 

◆ Scale2D

enum tt::target::ttnn::Scale2D : uint8_t
strong
Enumerator
NONE 
UniformScale2D 
NonUniformScale2D 
MIN 
MAX 

◆ StorageType

enum tt::target::ttnn::StorageType : uint16_t
strong
Enumerator
Owned 
Device 
Borrowed 
MultiDevice 
MultiDeviceHost 
MIN 
MAX 

◆ TensorMemoryLayout

enum tt::target::ttnn::TensorMemoryLayout : uint16_t
strong
Enumerator
Interleaved 
SingleBank 
HeightSharded 
WidthSharded 
BlockSharded 
MIN 
MAX 

◆ UnaryOpType

enum tt::target::ttnn::UnaryOpType : uint32_t
strong
Enumerator
Exp 
Recip 
Gelu 
Relu 
Sqrt 
Sigmoid 
Log 
Tanh 
Log2 
Log10 
Sin 
Cos 
Abs 
AbsInt32 
Sign 
Square 
Eqz 
Nez 
Gtz 
Ltz 
Gez 
Lez 
ReluMax 
ReluMin 
Power 
LeakyRelu 
Elu 
Exp2 
Heaviside 
Expm1 
Signbit 
Asin 
Acos 
Rsqrt 
Relu6 
Atan 
Erf 
Erfc 
Isinf 
Isposinf 
Isneginf 
Isnan 
LogicalNotUnary 
Isfinite 
Erfinv 
I0 
I1 
Tan 
Rsub 
Rdiv 
Silu 
Softplus 
Identity 
Neg 
AddUnarySfpu 
SubUnarySfpu 
MulUnarySfpu 
DivUnarySfpu 
IdentityUint32 
UnaryNe 
UnaryGt 
UnaryLt 
TiledProd 
Typecast 
BitwiseXor 
BitwiseNot 
BitwiseAnd 
BitwiseOr 
RightShift 
Floor 
FloorFloat32 
Ceil 
CeilFloat32 
LeftShift 
Remainder 
Fmod 
Dropout 
Fill 
PreluSfpu 
ZeroPoint 
MIN 
MAX 

Function Documentation

◆ CreateAllGatherOp()

inline ::flatbuffers::Offset<AllGatherOp> tt::target::ttnn::CreateAllGatherOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
int32_t  all_gather_dim = 0,
uint32_t  cluster_axis = 0,
uint32_t  num_links = 0 
)

◆ CreateAllGatherTensor()

inline ::flatbuffers::Offset<AllGatherTensor> tt::target::ttnn::CreateAllGatherTensor ( ::flatbuffers::FlatBufferBuilder &  _fbb)

◆ CreateArangeOp()

inline ::flatbuffers::Offset<ArangeOp> tt::target::ttnn::CreateArangeOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
float  start = 0.0f,
float  end = 0.0f,
float  step = 0.0f,
::flatbuffers::Optional< tt::target::DataType >  dtype = ::flatbuffers::nullopt,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateClampScalarOpParams()

inline ::flatbuffers::Offset<ClampScalarOpParams> tt::target::ttnn::CreateClampScalarOpParams ( ::flatbuffers::FlatBufferBuilder &  _fbb,
float  min = 0.0f,
float  max = 0.0f 
)

◆ CreateClampTensorOpParams()

inline ::flatbuffers::Offset<ClampTensorOpParams> tt::target::ttnn::CreateClampTensorOpParams ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  min = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  max = 0 
)

◆ CreateCollectivePermuteOp()

inline ::flatbuffers::Offset<CollectivePermuteOp> tt::target::ttnn::CreateCollectivePermuteOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >>  source_target_pairs = 0 
)

◆ CreateCollectivePermuteOpDirect()

inline ::flatbuffers::Offset<CollectivePermuteOp> tt::target::ttnn::CreateCollectivePermuteOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
const std::vector< int64_t > *  source_target_pairs = nullptr 
)

◆ CreateConcatOp()

inline ::flatbuffers::Offset<ConcatOp> tt::target::ttnn::CreateConcatOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>>  inputs = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
int32_t  dim = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0 
)

◆ CreateConcatOpDirect()

inline ::flatbuffers::Offset<ConcatOp> tt::target::ttnn::CreateConcatOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> *  inputs = nullptr,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
int32_t  dim = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0 
)

◆ CreateConstantOp()

inline ::flatbuffers::Offset<ConstantOp> tt::target::ttnn::CreateConstantOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset<::flatbuffers::Vector< uint8_t >>  data = 0 
)

◆ CreateConstantOpDirect()

inline ::flatbuffers::Offset<ConstantOp> tt::target::ttnn::CreateConstantOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
const std::vector< uint8_t > *  data = nullptr 
)

◆ CreateConstructTensorOp()

inline ::flatbuffers::Offset<ConstructTensorOp> tt::target::ttnn::CreateConstructTensorOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >>  shape = 0,
tt::target::DataType  dtype = tt::target::DataType::Float32,
tt::target::TensorLayout  layout = tt::target::TensorLayout::RowMajor,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateConstructTensorOpDirect()

inline ::flatbuffers::Offset<ConstructTensorOp> tt::target::ttnn::CreateConstructTensorOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const std::vector< int64_t > *  shape = nullptr,
tt::target::DataType  dtype = tt::target::DataType::Float32,
tt::target::TensorLayout  layout = tt::target::TensorLayout::RowMajor,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateConv2dConfig()

inline ::flatbuffers::Offset<Conv2dConfig> tt::target::ttnn::CreateConv2dConfig ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::DataType  dtype = tt::target::DataType::Float32,
tt::target::DataType  weights_dtype = tt::target::DataType::Float32,
::flatbuffers::Offset<::flatbuffers::String >  activation = 0,
uint32_t  input_channels_alignment = 0,
bool  deallocate_activation = false,
bool  reallocate_halo_output = false,
uint32_t  act_block_h_override = 0,
uint32_t  act_block_w_div = 0,
bool  reshard_if_not_optimal = false,
bool  override_sharding_config = false,
::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout >  shard_layout = ::flatbuffers::nullopt,
::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet >  core_grid = 0,
bool  transpose_shards = false,
tt::target::TensorLayout  output_layout = tt::target::TensorLayout::RowMajor,
bool  preprocess_weights_on_device = false,
bool  always_preprocess_weights = false,
bool  enable_act_double_buffer = false,
bool  enable_weights_double_buffer = false,
bool  enable_split_reader = false,
bool  enable_subblock_padding = false 
)

◆ CreateConv2dConfigDirect()

inline ::flatbuffers::Offset<Conv2dConfig> tt::target::ttnn::CreateConv2dConfigDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::DataType  dtype = tt::target::DataType::Float32,
tt::target::DataType  weights_dtype = tt::target::DataType::Float32,
const char *  activation = nullptr,
uint32_t  input_channels_alignment = 0,
bool  deallocate_activation = false,
bool  reallocate_halo_output = false,
uint32_t  act_block_h_override = 0,
uint32_t  act_block_w_div = 0,
bool  reshard_if_not_optimal = false,
bool  override_sharding_config = false,
::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout >  shard_layout = ::flatbuffers::nullopt,
::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet >  core_grid = 0,
bool  transpose_shards = false,
tt::target::TensorLayout  output_layout = tt::target::TensorLayout::RowMajor,
bool  preprocess_weights_on_device = false,
bool  always_preprocess_weights = false,
bool  enable_act_double_buffer = false,
bool  enable_weights_double_buffer = false,
bool  enable_split_reader = false,
bool  enable_subblock_padding = false 
)

◆ CreateConv2dOp()

inline ::flatbuffers::Offset<Conv2dOp> tt::target::ttnn::CreateConv2dOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  input = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  weight = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  bias = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
uint32_t  in_channels = 0,
uint32_t  out_channels = 0,
uint32_t  batch_size = 0,
uint32_t  input_height = 0,
uint32_t  input_width = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  kernel_size = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  stride = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  padding = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  dilation = 0,
uint32_t  groups = 0,
::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig >  conv2d_config = 0 
)

◆ CreateConv2dOpDirect()

inline ::flatbuffers::Offset<Conv2dOp> tt::target::ttnn::CreateConv2dOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  input = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  weight = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  bias = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
uint32_t  in_channels = 0,
uint32_t  out_channels = 0,
uint32_t  batch_size = 0,
uint32_t  input_height = 0,
uint32_t  input_width = 0,
const std::vector< int32_t > *  kernel_size = nullptr,
const std::vector< int32_t > *  stride = nullptr,
const std::vector< int32_t > *  padding = nullptr,
const std::vector< int32_t > *  dilation = nullptr,
uint32_t  groups = 0,
::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig >  conv2d_config = 0 
)

◆ CreateConvTranspose2dOp()

inline ::flatbuffers::Offset<ConvTranspose2dOp> tt::target::ttnn::CreateConvTranspose2dOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  input = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  weight = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  bias = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
uint32_t  in_channels = 0,
uint32_t  out_channels = 0,
uint32_t  batch_size = 0,
uint32_t  input_height = 0,
uint32_t  input_width = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  kernel_size = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  stride = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  padding = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  output_padding = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  dilation = 0,
uint32_t  groups = 0 
)

◆ CreateConvTranspose2dOpDirect()

inline ::flatbuffers::Offset<ConvTranspose2dOp> tt::target::ttnn::CreateConvTranspose2dOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  input = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  weight = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  bias = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
uint32_t  in_channels = 0,
uint32_t  out_channels = 0,
uint32_t  batch_size = 0,
uint32_t  input_height = 0,
uint32_t  input_width = 0,
const std::vector< int32_t > *  kernel_size = nullptr,
const std::vector< int32_t > *  stride = nullptr,
const std::vector< int32_t > *  padding = nullptr,
const std::vector< int32_t > *  output_padding = nullptr,
const std::vector< int32_t > *  dilation = nullptr,
uint32_t  groups = 0 
)

◆ CreateCoreRangeSet()

inline ::flatbuffers::Offset<CoreRangeSet> tt::target::ttnn::CreateCoreRangeSet ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset<::flatbuffers::Vector< const tt::target::ttnn::CoreRange * >>  core_ranges = 0 
)

◆ CreateCoreRangeSetDirect()

inline ::flatbuffers::Offset<CoreRangeSet> tt::target::ttnn::CreateCoreRangeSetDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const std::vector< tt::target::ttnn::CoreRange > *  core_ranges = nullptr 
)

◆ CreateCpuOp()

inline ::flatbuffers::Offset<CpuOp> tt::target::ttnn::CreateCpuOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>>  ins = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset<::flatbuffers::String >  func_name = 0,
uint32_t  dylib_id = 0 
)

◆ CreateCpuOpDirect()

inline ::flatbuffers::Offset<CpuOp> tt::target::ttnn::CreateCpuOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> *  ins = nullptr,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
const char *  func_name = nullptr,
uint32_t  dylib_id = 0 
)

◆ CreateDeallocateOp()

inline ::flatbuffers::Offset<DeallocateOp> tt::target::ttnn::CreateDeallocateOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
bool  force = false 
)

◆ CreateDistributionStrategy()

inline ::flatbuffers::Offset<DistributionStrategy> tt::target::ttnn::CreateDistributionStrategy ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::DistributedTensorConfig  strategy_type = tt::target::ttnn::DistributedTensorConfig::NONE,
::flatbuffers::Offset< void >  strategy = 0 
)

◆ CreateEltwiseBinaryCompositeOp()

inline ::flatbuffers::Offset<EltwiseBinaryCompositeOp> tt::target::ttnn::CreateEltwiseBinaryCompositeOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::EltwiseBinaryCompositeOpType  type = tt::target::ttnn::EltwiseBinaryCompositeOpType::Maximum,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  lhs = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  rhs = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateEltwiseBinaryOp()

inline ::flatbuffers::Offset<EltwiseBinaryOp> tt::target::ttnn::CreateEltwiseBinaryOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::EltwiseBinaryOpType  type = tt::target::ttnn::EltwiseBinaryOpType::Add,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  lhs = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  rhs = 0,
::flatbuffers::Optional< tt::target::DataType >  output_dtype = ::flatbuffers::nullopt,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateEltwiseOpWithFloatParams()

inline ::flatbuffers::Offset<EltwiseOpWithFloatParams> tt::target::ttnn::CreateEltwiseOpWithFloatParams ( ::flatbuffers::FlatBufferBuilder &  _fbb,
float  parameter = 0.0f 
)

◆ CreateEltwiseQuantizationOp()

inline ::flatbuffers::Offset<EltwiseQuantizationOp> tt::target::ttnn::CreateEltwiseQuantizationOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::EltwiseQuantizationOpType  type = tt::target::ttnn::EltwiseQuantizationOpType::Quantize,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Optional< int32_t >  axis = ::flatbuffers::nullopt,
::flatbuffers::Optional< tt::target::DataType >  output_dtype = ::flatbuffers::nullopt,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
tt::target::ttnn::EltwiseQuantizationOpParams  params_type = tt::target::ttnn::EltwiseQuantizationOpParams::NONE,
::flatbuffers::Offset< void >  params = 0 
)

◆ CreateEltwiseTernaryWhereOp()

inline ::flatbuffers::Offset<EltwiseTernaryWhereOp> tt::target::ttnn::CreateEltwiseTernaryWhereOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  first = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  second = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  third = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateEltwiseUnaryCompositeOp()

inline ::flatbuffers::Offset<EltwiseUnaryCompositeOp> tt::target::ttnn::CreateEltwiseUnaryCompositeOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::EltwiseUnaryCompositeOpType  type = tt::target::ttnn::EltwiseUnaryCompositeOpType::Cbrt,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
tt::target::ttnn::EltwiseUnaryCompositeOpParams  params_type = tt::target::ttnn::EltwiseUnaryCompositeOpParams::NONE,
::flatbuffers::Offset< void >  params = 0 
)

◆ CreateEltwiseUnaryOp()

inline ::flatbuffers::Offset<EltwiseUnaryOp> tt::target::ttnn::CreateEltwiseUnaryOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::EltwiseUnaryOpType  type = tt::target::ttnn::EltwiseUnaryOpType::Abs,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
tt::target::ttnn::EltwiseUnaryOpParams  params_type = tt::target::ttnn::EltwiseUnaryOpParams::NONE,
::flatbuffers::Offset< void >  params = 0 
)

◆ CreateEmbeddingBackwardOp()

inline ::flatbuffers::Offset<EmbeddingBackwardOp> tt::target::ttnn::CreateEmbeddingBackwardOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  input = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  weight = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in_grad = 0,
::flatbuffers::Optional< tt::target::DataType >  dtype = ::flatbuffers::nullopt,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateEmbeddingOp()

inline ::flatbuffers::Offset<EmbeddingOp> tt::target::ttnn::CreateEmbeddingOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  input = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  weight = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateEmptyOp()

inline ::flatbuffers::Offset<EmptyOp> tt::target::ttnn::CreateEmptyOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >>  shape = 0,
tt::target::DataType  dtype = tt::target::DataType::Float32,
tt::target::TensorLayout  layout = tt::target::TensorLayout::RowMajor,
uint32_t  num_shards = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0,
::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy >  strategy = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateEmptyOpDirect()

inline ::flatbuffers::Offset<EmptyOp> tt::target::ttnn::CreateEmptyOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const std::vector< int64_t > *  shape = nullptr,
tt::target::DataType  dtype = tt::target::DataType::Float32,
tt::target::TensorLayout  layout = tt::target::TensorLayout::RowMajor,
uint32_t  num_shards = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0,
::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy >  strategy = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateFillCacheOp()

inline ::flatbuffers::Offset<FillCacheOp> tt::target::ttnn::CreateFillCacheOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  cache = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  input = 0,
uint32_t  batch_offset = 0 
)

◆ CreateFromDeviceOp()

inline ::flatbuffers::Offset<FromDeviceOp> tt::target::ttnn::CreateFromDeviceOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateFullOp()

inline ::flatbuffers::Offset<FullOp> tt::target::ttnn::CreateFullOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
float  fill_value = 0.0f,
uint32_t  num_shards = 0,
::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy >  strategy = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateGetDeviceOp()

inline ::flatbuffers::Offset<GetDeviceOp> tt::target::ttnn::CreateGetDeviceOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const tt::target::Dim2d *  mesh = nullptr,
const tt::target::Dim2d *  offset = nullptr,
::flatbuffers::Offset<::flatbuffers::Vector< uint32_t >>  chip_ids = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  out = 0 
)

◆ CreateGetDeviceOpDirect()

inline ::flatbuffers::Offset<GetDeviceOp> tt::target::ttnn::CreateGetDeviceOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const tt::target::Dim2d *  mesh = nullptr,
const tt::target::Dim2d *  offset = nullptr,
const std::vector< uint32_t > *  chip_ids = nullptr,
::flatbuffers::Offset< tt::target::DeviceRef >  out = 0 
)

◆ CreateLayoutDesc()

inline ::flatbuffers::Offset<LayoutDesc> tt::target::ttnn::CreateLayoutDesc ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::OOBVal  oob_val = tt::target::OOBVal::Undef,
::flatbuffers::Offset< tt::target::ttnn::MemoryDesc >  memory_desc = 0,
::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy >  strategy = 0 
)

◆ CreateLinearOp()

inline ::flatbuffers::Offset<LinearOp> tt::target::ttnn::CreateLinearOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  a = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  b = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  bias = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
bool  transpose_a = false,
bool  transpose_b = false 
)

◆ CreateMatmulMultiCoreReuseMultiCast1DProgramConfig()

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig> tt::target::ttnn::CreateMatmulMultiCoreReuseMultiCast1DProgramConfig ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const tt::target::ttnn::CoreCoord *  compute_with_storage_grid_size = nullptr,
uint64_t  in0_block_w = 0,
uint64_t  out_subblock_h = 0,
uint64_t  out_subblock_w = 0,
uint64_t  out_block_h = 0,
uint64_t  out_block_w = 0,
uint64_t  per_core_m = 0,
uint64_t  per_core_n = 0,
bool  fuse_batch = false,
::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam >  fused_activation = 0,
bool  mcast_in0 = false,
bool  gather_in0 = false,
::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet >  hop_cores = 0,
uint64_t  num_global_cb_receivers = 0 
)

◆ CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig()

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> tt::target::ttnn::CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig ( ::flatbuffers::FlatBufferBuilder &  _fbb,
uint64_t  in0_block_w = 0,
uint64_t  per_core_m = 0,
uint64_t  per_core_n = 0,
::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam >  fused_activation = 0 
)

◆ CreateMatmulMultiCoreReuseMultiCastProgramConfig()

inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig> tt::target::ttnn::CreateMatmulMultiCoreReuseMultiCastProgramConfig ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const tt::target::ttnn::CoreCoord *  compute_with_storage_grid_size = nullptr,
uint64_t  in0_block_w = 0,
uint64_t  out_subblock_h = 0,
uint64_t  out_subblock_w = 0,
uint64_t  out_block_h = 0,
uint64_t  out_block_w = 0,
uint64_t  per_core_m = 0,
uint64_t  per_core_n = 0,
bool  transpose_mcast = false,
::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam >  fused_activation = 0,
bool  fuse_batch = false 
)

◆ CreateMatmulMultiCoreReuseProgramConfig()

inline ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig> tt::target::ttnn::CreateMatmulMultiCoreReuseProgramConfig ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const tt::target::ttnn::CoreCoord *  compute_with_storage_grid_size = nullptr,
uint64_t  in0_block_w = 0,
uint64_t  out_subblock_h = 0,
uint64_t  out_subblock_w = 0,
uint64_t  per_core_m = 0,
uint64_t  per_core_n = 0 
)

◆ CreateMatmulOp()

inline ::flatbuffers::Offset<MatmulOp> tt::target::ttnn::CreateMatmulOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  a = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  b = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
bool  transpose_a = false,
bool  transpose_b = false,
tt::target::ttnn::MatmulProgramConfig  matmul_program_config_type = tt::target::ttnn::MatmulProgramConfig::NONE,
::flatbuffers::Offset< void >  matmul_program_config = 0 
)

◆ CreateMaxPool2dOp()

inline ::flatbuffers::Offset<MaxPool2dOp> tt::target::ttnn::CreateMaxPool2dOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
uint32_t  batch_size = 0,
uint32_t  input_height = 0,
uint32_t  input_width = 0,
uint32_t  channels = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  kernel_size = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  stride = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  padding = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  dilation = 0,
bool  ceil_mode = false 
)

◆ CreateMaxPool2dOpDirect()

inline ::flatbuffers::Offset<MaxPool2dOp> tt::target::ttnn::CreateMaxPool2dOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
uint32_t  batch_size = 0,
uint32_t  input_height = 0,
uint32_t  input_width = 0,
uint32_t  channels = 0,
const std::vector< int32_t > *  kernel_size = nullptr,
const std::vector< int32_t > *  stride = nullptr,
const std::vector< int32_t > *  padding = nullptr,
const std::vector< int32_t > *  dilation = nullptr,
bool  ceil_mode = false 
)

◆ CreateMemoryConfig()

inline ::flatbuffers::Offset<MemoryConfig> tt::target::ttnn::CreateMemoryConfig ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::TensorMemoryLayout  tensor_memory_layout = tt::target::ttnn::TensorMemoryLayout::Interleaved,
tt::target::BufferType  buffer_type = tt::target::BufferType::DRAM,
::flatbuffers::Offset< tt::target::ttnn::ShardSpec >  shard_spec = 0 
)

◆ CreateMemoryDesc()

inline ::flatbuffers::Offset<MemoryDesc> tt::target::ttnn::CreateMemoryDesc ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::StorageType  storage_type = tt::target::ttnn::StorageType::Owned,
const tt::target::Dim2d *  tile_shape = nullptr,
tt::target::DataType  data_type = tt::target::DataType::Float32,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0,
uint64_t  size = 0 
)

◆ CreateMeshShardOp()

inline ::flatbuffers::Offset<MeshShardOp> tt::target::ttnn::CreateMeshShardOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
tt::target::ttnn::MeshShardDirection  shard_direction = tt::target::ttnn::MeshShardDirection::FullToShardShape,
tt::target::ttnn::MeshShardType  shard_type = tt::target::ttnn::MeshShardType::Identity,
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >>  shard_shape = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >>  shard_dims = 0 
)

◆ CreateMeshShardOpDirect()

inline ::flatbuffers::Offset<MeshShardOp> tt::target::ttnn::CreateMeshShardOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
tt::target::ttnn::MeshShardDirection  shard_direction = tt::target::ttnn::MeshShardDirection::FullToShardShape,
tt::target::ttnn::MeshShardType  shard_type = tt::target::ttnn::MeshShardType::Identity,
const std::vector< int64_t > *  shard_shape = nullptr,
const std::vector< int64_t > *  shard_dims = nullptr 
)

◆ CreateMorehCumSumOp()

inline ::flatbuffers::Offset<MorehCumSumOp> tt::target::ttnn::CreateMorehCumSumOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
int64_t  dim = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0 
)

◆ CreateNamedFullOp()

inline ::flatbuffers::Offset<NamedFullOp> tt::target::ttnn::CreateNamedFullOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::NamedFullOpType  type = tt::target::ttnn::NamedFullOpType::Zeros,
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >>  shape = 0,
::flatbuffers::Optional< tt::target::DataType >  dtype = ::flatbuffers::nullopt,
::flatbuffers::Optional< tt::target::TensorLayout >  layout = ::flatbuffers::nullopt,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateNamedFullOpDirect()

inline ::flatbuffers::Offset<NamedFullOp> tt::target::ttnn::CreateNamedFullOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::NamedFullOpType  type = tt::target::ttnn::NamedFullOpType::Zeros,
const std::vector< int64_t > *  shape = nullptr,
::flatbuffers::Optional< tt::target::DataType >  dtype = ::flatbuffers::nullopt,
::flatbuffers::Optional< tt::target::TensorLayout >  layout = ::flatbuffers::nullopt,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateNonUniformScale2D()

inline ::flatbuffers::Offset<NonUniformScale2D> tt::target::ttnn::CreateNonUniformScale2D ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  scale = 0 
)

◆ CreateNonUniformScale2DDirect()

inline ::flatbuffers::Offset<NonUniformScale2D> tt::target::ttnn::CreateNonUniformScale2DDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const std::vector< int32_t > *  scale = nullptr 
)

◆ CreateOperation()

inline ::flatbuffers::Offset<Operation> tt::target::ttnn::CreateOperation ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::OpType  type_type = tt::target::ttnn::OpType::NONE,
::flatbuffers::Offset< void >  type = 0,
::flatbuffers::Offset<::flatbuffers::String >  debug_info = 0,
::flatbuffers::Offset<::flatbuffers::String >  loc_info = 0 
)

◆ CreateOperationDirect()

inline ::flatbuffers::Offset<Operation> tt::target::ttnn::CreateOperationDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::OpType  type_type = tt::target::ttnn::OpType::NONE,
::flatbuffers::Offset< void >  type = 0,
const char *  debug_info = nullptr,
const char *  loc_info = nullptr 
)

◆ CreatePadOp()

inline ::flatbuffers::Offset<PadOp> tt::target::ttnn::CreatePadOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset<::flatbuffers::Vector< uint32_t >>  padding = 0,
float  value = 0.0f,
bool  use_multicore = false,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0 
)

◆ CreatePadOpDirect()

inline ::flatbuffers::Offset<PadOp> tt::target::ttnn::CreatePadOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
const std::vector< uint32_t > *  padding = nullptr,
float  value = 0.0f,
bool  use_multicore = false,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0 
)

◆ CreatePermuteOp()

inline ::flatbuffers::Offset<PermuteOp> tt::target::ttnn::CreatePermuteOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >>  permutation = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0,
float  pad_value = 0.0f,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreatePermuteOpDirect()

inline ::flatbuffers::Offset<PermuteOp> tt::target::ttnn::CreatePermuteOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
const std::vector< int64_t > *  permutation = nullptr,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0,
float  pad_value = 0.0f,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreatePrepareConv2dWeightsOp()

inline ::flatbuffers::Offset<PrepareConv2dWeightsOp> tt::target::ttnn::CreatePrepareConv2dWeightsOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  weight_tensor = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  input_memory_config = 0,
tt::target::TensorLayout  input_tensor_layout = tt::target::TensorLayout::RowMajor,
::flatbuffers::Offset<::flatbuffers::String >  weights_format = 0,
uint32_t  in_channels = 0,
uint32_t  out_channels = 0,
uint32_t  batch_size = 0,
uint32_t  input_height = 0,
uint32_t  input_width = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  kernel_size = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  stride = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  padding = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  dilation = 0,
bool  has_bias = false,
uint32_t  groups = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig >  conv2d_config = 0 
)

◆ CreatePrepareConv2dWeightsOpDirect()

inline ::flatbuffers::Offset<PrepareConv2dWeightsOp> tt::target::ttnn::CreatePrepareConv2dWeightsOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  weight_tensor = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  input_memory_config = 0,
tt::target::TensorLayout  input_tensor_layout = tt::target::TensorLayout::RowMajor,
const char *  weights_format = nullptr,
uint32_t  in_channels = 0,
uint32_t  out_channels = 0,
uint32_t  batch_size = 0,
uint32_t  input_height = 0,
uint32_t  input_width = 0,
const std::vector< int32_t > *  kernel_size = nullptr,
const std::vector< int32_t > *  stride = nullptr,
const std::vector< int32_t > *  padding = nullptr,
const std::vector< int32_t > *  dilation = nullptr,
bool  has_bias = false,
uint32_t  groups = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig >  conv2d_config = 0 
)

◆ CreateProgram()

inline ::flatbuffers::Offset<Program> tt::target::ttnn::CreateProgram ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset<::flatbuffers::String >  name = 0,
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>>  inputs = 0,
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>>  outputs = 0,
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::Operation >>>  operations = 0,
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::DynamicLib >>>  dylibs = 0,
::flatbuffers::Offset< tt::target::DebugInfo >  debug_info = 0 
)

◆ CreateProgramDirect()

inline ::flatbuffers::Offset<Program> tt::target::ttnn::CreateProgramDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const char *  name = nullptr,
const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> *  inputs = nullptr,
const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> *  outputs = nullptr,
const std::vector<::flatbuffers::Offset< tt::target::ttnn::Operation >> *  operations = nullptr,
const std::vector<::flatbuffers::Offset< tt::target::DynamicLib >> *  dylibs = nullptr,
::flatbuffers::Offset< tt::target::DebugInfo >  debug_info = 0 
)

◆ CreateQuantizeDequantizeOpParams()

inline ::flatbuffers::Offset<QuantizeDequantizeOpParams> tt::target::ttnn::CreateQuantizeDequantizeOpParams ( ::flatbuffers::FlatBufferBuilder &  _fbb,
float  scale = 0.0f,
int32_t  zero_point = 0 
)

◆ CreateReduceScatterOp()

inline ::flatbuffers::Offset<ReduceScatterOp> tt::target::ttnn::CreateReduceScatterOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
int32_t  scatter_dim = 0,
uint32_t  reduce_type = 0,
uint32_t  cluster_axis = 0,
uint32_t  num_links = 0 
)

◆ CreateReductionArgMaxOp()

inline ::flatbuffers::Offset<ReductionArgMaxOp> tt::target::ttnn::CreateReductionArgMaxOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Optional< int32_t >  dim = ::flatbuffers::nullopt,
bool  use_multicore = false,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0 
)

◆ CreateReductionOp()

inline ::flatbuffers::Offset<ReductionOp> tt::target::ttnn::CreateReductionOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::ReductionOpType  type = tt::target::ttnn::ReductionOpType::Sum,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  dim_arg = 0,
bool  keep_dim = false 
)

◆ CreateReductionOpDirect()

inline ::flatbuffers::Offset<ReductionOp> tt::target::ttnn::CreateReductionOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::ReductionOpType  type = tt::target::ttnn::ReductionOpType::Sum,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
const std::vector< int32_t > *  dim_arg = nullptr,
bool  keep_dim = false 
)

◆ CreateReductionProdOp()

inline ::flatbuffers::Offset<ReductionProdOp> tt::target::ttnn::CreateReductionProdOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
bool  all_dimensions = false,
int64_t  dim_arg = 0,
bool  keep_dim = false,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0 
)

◆ CreateRepeatInterleaveOp()

inline ::flatbuffers::Offset<RepeatInterleaveOp> tt::target::ttnn::CreateRepeatInterleaveOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  input = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
uint32_t  repeats = 0,
int32_t  dim = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0 
)

◆ CreateRepeatOp()

inline ::flatbuffers::Offset<RepeatOp> tt::target::ttnn::CreateRepeatOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >>  repeat_dims = 0 
)

◆ CreateRepeatOpDirect()

inline ::flatbuffers::Offset<RepeatOp> tt::target::ttnn::CreateRepeatOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
const std::vector< int64_t > *  repeat_dims = nullptr 
)

◆ CreateReplicateTensor()

inline ::flatbuffers::Offset<ReplicateTensor> tt::target::ttnn::CreateReplicateTensor ( ::flatbuffers::FlatBufferBuilder &  _fbb,
uint32_t  replication_factor = 0 
)

◆ CreateRequantizeOpParams()

inline ::flatbuffers::Offset<RequantizeOpParams> tt::target::ttnn::CreateRequantizeOpParams ( ::flatbuffers::FlatBufferBuilder &  _fbb,
float  in_scale = 0.0f,
int32_t  in_zero_point = 0,
float  out_scale = 0.0f,
int32_t  out_zero_point = 0 
)

◆ CreateReshapeOp()

inline ::flatbuffers::Offset<ReshapeOp> tt::target::ttnn::CreateReshapeOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  shape = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0 
)

◆ CreateReshapeOpDirect()

inline ::flatbuffers::Offset<ReshapeOp> tt::target::ttnn::CreateReshapeOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
const std::vector< int32_t > *  shape = nullptr,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0 
)

◆ CreateShardSpec()

inline ::flatbuffers::Offset<ShardSpec> tt::target::ttnn::CreateShardSpec ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset<::flatbuffers::Vector< const tt::target::Dim2dRange * >>  grid = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  shard_shape = 0 
)

◆ CreateShardSpecDirect()

inline ::flatbuffers::Offset<ShardSpec> tt::target::ttnn::CreateShardSpecDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const std::vector< tt::target::Dim2dRange > *  grid = nullptr,
const std::vector< int32_t > *  shard_shape = nullptr 
)

◆ CreateShardTensor()

inline ::flatbuffers::Offset<ShardTensor> tt::target::ttnn::CreateShardTensor ( ::flatbuffers::FlatBufferBuilder &  _fbb,
uint32_t  shard_dim = 0 
)

◆ CreateShardTensor2D()

inline ::flatbuffers::Offset<ShardTensor2D> tt::target::ttnn::CreateShardTensor2D ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const tt::target::Dim2d *  shard_mesh = nullptr 
)

◆ CreateSliceOp()

inline ::flatbuffers::Offset<SliceOp> tt::target::ttnn::CreateSliceOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >>  begins = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >>  ends = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >>  step = 0 
)

◆ CreateSliceOpDirect()

inline ::flatbuffers::Offset<SliceOp> tt::target::ttnn::CreateSliceOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
const std::vector< int64_t > *  begins = nullptr,
const std::vector< int64_t > *  ends = nullptr,
const std::vector< int64_t > *  step = nullptr 
)

◆ CreateSoftmaxOp()

inline ::flatbuffers::Offset<SoftmaxOp> tt::target::ttnn::CreateSoftmaxOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
int32_t  dimension = 0 
)

◆ CreateTensorDesc()

inline ::flatbuffers::Offset<TensorDesc> tt::target::ttnn::CreateTensorDesc ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  shape = 0,
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >>  mesh_shape = 0,
::flatbuffers::Offset< tt::target::ttnn::LayoutDesc >  layout = 0 
)

◆ CreateTensorDescDirect()

inline ::flatbuffers::Offset<TensorDesc> tt::target::ttnn::CreateTensorDescDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const std::vector< int32_t > *  shape = nullptr,
const std::vector< int32_t > *  mesh_shape = nullptr,
::flatbuffers::Offset< tt::target::ttnn::LayoutDesc >  layout = 0 
)

◆ CreateTensorRef()

inline ::flatbuffers::Offset<TensorRef> tt::target::ttnn::CreateTensorRef ( ::flatbuffers::FlatBufferBuilder &  _fbb,
uint32_t  global_id = 0,
uint64_t  size = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorDesc >  desc = 0 
)

◆ CreateToDeviceOp()

inline ::flatbuffers::Offset<ToDeviceOp> tt::target::ttnn::CreateToDeviceOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateToDTypeOp()

inline ::flatbuffers::Offset<ToDTypeOp> tt::target::ttnn::CreateToDTypeOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
tt::target::DataType  dtype = tt::target::DataType::Float32,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateToLayoutOp()

inline ::flatbuffers::Offset<ToLayoutOp> tt::target::ttnn::CreateToLayoutOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
tt::target::TensorLayout  layout = tt::target::TensorLayout::RowMajor,
::flatbuffers::Optional< tt::target::DataType >  dtype = ::flatbuffers::nullopt,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0,
::flatbuffers::Offset< tt::target::DeviceRef >  device = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateToMemoryConfigOp()

inline ::flatbuffers::Offset<ToMemoryConfigOp> tt::target::ttnn::CreateToMemoryConfigOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in0 = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memcfg = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateTransposeOp()

inline ::flatbuffers::Offset<TransposeOp> tt::target::ttnn::CreateTransposeOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0,
int32_t  dim0 = 0,
int32_t  dim1 = 0 
)

◆ CreateTTNNBinary()

inline ::flatbuffers::Offset<TTNNBinary> tt::target::ttnn::CreateTTNNBinary ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const tt::target::Version *  version = nullptr,
::flatbuffers::Offset<::flatbuffers::String >  ttmlir_git_hash = 0,
::flatbuffers::Offset< tt::target::SystemDesc >  system_desc = 0,
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::Program >>>  programs = 0 
)

◆ CreateTTNNBinaryDirect()

inline ::flatbuffers::Offset<TTNNBinary> tt::target::ttnn::CreateTTNNBinaryDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
const tt::target::Version *  version = nullptr,
const char *  ttmlir_git_hash = nullptr,
::flatbuffers::Offset< tt::target::SystemDesc >  system_desc = 0,
const std::vector<::flatbuffers::Offset< tt::target::ttnn::Program >> *  programs = nullptr 
)

◆ CreateTypecastOp()

inline ::flatbuffers::Offset<TypecastOp> tt::target::ttnn::CreateTypecastOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
tt::target::DataType  dtype = tt::target::DataType::Float32,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateUnaryWithParam()

inline ::flatbuffers::Offset<UnaryWithParam> tt::target::ttnn::CreateUnaryWithParam ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::UnaryOpType  op_type = tt::target::ttnn::UnaryOpType::Exp,
::flatbuffers::Offset<::flatbuffers::Vector< double >>  params = 0 
)

◆ CreateUnaryWithParamDirect()

inline ::flatbuffers::Offset<UnaryWithParam> tt::target::ttnn::CreateUnaryWithParamDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
tt::target::ttnn::UnaryOpType  op_type = tt::target::ttnn::UnaryOpType::Exp,
const std::vector< double > *  params = nullptr 
)

◆ CreateUniformScale2D()

inline ::flatbuffers::Offset<UniformScale2D> tt::target::ttnn::CreateUniformScale2D ( ::flatbuffers::FlatBufferBuilder &  _fbb,
int32_t  scale = 0 
)

◆ CreateUpdateCacheOp()

inline ::flatbuffers::Offset<UpdateCacheOp> tt::target::ttnn::CreateUpdateCacheOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  cache = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  input = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  update_index = 0,
uint32_t  batch_offset = 0 
)

◆ CreateUpsampleOp()

inline ::flatbuffers::Offset<UpsampleOp> tt::target::ttnn::CreateUpsampleOp ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
tt::target::ttnn::Scale2D  scale_factor_type = tt::target::ttnn::Scale2D::NONE,
::flatbuffers::Offset< void >  scale_factor = 0,
::flatbuffers::Offset<::flatbuffers::String >  mode = 0,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ CreateUpsampleOpDirect()

inline ::flatbuffers::Offset<UpsampleOp> tt::target::ttnn::CreateUpsampleOpDirect ( ::flatbuffers::FlatBufferBuilder &  _fbb,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  in = 0,
tt::target::ttnn::Scale2D  scale_factor_type = tt::target::ttnn::Scale2D::NONE,
::flatbuffers::Offset< void >  scale_factor = 0,
const char *  mode = nullptr,
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig >  memory_config = 0,
::flatbuffers::Offset< tt::target::ttnn::TensorRef >  out = 0 
)

◆ DistributionStrategy::strategy_as< tt::target::ttnn::AllGatherTensor >()

template<>
const tt::target::ttnn::AllGatherTensor* tt::target::ttnn::DistributionStrategy::strategy_as< tt::target::ttnn::AllGatherTensor > ( ) const
inline

◆ DistributionStrategy::strategy_as< tt::target::ttnn::ReplicateTensor >()

template<>
const tt::target::ttnn::ReplicateTensor* tt::target::ttnn::DistributionStrategy::strategy_as< tt::target::ttnn::ReplicateTensor > ( ) const
inline

◆ DistributionStrategy::strategy_as< tt::target::ttnn::ShardTensor >()

template<>
const tt::target::ttnn::ShardTensor* tt::target::ttnn::DistributionStrategy::strategy_as< tt::target::ttnn::ShardTensor > ( ) const
inline

◆ DistributionStrategy::strategy_as< tt::target::ttnn::ShardTensor2D >()

template<>
const tt::target::ttnn::ShardTensor2D* tt::target::ttnn::DistributionStrategy::strategy_as< tt::target::ttnn::ShardTensor2D > ( ) const
inline

◆ EltwiseQuantizationOp::params_as< tt::target::ttnn::QuantizeDequantizeOpParams >()

template<>
const tt::target::ttnn::QuantizeDequantizeOpParams* tt::target::ttnn::EltwiseQuantizationOp::params_as< tt::target::ttnn::QuantizeDequantizeOpParams > ( ) const
inline

◆ EltwiseQuantizationOp::params_as< tt::target::ttnn::RequantizeOpParams >()

template<>
const tt::target::ttnn::RequantizeOpParams* tt::target::ttnn::EltwiseQuantizationOp::params_as< tt::target::ttnn::RequantizeOpParams > ( ) const
inline

◆ EltwiseUnaryCompositeOp::params_as< tt::target::ttnn::ClampScalarOpParams >()

template<>
const tt::target::ttnn::ClampScalarOpParams* tt::target::ttnn::EltwiseUnaryCompositeOp::params_as< tt::target::ttnn::ClampScalarOpParams > ( ) const
inline

◆ EltwiseUnaryCompositeOp::params_as< tt::target::ttnn::ClampTensorOpParams >()

template<>
const tt::target::ttnn::ClampTensorOpParams* tt::target::ttnn::EltwiseUnaryCompositeOp::params_as< tt::target::ttnn::ClampTensorOpParams > ( ) const
inline

◆ EltwiseUnaryOp::params_as< tt::target::ttnn::EltwiseOpWithFloatParams >()

template<>
const tt::target::ttnn::EltwiseOpWithFloatParams* tt::target::ttnn::EltwiseUnaryOp::params_as< tt::target::ttnn::EltwiseOpWithFloatParams > ( ) const
inline

◆ EnumNameDistributedTensorConfig()

const char* tt::target::ttnn::EnumNameDistributedTensorConfig ( DistributedTensorConfig  e)
inline

◆ EnumNameEltwiseBinaryCompositeOpType()

const char* tt::target::ttnn::EnumNameEltwiseBinaryCompositeOpType ( EltwiseBinaryCompositeOpType  e)
inline

◆ EnumNameEltwiseBinaryOpType()

const char* tt::target::ttnn::EnumNameEltwiseBinaryOpType ( EltwiseBinaryOpType  e)
inline

◆ EnumNameEltwiseQuantizationOpParams()

const char* tt::target::ttnn::EnumNameEltwiseQuantizationOpParams ( EltwiseQuantizationOpParams  e)
inline

◆ EnumNameEltwiseQuantizationOpType()

const char* tt::target::ttnn::EnumNameEltwiseQuantizationOpType ( EltwiseQuantizationOpType  e)
inline

◆ EnumNameEltwiseUnaryCompositeOpParams()

const char* tt::target::ttnn::EnumNameEltwiseUnaryCompositeOpParams ( EltwiseUnaryCompositeOpParams  e)
inline

◆ EnumNameEltwiseUnaryCompositeOpType()

const char* tt::target::ttnn::EnumNameEltwiseUnaryCompositeOpType ( EltwiseUnaryCompositeOpType  e)
inline

◆ EnumNameEltwiseUnaryOpParams()

const char* tt::target::ttnn::EnumNameEltwiseUnaryOpParams ( EltwiseUnaryOpParams  e)
inline

◆ EnumNameEltwiseUnaryOpType()

const char* tt::target::ttnn::EnumNameEltwiseUnaryOpType ( EltwiseUnaryOpType  e)
inline

◆ EnumNameMatmulProgramConfig()

const char* tt::target::ttnn::EnumNameMatmulProgramConfig ( MatmulProgramConfig  e)
inline

◆ EnumNameMeshShardDirection()

const char* tt::target::ttnn::EnumNameMeshShardDirection ( MeshShardDirection  e)
inline

◆ EnumNameMeshShardType()

const char* tt::target::ttnn::EnumNameMeshShardType ( MeshShardType  e)
inline

◆ EnumNameNamedFullOpType()

const char* tt::target::ttnn::EnumNameNamedFullOpType ( NamedFullOpType  e)
inline

◆ EnumNameOpType()

const char* tt::target::ttnn::EnumNameOpType ( OpType  e)
inline

◆ EnumNameReductionOpType()

const char* tt::target::ttnn::EnumNameReductionOpType ( ReductionOpType  e)
inline

◆ EnumNameScale2D()

const char* tt::target::ttnn::EnumNameScale2D ( Scale2D  e)
inline

◆ EnumNamesDistributedTensorConfig()

const char* const* tt::target::ttnn::EnumNamesDistributedTensorConfig ( )
inline

◆ EnumNamesEltwiseBinaryCompositeOpType()

const char* const* tt::target::ttnn::EnumNamesEltwiseBinaryCompositeOpType ( )
inline

◆ EnumNamesEltwiseBinaryOpType()

const char* const* tt::target::ttnn::EnumNamesEltwiseBinaryOpType ( )
inline

◆ EnumNamesEltwiseQuantizationOpParams()

const char* const* tt::target::ttnn::EnumNamesEltwiseQuantizationOpParams ( )
inline

◆ EnumNamesEltwiseQuantizationOpType()

const char* const* tt::target::ttnn::EnumNamesEltwiseQuantizationOpType ( )
inline

◆ EnumNamesEltwiseUnaryCompositeOpParams()

const char* const* tt::target::ttnn::EnumNamesEltwiseUnaryCompositeOpParams ( )
inline

◆ EnumNamesEltwiseUnaryCompositeOpType()

const char* const* tt::target::ttnn::EnumNamesEltwiseUnaryCompositeOpType ( )
inline

◆ EnumNamesEltwiseUnaryOpParams()

const char* const* tt::target::ttnn::EnumNamesEltwiseUnaryOpParams ( )
inline

◆ EnumNamesEltwiseUnaryOpType()

const char* const* tt::target::ttnn::EnumNamesEltwiseUnaryOpType ( )
inline

◆ EnumNamesMatmulProgramConfig()

const char* const* tt::target::ttnn::EnumNamesMatmulProgramConfig ( )
inline

◆ EnumNamesMeshShardDirection()

const char* const* tt::target::ttnn::EnumNamesMeshShardDirection ( )
inline

◆ EnumNamesMeshShardType()

const char* const* tt::target::ttnn::EnumNamesMeshShardType ( )
inline

◆ EnumNamesNamedFullOpType()

const char* const* tt::target::ttnn::EnumNamesNamedFullOpType ( )
inline

◆ EnumNamesOpType()

const char* const* tt::target::ttnn::EnumNamesOpType ( )
inline

◆ EnumNamesReductionOpType()

const char* const* tt::target::ttnn::EnumNamesReductionOpType ( )
inline

◆ EnumNamesScale2D()

const char* const* tt::target::ttnn::EnumNamesScale2D ( )
inline

◆ EnumNamesStorageType()

const char* const* tt::target::ttnn::EnumNamesStorageType ( )
inline

◆ EnumNamesTensorMemoryLayout()

const char* const* tt::target::ttnn::EnumNamesTensorMemoryLayout ( )
inline

◆ EnumNameStorageType()

const char* tt::target::ttnn::EnumNameStorageType ( StorageType  e)
inline

◆ EnumNamesUnaryOpType()

const char* const* tt::target::ttnn::EnumNamesUnaryOpType ( )
inline

◆ EnumNameTensorMemoryLayout()

const char* tt::target::ttnn::EnumNameTensorMemoryLayout ( TensorMemoryLayout  e)
inline

◆ EnumNameUnaryOpType()

const char* tt::target::ttnn::EnumNameUnaryOpType ( UnaryOpType  e)
inline

◆ EnumValuesDistributedTensorConfig()

const DistributedTensorConfig(& tt::target::ttnn::EnumValuesDistributedTensorConfig ( ) )[5]
inline

◆ EnumValuesEltwiseBinaryCompositeOpType()

const EltwiseBinaryCompositeOpType(& tt::target::ttnn::EnumValuesEltwiseBinaryCompositeOpType ( ) )[9]
inline

◆ EnumValuesEltwiseBinaryOpType()

const EltwiseBinaryOpType(& tt::target::ttnn::EnumValuesEltwiseBinaryOpType ( ) )[13]
inline

◆ EnumValuesEltwiseQuantizationOpParams()

const EltwiseQuantizationOpParams(& tt::target::ttnn::EnumValuesEltwiseQuantizationOpParams ( ) )[3]
inline

◆ EnumValuesEltwiseQuantizationOpType()

const EltwiseQuantizationOpType(& tt::target::ttnn::EnumValuesEltwiseQuantizationOpType ( ) )[3]
inline

◆ EnumValuesEltwiseUnaryCompositeOpParams()

const EltwiseUnaryCompositeOpParams(& tt::target::ttnn::EnumValuesEltwiseUnaryCompositeOpParams ( ) )[3]
inline

◆ EnumValuesEltwiseUnaryCompositeOpType()

const EltwiseUnaryCompositeOpType(& tt::target::ttnn::EnumValuesEltwiseUnaryCompositeOpType ( ) )[4]
inline

◆ EnumValuesEltwiseUnaryOpParams()

const EltwiseUnaryOpParams(& tt::target::ttnn::EnumValuesEltwiseUnaryOpParams ( ) )[2]
inline

◆ EnumValuesEltwiseUnaryOpType()

const EltwiseUnaryOpType(& tt::target::ttnn::EnumValuesEltwiseUnaryOpType ( ) )[23]
inline

◆ EnumValuesMatmulProgramConfig()

const MatmulProgramConfig(& tt::target::ttnn::EnumValuesMatmulProgramConfig ( ) )[5]
inline

◆ EnumValuesMeshShardDirection()

const MeshShardDirection(& tt::target::ttnn::EnumValuesMeshShardDirection ( ) )[2]
inline

◆ EnumValuesMeshShardType()

const MeshShardType(& tt::target::ttnn::EnumValuesMeshShardType ( ) )[4]
inline

◆ EnumValuesNamedFullOpType()

const NamedFullOpType(& tt::target::ttnn::EnumValuesNamedFullOpType ( ) )[2]
inline

◆ EnumValuesOpType()

const OpType(& tt::target::ttnn::EnumValuesOpType ( ) )[50]
inline

◆ EnumValuesReductionOpType()

const ReductionOpType(& tt::target::ttnn::EnumValuesReductionOpType ( ) )[4]
inline

◆ EnumValuesScale2D()

const Scale2D(& tt::target::ttnn::EnumValuesScale2D ( ) )[3]
inline

◆ EnumValuesStorageType()

const StorageType(& tt::target::ttnn::EnumValuesStorageType ( ) )[5]
inline

◆ EnumValuesTensorMemoryLayout()

const TensorMemoryLayout(& tt::target::ttnn::EnumValuesTensorMemoryLayout ( ) )[5]
inline

◆ EnumValuesUnaryOpType()

const UnaryOpType(& tt::target::ttnn::EnumValuesUnaryOpType ( ) )[80]
inline

◆ FinishSizePrefixedTTNNBinaryBuffer()

void tt::target::ttnn::FinishSizePrefixedTTNNBinaryBuffer ( ::flatbuffers::FlatBufferBuilder &  fbb,
::flatbuffers::Offset< tt::target::ttnn::TTNNBinary >  root 
)
inline

◆ FinishTTNNBinaryBuffer()

void tt::target::ttnn::FinishTTNNBinaryBuffer ( ::flatbuffers::FlatBufferBuilder &  fbb,
::flatbuffers::Offset< tt::target::ttnn::TTNNBinary >  root 
)
inline

◆ FLATBUFFERS_MANUALLY_ALIGNED_STRUCT()

tt::target::ttnn::FLATBUFFERS_MANUALLY_ALIGNED_STRUCT ( )

◆ FLATBUFFERS_STRUCT_END() [1/2]

tt::target::ttnn::FLATBUFFERS_STRUCT_END ( CoreCoord  ,
16   
)

◆ FLATBUFFERS_STRUCT_END() [2/2]

tt::target::ttnn::FLATBUFFERS_STRUCT_END ( CoreRange  ,
32   
)

◆ GetSizePrefixedTTNNBinary()

const tt::target::ttnn::TTNNBinary* tt::target::ttnn::GetSizePrefixedTTNNBinary ( const void *  buf)
inline

◆ GetTTNNBinary()

const tt::target::ttnn::TTNNBinary* tt::target::ttnn::GetTTNNBinary ( const void *  buf)
inline

◆ MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig >()

template<>
const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig* tt::target::ttnn::MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig > ( ) const
inline

◆ MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig >()

template<>
const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig* tt::target::ttnn::MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig > ( ) const
inline

◆ MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig >()

template<>
const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig* tt::target::ttnn::MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig > ( ) const
inline

◆ MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseProgramConfig >()

template<>
const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig* tt::target::ttnn::MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseProgramConfig > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::AllGatherOp >()

template<>
const tt::target::ttnn::AllGatherOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::AllGatherOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ArangeOp >()

template<>
const tt::target::ttnn::ArangeOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ArangeOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::CollectivePermuteOp >()

template<>
const tt::target::ttnn::CollectivePermuteOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::CollectivePermuteOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ConcatOp >()

template<>
const tt::target::ttnn::ConcatOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ConcatOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ConstantOp >()

template<>
const tt::target::ttnn::ConstantOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ConstantOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ConstructTensorOp >()

template<>
const tt::target::ttnn::ConstructTensorOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ConstructTensorOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::Conv2dOp >()

template<>
const tt::target::ttnn::Conv2dOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::Conv2dOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ConvTranspose2dOp >()

template<>
const tt::target::ttnn::ConvTranspose2dOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ConvTranspose2dOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::CpuOp >()

template<>
const tt::target::ttnn::CpuOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::CpuOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::DeallocateOp >()

template<>
const tt::target::ttnn::DeallocateOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::DeallocateOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::EltwiseBinaryCompositeOp >()

template<>
const tt::target::ttnn::EltwiseBinaryCompositeOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::EltwiseBinaryCompositeOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::EltwiseBinaryOp >()

template<>
const tt::target::ttnn::EltwiseBinaryOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::EltwiseBinaryOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::EltwiseQuantizationOp >()

template<>
const tt::target::ttnn::EltwiseQuantizationOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::EltwiseQuantizationOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::EltwiseTernaryWhereOp >()

template<>
const tt::target::ttnn::EltwiseTernaryWhereOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::EltwiseTernaryWhereOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::EltwiseUnaryCompositeOp >()

template<>
const tt::target::ttnn::EltwiseUnaryCompositeOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::EltwiseUnaryCompositeOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::EltwiseUnaryOp >()

template<>
const tt::target::ttnn::EltwiseUnaryOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::EltwiseUnaryOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::EmbeddingBackwardOp >()

template<>
const tt::target::ttnn::EmbeddingBackwardOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::EmbeddingBackwardOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::EmbeddingOp >()

template<>
const tt::target::ttnn::EmbeddingOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::EmbeddingOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::EmptyOp >()

template<>
const tt::target::ttnn::EmptyOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::EmptyOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::FillCacheOp >()

template<>
const tt::target::ttnn::FillCacheOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::FillCacheOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::FromDeviceOp >()

template<>
const tt::target::ttnn::FromDeviceOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::FromDeviceOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::FullOp >()

template<>
const tt::target::ttnn::FullOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::FullOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::GetDeviceOp >()

template<>
const tt::target::ttnn::GetDeviceOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::GetDeviceOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::LinearOp >()

template<>
const tt::target::ttnn::LinearOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::LinearOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::MatmulOp >()

template<>
const tt::target::ttnn::MatmulOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::MatmulOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::MaxPool2dOp >()

template<>
const tt::target::ttnn::MaxPool2dOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::MaxPool2dOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::MeshShardOp >()

template<>
const tt::target::ttnn::MeshShardOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::MeshShardOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::MorehCumSumOp >()

template<>
const tt::target::ttnn::MorehCumSumOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::MorehCumSumOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::NamedFullOp >()

template<>
const tt::target::ttnn::NamedFullOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::NamedFullOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::PadOp >()

template<>
const tt::target::ttnn::PadOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::PadOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::PermuteOp >()

template<>
const tt::target::ttnn::PermuteOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::PermuteOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::PrepareConv2dWeightsOp >()

template<>
const tt::target::ttnn::PrepareConv2dWeightsOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::PrepareConv2dWeightsOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ReduceScatterOp >()

template<>
const tt::target::ttnn::ReduceScatterOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ReduceScatterOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ReductionArgMaxOp >()

template<>
const tt::target::ttnn::ReductionArgMaxOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ReductionArgMaxOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ReductionOp >()

template<>
const tt::target::ttnn::ReductionOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ReductionOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ReductionProdOp >()

template<>
const tt::target::ttnn::ReductionProdOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ReductionProdOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::RepeatInterleaveOp >()

template<>
const tt::target::ttnn::RepeatInterleaveOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::RepeatInterleaveOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::RepeatOp >()

template<>
const tt::target::ttnn::RepeatOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::RepeatOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ReshapeOp >()

template<>
const tt::target::ttnn::ReshapeOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ReshapeOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::SliceOp >()

template<>
const tt::target::ttnn::SliceOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::SliceOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::SoftmaxOp >()

template<>
const tt::target::ttnn::SoftmaxOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::SoftmaxOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ToDeviceOp >()

template<>
const tt::target::ttnn::ToDeviceOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ToDeviceOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ToDTypeOp >()

template<>
const tt::target::ttnn::ToDTypeOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ToDTypeOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ToLayoutOp >()

template<>
const tt::target::ttnn::ToLayoutOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ToLayoutOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::ToMemoryConfigOp >()

template<>
const tt::target::ttnn::ToMemoryConfigOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::ToMemoryConfigOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::TransposeOp >()

template<>
const tt::target::ttnn::TransposeOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::TransposeOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::TypecastOp >()

template<>
const tt::target::ttnn::TypecastOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::TypecastOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::UpdateCacheOp >()

template<>
const tt::target::ttnn::UpdateCacheOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::UpdateCacheOp > ( ) const
inline

◆ Operation::type_as< tt::target::ttnn::UpsampleOp >()

template<>
const tt::target::ttnn::UpsampleOp* tt::target::ttnn::Operation::type_as< tt::target::ttnn::UpsampleOp > ( ) const
inline

◆ SizePrefixedTTNNBinaryBufferHasIdentifier()

bool tt::target::ttnn::SizePrefixedTTNNBinaryBufferHasIdentifier ( const void *  buf)
inline

◆ TTNNBinaryBufferHasIdentifier()

bool tt::target::ttnn::TTNNBinaryBufferHasIdentifier ( const void *  buf)
inline

◆ TTNNBinaryExtension()

const char* tt::target::ttnn::TTNNBinaryExtension ( )
inline

◆ TTNNBinaryIdentifier()

const char* tt::target::ttnn::TTNNBinaryIdentifier ( )
inline

◆ UpsampleOp::scale_factor_as< tt::target::ttnn::NonUniformScale2D >()

template<>
const tt::target::ttnn::NonUniformScale2D* tt::target::ttnn::UpsampleOp::scale_factor_as< tt::target::ttnn::NonUniformScale2D > ( ) const
inline

◆ UpsampleOp::scale_factor_as< tt::target::ttnn::UniformScale2D >()

template<>
const tt::target::ttnn::UniformScale2D* tt::target::ttnn::UpsampleOp::scale_factor_as< tt::target::ttnn::UniformScale2D > ( ) const
inline

◆ VerifyDistributedTensorConfig()

bool tt::target::ttnn::VerifyDistributedTensorConfig ( ::flatbuffers::Verifier &  verifier,
const void *  obj,
DistributedTensorConfig  type 
)
inline

◆ VerifyDistributedTensorConfigVector()

bool tt::target::ttnn::VerifyDistributedTensorConfigVector ( ::flatbuffers::Verifier &  verifier,
const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *  values,
const ::flatbuffers::Vector< DistributedTensorConfig > *  types 
)
inline

◆ VerifyEltwiseQuantizationOpParams()

bool tt::target::ttnn::VerifyEltwiseQuantizationOpParams ( ::flatbuffers::Verifier &  verifier,
const void *  obj,
EltwiseQuantizationOpParams  type 
)
inline

◆ VerifyEltwiseQuantizationOpParamsVector()

bool tt::target::ttnn::VerifyEltwiseQuantizationOpParamsVector ( ::flatbuffers::Verifier &  verifier,
const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *  values,
const ::flatbuffers::Vector< EltwiseQuantizationOpParams > *  types 
)
inline

◆ VerifyEltwiseUnaryCompositeOpParams()

bool tt::target::ttnn::VerifyEltwiseUnaryCompositeOpParams ( ::flatbuffers::Verifier &  verifier,
const void *  obj,
EltwiseUnaryCompositeOpParams  type 
)
inline

◆ VerifyEltwiseUnaryCompositeOpParamsVector()

bool tt::target::ttnn::VerifyEltwiseUnaryCompositeOpParamsVector ( ::flatbuffers::Verifier &  verifier,
const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *  values,
const ::flatbuffers::Vector< EltwiseUnaryCompositeOpParams > *  types 
)
inline

◆ VerifyEltwiseUnaryOpParams()

bool tt::target::ttnn::VerifyEltwiseUnaryOpParams ( ::flatbuffers::Verifier &  verifier,
const void *  obj,
EltwiseUnaryOpParams  type 
)
inline

◆ VerifyEltwiseUnaryOpParamsVector()

bool tt::target::ttnn::VerifyEltwiseUnaryOpParamsVector ( ::flatbuffers::Verifier &  verifier,
const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *  values,
const ::flatbuffers::Vector< EltwiseUnaryOpParams > *  types 
)
inline

◆ VerifyMatmulProgramConfig()

bool tt::target::ttnn::VerifyMatmulProgramConfig ( ::flatbuffers::Verifier &  verifier,
const void *  obj,
MatmulProgramConfig  type 
)
inline

◆ VerifyMatmulProgramConfigVector()

bool tt::target::ttnn::VerifyMatmulProgramConfigVector ( ::flatbuffers::Verifier &  verifier,
const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *  values,
const ::flatbuffers::Vector< MatmulProgramConfig > *  types 
)
inline

◆ VerifyOpType()

bool tt::target::ttnn::VerifyOpType ( ::flatbuffers::Verifier &  verifier,
const void *  obj,
OpType  type 
)
inline

◆ VerifyOpTypeVector()

bool tt::target::ttnn::VerifyOpTypeVector ( ::flatbuffers::Verifier &  verifier,
const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *  values,
const ::flatbuffers::Vector< OpType > *  types 
)
inline

◆ VerifyScale2D()

bool tt::target::ttnn::VerifyScale2D ( ::flatbuffers::Verifier &  verifier,
const void *  obj,
Scale2D  type 
)
inline

◆ VerifyScale2DVector()

bool tt::target::ttnn::VerifyScale2DVector ( ::flatbuffers::Verifier &  verifier,
const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *  values,
const ::flatbuffers::Vector< Scale2D > *  types 
)
inline

◆ VerifySizePrefixedTTNNBinaryBuffer()

bool tt::target::ttnn::VerifySizePrefixedTTNNBinaryBuffer ( ::flatbuffers::Verifier &  verifier)
inline

◆ VerifyTTNNBinaryBuffer()

bool tt::target::ttnn::VerifyTTNNBinaryBuffer ( ::flatbuffers::Verifier &  verifier)
inline