TT-MLIR
Generated FlatBuffers API reference for the TT-MLIR TTNN binary schema (namespace tt::target::ttnn).

Enumerations
enum class | NamedFullOpType : uint32_t { Zeros = 0 , Ones = 1 , MIN = Zeros , MAX = Ones } |
enum class | EltwiseBinaryOpType : uint32_t { Add = 0 , Multiply = 1 , Subtract = 2 , Equal = 3 , NotEqual = 4 , GreaterEqual = 5 , GreaterThan = 6 , LessEqual = 7 , LessThan = 8 , Divide = 9 , LogicalAnd = 10 , LogicalOr = 11 , LogicalXor = 12 , MIN = Add , MAX = LogicalXor } |
enum class | EltwiseBinaryCompositeOpType : uint32_t { Maximum = 0 , Minimum = 1 , Remainder = 2 , Scatter = 3 , Pow = 4 , Atan2 = 5 , BitwiseAnd = 6 , BitwiseOr = 7 , BitwiseXor = 8 , MIN = Maximum , MAX = BitwiseXor } |
enum class | EltwiseQuantizationOpType : uint32_t { Quantize = 0 , Dequantize = 1 , Requantize = 2 , MIN = Quantize , MAX = Requantize } |
enum class | EltwiseQuantizationOpParams : uint8_t { NONE = 0 , QuantizeDequantizeOpParams = 1 , RequantizeOpParams = 2 , MIN = NONE , MAX = RequantizeOpParams } |
enum class | EltwiseUnaryOpType : uint32_t { Abs = 0 , Ceil = 1 , Cos = 2 , Floor = 3 , Gelu = 4 , IsFinite = 5 , LogicalNot = 6 , Neg = 7 , Relu = 8 , Sqrt = 9 , Rsqrt = 10 , Sigmoid = 11 , Sin = 12 , Reciprocal = 13 , Sign = 14 , Tan = 15 , Tanh = 16 , Atan = 17 , Exp = 18 , Log = 19 , Expm1 = 20 , LeakyRelu = 21 , BitwiseNot = 22 , MIN = Abs , MAX = BitwiseNot } |
enum class | EltwiseUnaryOpParams : uint8_t { NONE = 0 , EltwiseOpWithFloatParams = 1 , MIN = NONE , MAX = EltwiseOpWithFloatParams } |
enum class | EltwiseUnaryCompositeOpType : uint32_t { Cbrt = 0 , ClampScalar = 1 , ClampTensor = 2 , Log1p = 3 , MIN = Cbrt , MAX = Log1p } |
enum class | EltwiseUnaryCompositeOpParams : uint8_t { NONE = 0 , ClampScalarOpParams = 1 , ClampTensorOpParams = 2 , MIN = NONE , MAX = ClampTensorOpParams } |
enum class | UnaryOpType : uint32_t { Exp = 0 , Recip = 1 , Gelu = 2 , Relu = 3 , Sqrt = 4 , Sigmoid = 5 , Log = 6 , Tanh = 7 , Log2 = 8 , Log10 = 9 , Sin = 10 , Cos = 11 , Abs = 12 , AbsInt32 = 13 , Sign = 14 , Square = 15 , Eqz = 16 , Nez = 17 , Gtz = 18 , Ltz = 19 , Gez = 20 , Lez = 21 , ReluMax = 22 , ReluMin = 23 , Power = 24 , LeakyRelu = 25 , Elu = 26 , Exp2 = 27 , Heaviside = 28 , Expm1 = 29 , Signbit = 30 , Asin = 31 , Acos = 32 , Rsqrt = 33 , Relu6 = 34 , Atan = 35 , Erf = 36 , Erfc = 37 , Isinf = 38 , Isposinf = 39 , Isneginf = 40 , Isnan = 41 , LogicalNotUnary = 42 , Isfinite = 43 , Erfinv = 44 , I0 = 45 , I1 = 46 , Tan = 47 , Rsub = 48 , Rdiv = 49 , Silu = 50 , Softplus = 51 , Identity = 52 , Neg = 53 , AddUnarySfpu = 54 , SubUnarySfpu = 55 , MulUnarySfpu = 56 , DivUnarySfpu = 57 , IdentityUint32 = 58 , UnaryNe = 59 , UnaryGt = 60 , UnaryLt = 61 , TiledProd = 62 , Typecast = 63 , BitwiseXor = 64 , BitwiseNot = 65 , BitwiseAnd = 66 , BitwiseOr = 67 , RightShift = 68 , Floor = 69 , FloorFloat32 = 70 , Ceil = 71 , CeilFloat32 = 72 , LeftShift = 73 , Remainder = 74 , Fmod = 75 , Dropout = 76 , Fill = 77 , PreluSfpu = 78 , ZeroPoint = 79 , MIN = Exp , MAX = ZeroPoint } |
enum class | MatmulProgramConfig : uint8_t { NONE = 0 , MatmulMultiCoreReuseProgramConfig = 1 , MatmulMultiCoreReuseMultiCastProgramConfig = 2 , MatmulMultiCoreReuseMultiCast1DProgramConfig = 3 , MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig = 4 , MIN = NONE , MAX = MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig } |
enum class | Scale2D : uint8_t { NONE = 0 , UniformScale2D = 1 , NonUniformScale2D = 2 , MIN = NONE , MAX = NonUniformScale2D } |
enum class | ReductionOpType : uint32_t { Sum = 0 , Mean = 1 , Max = 2 , Min = 3 , MIN = Sum , MAX = Min } |
enum class | OpType : uint8_t { NONE = 0 , AllGatherOp = 1 , CollectivePermuteOp = 2 , MeshShardOp = 3 , ReduceScatterOp = 4 , GetDeviceOp = 5 , PrepareConv2dWeightsOp = 6 , Conv2dOp = 7 , ConvTranspose2dOp = 8 , CpuOp = 9 , ArangeOp = 10 , ConstantOp = 11 , ConstructTensorOp = 12 , EmptyOp = 13 , FullOp = 14 , NamedFullOp = 15 , ConcatOp = 16 , PadOp = 17 , PermuteOp = 18 , RepeatInterleaveOp = 19 , RepeatOp = 20 , ReshapeOp = 21 , SliceOp = 22 , TransposeOp = 23 , DeallocateOp = 24 , EltwiseBinaryOp = 25 , EltwiseBinaryCompositeOp = 26 , EltwiseTernaryWhereOp = 27 , EltwiseQuantizationOp = 28 , EltwiseUnaryOp = 29 , EltwiseUnaryCompositeOp = 30 , EmbeddingBackwardOp = 31 , EmbeddingOp = 32 , FillCacheOp = 33 , UpdateCacheOp = 34 , FromDeviceOp = 35 , ToDeviceOp = 36 , ToDTypeOp = 37 , ToLayoutOp = 38 , ToMemoryConfigOp = 39 , TypecastOp = 40 , LinearOp = 41 , MatmulOp = 42 , MorehCumSumOp = 43 , SoftmaxOp = 44 , MaxPool2dOp = 45 , UpsampleOp = 46 , ReductionArgMaxOp = 47 , ReductionOp = 48 , ReductionProdOp = 49 , MIN = NONE , MAX = ReductionProdOp } |
enum class | TensorMemoryLayout : uint16_t { Interleaved = 0 , SingleBank = 1 , HeightSharded = 2 , WidthSharded = 3 , BlockSharded = 4 , MIN = Interleaved , MAX = BlockSharded } |
enum class | StorageType : uint16_t { Owned = 0 , Device = 1 , Borrowed = 2 , MultiDevice = 3 , MultiDeviceHost = 4 , MIN = Owned , MAX = MultiDeviceHost } |
enum class | MeshShardDirection : uint32_t { FullToShardShape = 0 , ShardToFullShape = 1 , MIN = FullToShardShape , MAX = ShardToFullShape } |
enum class | MeshShardType : uint32_t { Identity = 0 , Replicate = 1 , Maximal = 2 , Devices = 3 , MIN = Identity , MAX = Devices } |
enum class | DistributedTensorConfig : uint8_t { NONE = 0 , ReplicateTensor = 1 , ShardTensor = 2 , ShardTensor2D = 3 , AllGatherTensor = 4 , MIN = NONE , MAX = AllGatherTensor } |
Functions
inline ::flatbuffers::Offset< TTNNBinary > | CreateTTNNBinary (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::Version *version=nullptr, ::flatbuffers::Offset<::flatbuffers::String > ttmlir_git_hash=0, ::flatbuffers::Offset< tt::target::SystemDesc > system_desc=0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::Program >>> programs=0) |
inline ::flatbuffers::Offset< TTNNBinary > | CreateTTNNBinaryDirect (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::Version *version=nullptr, const char *ttmlir_git_hash=nullptr, ::flatbuffers::Offset< tt::target::SystemDesc > system_desc=0, const std::vector<::flatbuffers::Offset< tt::target::ttnn::Program >> *programs=nullptr) |
const tt::target::ttnn::TTNNBinary * | GetTTNNBinary (const void *buf) |
const tt::target::ttnn::TTNNBinary * | GetSizePrefixedTTNNBinary (const void *buf) |
const char * | TTNNBinaryIdentifier () |
bool | TTNNBinaryBufferHasIdentifier (const void *buf) |
bool | SizePrefixedTTNNBinaryBufferHasIdentifier (const void *buf) |
bool | VerifyTTNNBinaryBuffer (::flatbuffers::Verifier &verifier) |
bool | VerifySizePrefixedTTNNBinaryBuffer (::flatbuffers::Verifier &verifier) |
const char * | TTNNBinaryExtension () |
void | FinishTTNNBinaryBuffer (::flatbuffers::FlatBufferBuilder &fbb, ::flatbuffers::Offset< tt::target::ttnn::TTNNBinary > root) |
void | FinishSizePrefixedTTNNBinaryBuffer (::flatbuffers::FlatBufferBuilder &fbb, ::flatbuffers::Offset< tt::target::ttnn::TTNNBinary > root) |
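A minimal sketch of the round trip through the functions above: build a TTNNBinary, finish the buffer, then verify and read it back. The include path and the git-hash string are placeholders, and the version, system descriptor, and program list are left unset purely to keep the example short; the helper names are illustrative, not part of the generated API.

```cpp
#include "ttnn_generated.h" // placeholder include path for this generated header

#include <cstdint>
#include <vector>

// Serialize an (otherwise empty) TTNNBinary into a byte vector.
std::vector<uint8_t> buildEmptyTTNNBinary() {
  ::flatbuffers::FlatBufferBuilder fbb;
  auto binary = tt::target::ttnn::CreateTTNNBinaryDirect(
      fbb, /*version=*/nullptr, /*ttmlir_git_hash=*/"unknown",
      /*system_desc=*/0, /*programs=*/nullptr);
  tt::target::ttnn::FinishTTNNBinaryBuffer(fbb, binary);
  return {fbb.GetBufferPointer(), fbb.GetBufferPointer() + fbb.GetSize()};
}

// Check the file identifier, verify the buffer, then return the root table.
const tt::target::ttnn::TTNNBinary *
readTTNNBinary(const std::vector<uint8_t> &buf) {
  if (!tt::target::ttnn::TTNNBinaryBufferHasIdentifier(buf.data()))
    return nullptr;
  ::flatbuffers::Verifier verifier(buf.data(), buf.size());
  if (!tt::target::ttnn::VerifyTTNNBinaryBuffer(verifier))
    return nullptr;
  return tt::target::ttnn::GetTTNNBinary(buf.data());
}
```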
inline ::flatbuffers::Offset< AllGatherOp > | CreateAllGatherOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, int32_t all_gather_dim=0, uint32_t cluster_axis=0, uint32_t num_links=0) |
inline ::flatbuffers::Offset< CollectivePermuteOp > | CreateCollectivePermuteOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> source_target_pairs=0) |
inline ::flatbuffers::Offset< CollectivePermuteOp > | CreateCollectivePermuteOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, const std::vector< int64_t > *source_target_pairs=nullptr) |
inline ::flatbuffers::Offset< MeshShardOp > | CreateMeshShardOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, tt::target::ttnn::MeshShardDirection shard_direction=tt::target::ttnn::MeshShardDirection::FullToShardShape, tt::target::ttnn::MeshShardType shard_type=tt::target::ttnn::MeshShardType::Identity, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> shard_shape=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> shard_dims=0) |
inline ::flatbuffers::Offset< MeshShardOp > | CreateMeshShardOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, tt::target::ttnn::MeshShardDirection shard_direction=tt::target::ttnn::MeshShardDirection::FullToShardShape, tt::target::ttnn::MeshShardType shard_type=tt::target::ttnn::MeshShardType::Identity, const std::vector< int64_t > *shard_shape=nullptr, const std::vector< int64_t > *shard_dims=nullptr) |
inline ::flatbuffers::Offset< ReduceScatterOp > | CreateReduceScatterOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, int32_t scatter_dim=0, uint32_t reduce_type=0, uint32_t cluster_axis=0, uint32_t num_links=0) |
inline ::flatbuffers::Offset< Conv2dConfig > | CreateConv2dConfig (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::DataType weights_dtype=tt::target::DataType::Float32, ::flatbuffers::Offset<::flatbuffers::String > activation=0, uint32_t input_channels_alignment=0, bool deallocate_activation=false, bool reallocate_halo_output=false, uint32_t act_block_h_override=0, uint32_t act_block_w_div=0, bool reshard_if_not_optimal=false, bool override_sharding_config=false, ::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout > shard_layout=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > core_grid=0, bool transpose_shards=false, tt::target::TensorLayout output_layout=tt::target::TensorLayout::RowMajor, bool preprocess_weights_on_device=false, bool always_preprocess_weights=false, bool enable_act_double_buffer=false, bool enable_weights_double_buffer=false, bool enable_split_reader=false, bool enable_subblock_padding=false) |
inline ::flatbuffers::Offset< Conv2dConfig > | CreateConv2dConfigDirect (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::DataType weights_dtype=tt::target::DataType::Float32, const char *activation=nullptr, uint32_t input_channels_alignment=0, bool deallocate_activation=false, bool reallocate_halo_output=false, uint32_t act_block_h_override=0, uint32_t act_block_w_div=0, bool reshard_if_not_optimal=false, bool override_sharding_config=false, ::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout > shard_layout=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > core_grid=0, bool transpose_shards=false, tt::target::TensorLayout output_layout=tt::target::TensorLayout::RowMajor, bool preprocess_weights_on_device=false, bool always_preprocess_weights=false, bool enable_act_double_buffer=false, bool enable_weights_double_buffer=false, bool enable_split_reader=false, bool enable_subblock_padding=false) |
inline ::flatbuffers::Offset< PrepareConv2dWeightsOp > | CreatePrepareConv2dWeightsOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight_tensor=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > input_memory_config=0, tt::target::TensorLayout input_tensor_layout=tt::target::TensorLayout::RowMajor, ::flatbuffers::Offset<::flatbuffers::String > weights_format=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, bool has_bias=false, uint32_t groups=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0) |
inline ::flatbuffers::Offset< PrepareConv2dWeightsOp > | CreatePrepareConv2dWeightsOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight_tensor=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > input_memory_config=0, tt::target::TensorLayout input_tensor_layout=tt::target::TensorLayout::RowMajor, const char *weights_format=nullptr, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *dilation=nullptr, bool has_bias=false, uint32_t groups=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0) |
inline ::flatbuffers::Offset< Conv2dOp > | CreateConv2dOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, uint32_t groups=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0) |
inline ::flatbuffers::Offset< Conv2dOp > | CreateConv2dOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *dilation=nullptr, uint32_t groups=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0) |
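A sketch of emitting a Conv2dOp, which records its hyperparameters as int32 vectors. All sizes are illustrative, the tensor/device/config offsets are left unset (0), and the helper name is not part of the generated API.

```cpp
// Assumes the generated header (and <vector>) are already included.
::flatbuffers::Offset<tt::target::ttnn::Conv2dOp>
emitConv2d(::flatbuffers::FlatBufferBuilder &fbb) {
  std::vector<int32_t> kernel = {3, 3}, stride = {1, 1},
                       padding = {1, 1}, dilation = {1, 1};
  return tt::target::ttnn::CreateConv2dOpDirect(
      fbb, /*input=*/0, /*weight=*/0, /*bias=*/0, /*out=*/0, /*device=*/0,
      /*in_channels=*/64, /*out_channels=*/64, /*batch_size=*/1,
      /*input_height=*/28, /*input_width=*/28,
      &kernel, &stride, &padding, &dilation,
      /*groups=*/1, /*conv2d_config=*/0);
}
```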
inline ::flatbuffers::Offset< ConvTranspose2dOp > | CreateConvTranspose2dOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> output_padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, uint32_t groups=0) |
inline ::flatbuffers::Offset< ConvTranspose2dOp > | CreateConvTranspose2dOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *output_padding=nullptr, const std::vector< int32_t > *dilation=nullptr, uint32_t groups=0) |
inline ::flatbuffers::Offset< CpuOp > | CreateCpuOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>> ins=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::String > func_name=0, uint32_t dylib_id=0) |
inline ::flatbuffers::Offset< CpuOp > | CreateCpuOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> *ins=nullptr, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const char *func_name=nullptr, uint32_t dylib_id=0) |
const NamedFullOpType(& | EnumValuesNamedFullOpType ())[2] |
const char *const * | EnumNamesNamedFullOpType () |
const char * | EnumNameNamedFullOpType (NamedFullOpType e) |
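The Enum{Values,Names,Name}* triples follow the standard FlatBuffers pattern; a small usage sketch:

```cpp
// Map an enum value to its generated string name ("Ones" here).
const char *name = tt::target::ttnn::EnumNameNamedFullOpType(
    tt::target::ttnn::NamedFullOpType::Ones);

// Iterate the declared values; the returned array reference has length 2,
// matching the declaration above.
for (tt::target::ttnn::NamedFullOpType v :
     tt::target::ttnn::EnumValuesNamedFullOpType()) {
  (void)v;
}
```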
inline ::flatbuffers::Offset< ArangeOp > | CreateArangeOp (::flatbuffers::FlatBufferBuilder &_fbb, float start=0.0f, float end=0.0f, float step=0.0f, ::flatbuffers::Optional< tt::target::DataType > dtype=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< ConstantOp > | CreateConstantOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::Vector< uint8_t >> data=0) |
inline ::flatbuffers::Offset< ConstantOp > | CreateConstantOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const std::vector< uint8_t > *data=nullptr) |
inline ::flatbuffers::Offset< ConstructTensorOp > | CreateConstructTensorOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> shape=0, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::TensorLayout layout=tt::target::TensorLayout::RowMajor, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< ConstructTensorOp > | CreateConstructTensorOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< int64_t > *shape=nullptr, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::TensorLayout layout=tt::target::TensorLayout::RowMajor, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< EmptyOp > | CreateEmptyOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> shape=0, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::TensorLayout layout=tt::target::TensorLayout::RowMajor, uint32_t num_shards=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy > strategy=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< EmptyOp > | CreateEmptyOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< int64_t > *shape=nullptr, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::TensorLayout layout=tt::target::TensorLayout::RowMajor, uint32_t num_shards=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy > strategy=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< FullOp > | CreateFullOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, float fill_value=0.0f, uint32_t num_shards=0, ::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy > strategy=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< NamedFullOp > | CreateNamedFullOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::NamedFullOpType type=tt::target::ttnn::NamedFullOpType::Zeros, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> shape=0, ::flatbuffers::Optional< tt::target::DataType > dtype=::flatbuffers::nullopt, ::flatbuffers::Optional< tt::target::TensorLayout > layout=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< NamedFullOp > | CreateNamedFullOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::NamedFullOpType type=tt::target::ttnn::NamedFullOpType::Zeros, const std::vector< int64_t > *shape=nullptr, ::flatbuffers::Optional< tt::target::DataType > dtype=::flatbuffers::nullopt, ::flatbuffers::Optional< tt::target::TensorLayout > layout=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
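A sketch of emitting a zeros tensor via NamedFullOp. The shape, dtype, and layout are example values; the device, memory-config, and output TensorRef offsets are left unset (0); the helper name is illustrative.

```cpp
// Assumes the generated header (and <vector>) are already included.
::flatbuffers::Offset<tt::target::ttnn::NamedFullOp>
emitZeros(::flatbuffers::FlatBufferBuilder &fbb) {
  std::vector<int64_t> shape = {1, 32, 32}; // example shape
  return tt::target::ttnn::CreateNamedFullOpDirect(
      fbb, tt::target::ttnn::NamedFullOpType::Zeros, &shape,
      /*dtype=*/tt::target::DataType::Float32,
      /*layout=*/tt::target::TensorLayout::RowMajor,
      /*device=*/0, /*memcfg=*/0, /*out=*/0);
}
```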
inline ::flatbuffers::Offset< ConcatOp > | CreateConcatOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>> inputs=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, int32_t dim=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0) |
inline ::flatbuffers::Offset< ConcatOp > | CreateConcatOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> *inputs=nullptr, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, int32_t dim=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0) |
inline ::flatbuffers::Offset< PadOp > | CreatePadOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::Vector< uint32_t >> padding=0, float value=0.0f, bool use_multicore=false, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0) |
inline ::flatbuffers::Offset< PadOp > | CreatePadOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const std::vector< uint32_t > *padding=nullptr, float value=0.0f, bool use_multicore=false, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0) |
inline ::flatbuffers::Offset< PermuteOp > | CreatePermuteOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> permutation=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, float pad_value=0.0f, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< PermuteOp > | CreatePermuteOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, const std::vector< int64_t > *permutation=nullptr, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, float pad_value=0.0f, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< RepeatInterleaveOp > | CreateRepeatInterleaveOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, uint32_t repeats=0, int32_t dim=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0) |
inline ::flatbuffers::Offset< RepeatOp > | CreateRepeatOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> repeat_dims=0) |
inline ::flatbuffers::Offset< RepeatOp > | CreateRepeatOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const std::vector< int64_t > *repeat_dims=nullptr) |
inline ::flatbuffers::Offset< ReshapeOp > | CreateReshapeOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> shape=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0) |
inline ::flatbuffers::Offset< ReshapeOp > | CreateReshapeOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const std::vector< int32_t > *shape=nullptr, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0) |
inline ::flatbuffers::Offset< SliceOp > | CreateSliceOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> begins=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> ends=0, ::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> step=0) |
inline ::flatbuffers::Offset< SliceOp > | CreateSliceOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const std::vector< int64_t > *begins=nullptr, const std::vector< int64_t > *ends=nullptr, const std::vector< int64_t > *step=nullptr) |
inline ::flatbuffers::Offset< TransposeOp > | CreateTransposeOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, int32_t dim0=0, int32_t dim1=0) |
inline ::flatbuffers::Offset< DeallocateOp > | CreateDeallocateOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, bool force=false) |
const EltwiseBinaryOpType(& | EnumValuesEltwiseBinaryOpType ())[13] |
const char *const * | EnumNamesEltwiseBinaryOpType () |
const char * | EnumNameEltwiseBinaryOpType (EltwiseBinaryOpType e) |
const EltwiseBinaryCompositeOpType(& | EnumValuesEltwiseBinaryCompositeOpType ())[9] |
const char *const * | EnumNamesEltwiseBinaryCompositeOpType () |
const char * | EnumNameEltwiseBinaryCompositeOpType (EltwiseBinaryCompositeOpType e) |
const EltwiseQuantizationOpType(& | EnumValuesEltwiseQuantizationOpType ())[3] |
const char *const * | EnumNamesEltwiseQuantizationOpType () |
const char * | EnumNameEltwiseQuantizationOpType (EltwiseQuantizationOpType e) |
const EltwiseQuantizationOpParams(& | EnumValuesEltwiseQuantizationOpParams ())[3] |
const char *const * | EnumNamesEltwiseQuantizationOpParams () |
const char * | EnumNameEltwiseQuantizationOpParams (EltwiseQuantizationOpParams e) |
bool | VerifyEltwiseQuantizationOpParams (::flatbuffers::Verifier &verifier, const void *obj, EltwiseQuantizationOpParams type) |
bool | VerifyEltwiseQuantizationOpParamsVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< EltwiseQuantizationOpParams > *types) |
const EltwiseUnaryOpType(& | EnumValuesEltwiseUnaryOpType ())[23] |
const char *const * | EnumNamesEltwiseUnaryOpType () |
const char * | EnumNameEltwiseUnaryOpType (EltwiseUnaryOpType e) |
const EltwiseUnaryOpParams(& | EnumValuesEltwiseUnaryOpParams ())[2] |
const char *const * | EnumNamesEltwiseUnaryOpParams () |
const char * | EnumNameEltwiseUnaryOpParams (EltwiseUnaryOpParams e) |
bool | VerifyEltwiseUnaryOpParams (::flatbuffers::Verifier &verifier, const void *obj, EltwiseUnaryOpParams type) |
bool | VerifyEltwiseUnaryOpParamsVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< EltwiseUnaryOpParams > *types) |
const EltwiseUnaryCompositeOpType(& | EnumValuesEltwiseUnaryCompositeOpType ())[4] |
const char *const * | EnumNamesEltwiseUnaryCompositeOpType () |
const char * | EnumNameEltwiseUnaryCompositeOpType (EltwiseUnaryCompositeOpType e) |
const EltwiseUnaryCompositeOpParams(& | EnumValuesEltwiseUnaryCompositeOpParams ())[3] |
const char *const * | EnumNamesEltwiseUnaryCompositeOpParams () |
const char * | EnumNameEltwiseUnaryCompositeOpParams (EltwiseUnaryCompositeOpParams e) |
bool | VerifyEltwiseUnaryCompositeOpParams (::flatbuffers::Verifier &verifier, const void *obj, EltwiseUnaryCompositeOpParams type) |
bool | VerifyEltwiseUnaryCompositeOpParamsVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< EltwiseUnaryCompositeOpParams > *types) |
const UnaryOpType(& | EnumValuesUnaryOpType ())[80] |
const char *const * | EnumNamesUnaryOpType () |
const char * | EnumNameUnaryOpType (UnaryOpType e) |
inline ::flatbuffers::Offset< EltwiseOpWithFloatParams > | CreateEltwiseOpWithFloatParams (::flatbuffers::FlatBufferBuilder &_fbb, float parameter=0.0f) |
inline ::flatbuffers::Offset< EltwiseBinaryOp > | CreateEltwiseBinaryOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::EltwiseBinaryOpType type=tt::target::ttnn::EltwiseBinaryOpType::Add, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > lhs=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > rhs=0, ::flatbuffers::Optional< tt::target::DataType > output_dtype=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< EltwiseBinaryCompositeOp > | CreateEltwiseBinaryCompositeOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::EltwiseBinaryCompositeOpType type=tt::target::ttnn::EltwiseBinaryCompositeOpType::Maximum, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > lhs=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > rhs=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< QuantizeDequantizeOpParams > | CreateQuantizeDequantizeOpParams (::flatbuffers::FlatBufferBuilder &_fbb, float scale=0.0f, int32_t zero_point=0) |
inline ::flatbuffers::Offset< RequantizeOpParams > | CreateRequantizeOpParams (::flatbuffers::FlatBufferBuilder &_fbb, float in_scale=0.0f, int32_t in_zero_point=0, float out_scale=0.0f, int32_t out_zero_point=0) |
template<> | |
const tt::target::ttnn::QuantizeDequantizeOpParams * | EltwiseQuantizationOp::params_as< tt::target::ttnn::QuantizeDequantizeOpParams > () const |
template<> | |
const tt::target::ttnn::RequantizeOpParams * | EltwiseQuantizationOp::params_as< tt::target::ttnn::RequantizeOpParams > () const |
inline ::flatbuffers::Offset< EltwiseQuantizationOp > | CreateEltwiseQuantizationOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::EltwiseQuantizationOpType type=tt::target::ttnn::EltwiseQuantizationOpType::Quantize, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Optional< int32_t > axis=::flatbuffers::nullopt, ::flatbuffers::Optional< tt::target::DataType > output_dtype=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, tt::target::ttnn::EltwiseQuantizationOpParams params_type=tt::target::ttnn::EltwiseQuantizationOpParams::NONE, ::flatbuffers::Offset< void > params=0) |
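A sketch of a Quantize op whose scale and zero point travel through the EltwiseQuantizationOpParams union as a QuantizeDequantizeOpParams table. The scale/zero-point values are examples, the tensor and memory-config offsets are unset, and the helper name is illustrative.

```cpp
// Assumes the generated header is already included.
::flatbuffers::Offset<tt::target::ttnn::EltwiseQuantizationOp>
emitQuantize(::flatbuffers::FlatBufferBuilder &fbb) {
  auto params = tt::target::ttnn::CreateQuantizeDequantizeOpParams(
      fbb, /*scale=*/0.05f, /*zero_point=*/128);
  return tt::target::ttnn::CreateEltwiseQuantizationOp(
      fbb, tt::target::ttnn::EltwiseQuantizationOpType::Quantize,
      /*in=*/0, /*axis=*/::flatbuffers::nullopt,
      /*output_dtype=*/::flatbuffers::nullopt,
      /*memory_config=*/0, /*out=*/0,
      tt::target::ttnn::EltwiseQuantizationOpParams::QuantizeDequantizeOpParams,
      params.Union());
}
```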
inline ::flatbuffers::Offset< EltwiseTernaryWhereOp > | CreateEltwiseTernaryWhereOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > first=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > second=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > third=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
template<> | |
const tt::target::ttnn::EltwiseOpWithFloatParams * | EltwiseUnaryOp::params_as< tt::target::ttnn::EltwiseOpWithFloatParams > () const |
inline ::flatbuffers::Offset< EltwiseUnaryOp > | CreateEltwiseUnaryOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::EltwiseUnaryOpType type=tt::target::ttnn::EltwiseUnaryOpType::Abs, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, tt::target::ttnn::EltwiseUnaryOpParams params_type=tt::target::ttnn::EltwiseUnaryOpParams::NONE, ::flatbuffers::Offset< void > params=0) |
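A sketch of a LeakyRelu unary op that carries its slope through the EltwiseUnaryOpParams union as an EltwiseOpWithFloatParams table. The slope is an example value and the tensor offsets are unset.

```cpp
// Assumes the generated header is already included.
::flatbuffers::Offset<tt::target::ttnn::EltwiseUnaryOp>
emitLeakyRelu(::flatbuffers::FlatBufferBuilder &fbb) {
  auto slope =
      tt::target::ttnn::CreateEltwiseOpWithFloatParams(fbb, /*parameter=*/0.01f);
  return tt::target::ttnn::CreateEltwiseUnaryOp(
      fbb, tt::target::ttnn::EltwiseUnaryOpType::LeakyRelu,
      /*in=*/0, /*memory_config=*/0, /*out=*/0,
      tt::target::ttnn::EltwiseUnaryOpParams::EltwiseOpWithFloatParams,
      slope.Union());
}
```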
inline ::flatbuffers::Offset< ClampScalarOpParams > | CreateClampScalarOpParams (::flatbuffers::FlatBufferBuilder &_fbb, float min=0.0f, float max=0.0f) |
inline ::flatbuffers::Offset< ClampTensorOpParams > | CreateClampTensorOpParams (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > min=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > max=0) |
template<> | |
const tt::target::ttnn::ClampScalarOpParams * | EltwiseUnaryCompositeOp::params_as< tt::target::ttnn::ClampScalarOpParams > () const |
template<> | |
const tt::target::ttnn::ClampTensorOpParams * | EltwiseUnaryCompositeOp::params_as< tt::target::ttnn::ClampTensorOpParams > () const |
inline ::flatbuffers::Offset< EltwiseUnaryCompositeOp > | CreateEltwiseUnaryCompositeOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::EltwiseUnaryCompositeOpType type=tt::target::ttnn::EltwiseUnaryCompositeOpType::Cbrt, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, tt::target::ttnn::EltwiseUnaryCompositeOpParams params_type=tt::target::ttnn::EltwiseUnaryCompositeOpParams::NONE, ::flatbuffers::Offset< void > params=0) |
inline ::flatbuffers::Offset< UnaryWithParam > | CreateUnaryWithParam (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::UnaryOpType op_type=tt::target::ttnn::UnaryOpType::Exp, ::flatbuffers::Offset<::flatbuffers::Vector< double >> params=0) |
inline ::flatbuffers::Offset< UnaryWithParam > | CreateUnaryWithParamDirect (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::UnaryOpType op_type=tt::target::ttnn::UnaryOpType::Exp, const std::vector< double > *params=nullptr) |
inline ::flatbuffers::Offset< EmbeddingOp > | CreateEmbeddingOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< EmbeddingBackwardOp > | CreateEmbeddingBackwardOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in_grad=0, ::flatbuffers::Optional< tt::target::DataType > dtype=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< GetDeviceOp > | CreateGetDeviceOp (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::Dim2d *mesh=nullptr, const tt::target::Dim2d *offset=nullptr, ::flatbuffers::Offset<::flatbuffers::Vector< uint32_t >> chip_ids=0, ::flatbuffers::Offset< tt::target::DeviceRef > out=0) |
inline ::flatbuffers::Offset< GetDeviceOp > | CreateGetDeviceOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::Dim2d *mesh=nullptr, const tt::target::Dim2d *offset=nullptr, const std::vector< uint32_t > *chip_ids=nullptr, ::flatbuffers::Offset< tt::target::DeviceRef > out=0) |
inline ::flatbuffers::Offset< FillCacheOp > | CreateFillCacheOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > cache=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, uint32_t batch_offset=0) |
inline ::flatbuffers::Offset< UpdateCacheOp > | CreateUpdateCacheOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > cache=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > update_index=0, uint32_t batch_offset=0) |
inline ::flatbuffers::Offset< FromDeviceOp > | CreateFromDeviceOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< ToDeviceOp > | CreateToDeviceOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< ToDTypeOp > | CreateToDTypeOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, tt::target::DataType dtype=tt::target::DataType::Float32, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< ToLayoutOp > | CreateToLayoutOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, tt::target::TensorLayout layout=tt::target::TensorLayout::RowMajor, ::flatbuffers::Optional< tt::target::DataType > dtype=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< ToMemoryConfigOp > | CreateToMemoryConfigOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in0=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< TypecastOp > | CreateTypecastOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, tt::target::DataType dtype=tt::target::DataType::Float32, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
const MatmulProgramConfig(& | EnumValuesMatmulProgramConfig ())[5] |
const char *const * | EnumNamesMatmulProgramConfig () |
const char * | EnumNameMatmulProgramConfig (MatmulProgramConfig e) |
bool | VerifyMatmulProgramConfig (::flatbuffers::Verifier &verifier, const void *obj, MatmulProgramConfig type) |
bool | VerifyMatmulProgramConfigVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< MatmulProgramConfig > *types) |
inline ::flatbuffers::Offset< MatmulMultiCoreReuseProgramConfig > | CreateMatmulMultiCoreReuseProgramConfig (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size=nullptr, uint64_t in0_block_w=0, uint64_t out_subblock_h=0, uint64_t out_subblock_w=0, uint64_t per_core_m=0, uint64_t per_core_n=0) |
inline ::flatbuffers::Offset< MatmulMultiCoreReuseMultiCastProgramConfig > | CreateMatmulMultiCoreReuseMultiCastProgramConfig (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size=nullptr, uint64_t in0_block_w=0, uint64_t out_subblock_h=0, uint64_t out_subblock_w=0, uint64_t out_block_h=0, uint64_t out_block_w=0, uint64_t per_core_m=0, uint64_t per_core_n=0, bool transpose_mcast=false, ::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam > fused_activation=0, bool fuse_batch=false) |
inline ::flatbuffers::Offset< MatmulMultiCoreReuseMultiCast1DProgramConfig > | CreateMatmulMultiCoreReuseMultiCast1DProgramConfig (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::ttnn::CoreCoord *compute_with_storage_grid_size=nullptr, uint64_t in0_block_w=0, uint64_t out_subblock_h=0, uint64_t out_subblock_w=0, uint64_t out_block_h=0, uint64_t out_block_w=0, uint64_t per_core_m=0, uint64_t per_core_n=0, bool fuse_batch=false, ::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam > fused_activation=0, bool mcast_in0=false, bool gather_in0=false, ::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > hop_cores=0, uint64_t num_global_cb_receivers=0) |
inline ::flatbuffers::Offset< MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig > | CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig (::flatbuffers::FlatBufferBuilder &_fbb, uint64_t in0_block_w=0, uint64_t per_core_m=0, uint64_t per_core_n=0, ::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam > fused_activation=0) |
template<> | |
const tt::target::ttnn::MatmulMultiCoreReuseProgramConfig * | MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseProgramConfig > () const |
template<> | |
const tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig * | MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCastProgramConfig > () const |
template<> | |
const tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig * | MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCast1DProgramConfig > () const |
template<> | |
const tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig * | MatmulOp::matmul_program_config_as< tt::target::ttnn::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig > () const |
inline ::flatbuffers::Offset< MatmulOp > | CreateMatmulOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > a=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > b=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, bool transpose_a=false, bool transpose_b=false, tt::target::ttnn::MatmulProgramConfig matmul_program_config_type=tt::target::ttnn::MatmulProgramConfig::NONE, ::flatbuffers::Offset< void > matmul_program_config=0) |
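A sketch of a MatmulOp with a DRAM-sharded program config attached through the MatmulProgramConfig union; the block and per-core sizes are illustrative only and the tensor offsets are unset.

```cpp
// Assumes the generated header is already included.
::flatbuffers::Offset<tt::target::ttnn::MatmulOp>
emitMatmul(::flatbuffers::FlatBufferBuilder &fbb) {
  auto cfg = tt::target::ttnn::
      CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig(
          fbb, /*in0_block_w=*/2, /*per_core_m=*/4, /*per_core_n=*/4,
          /*fused_activation=*/0);
  return tt::target::ttnn::CreateMatmulOp(
      fbb, /*a=*/0, /*b=*/0, /*out=*/0,
      /*transpose_a=*/false, /*transpose_b=*/false,
      tt::target::ttnn::MatmulProgramConfig::
          MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig,
      cfg.Union());
}
```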
inline ::flatbuffers::Offset< LinearOp > | CreateLinearOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > a=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > b=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, bool transpose_a=false, bool transpose_b=false) |
inline ::flatbuffers::Offset< MorehCumSumOp > | CreateMorehCumSumOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, int64_t dim=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0) |
const Scale2D(& | EnumValuesScale2D ())[3] |
const char *const * | EnumNamesScale2D () |
const char * | EnumNameScale2D (Scale2D e) |
bool | VerifyScale2D (::flatbuffers::Verifier &verifier, const void *obj, Scale2D type) |
bool | VerifyScale2DVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< Scale2D > *types) |
inline ::flatbuffers::Offset< MaxPool2dOp > | CreateMaxPool2dOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, uint32_t channels=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, bool ceil_mode=false) |
inline ::flatbuffers::Offset< MaxPool2dOp > | CreateMaxPool2dOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, uint32_t channels=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *dilation=nullptr, bool ceil_mode=false) |
inline ::flatbuffers::Offset< UniformScale2D > | CreateUniformScale2D (::flatbuffers::FlatBufferBuilder &_fbb, int32_t scale=0) |
inline ::flatbuffers::Offset< NonUniformScale2D > | CreateNonUniformScale2D (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> scale=0) |
inline ::flatbuffers::Offset< NonUniformScale2D > | CreateNonUniformScale2DDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< int32_t > *scale=nullptr) |
template<> | |
const tt::target::ttnn::UniformScale2D * | UpsampleOp::scale_factor_as< tt::target::ttnn::UniformScale2D > () const |
template<> | |
const tt::target::ttnn::NonUniformScale2D * | UpsampleOp::scale_factor_as< tt::target::ttnn::NonUniformScale2D > () const |
inline ::flatbuffers::Offset< UpsampleOp > | CreateUpsampleOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, tt::target::ttnn::Scale2D scale_factor_type=tt::target::ttnn::Scale2D::NONE, ::flatbuffers::Offset< void > scale_factor=0, ::flatbuffers::Offset<::flatbuffers::String > mode=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
inline ::flatbuffers::Offset< UpsampleOp > | CreateUpsampleOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, tt::target::ttnn::Scale2D scale_factor_type=tt::target::ttnn::Scale2D::NONE, ::flatbuffers::Offset< void > scale_factor=0, const char *mode=nullptr, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0) |
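A sketch of an UpsampleOp with a uniform 2x scale via the Scale2D union; "nearest" is an example mode string and the tensor offsets are unset.

```cpp
// Assumes the generated header is already included.
::flatbuffers::Offset<tt::target::ttnn::UpsampleOp>
emitUpsample(::flatbuffers::FlatBufferBuilder &fbb) {
  auto scale = tt::target::ttnn::CreateUniformScale2D(fbb, /*scale=*/2);
  return tt::target::ttnn::CreateUpsampleOpDirect(
      fbb, /*in=*/0, tt::target::ttnn::Scale2D::UniformScale2D, scale.Union(),
      /*mode=*/"nearest", /*memory_config=*/0, /*out=*/0);
}
```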
const ReductionOpType(& | EnumValuesReductionOpType ())[4] |
const char *const * | EnumNamesReductionOpType () |
const char * | EnumNameReductionOpType (ReductionOpType e) |
inline ::flatbuffers::Offset< ReductionOp > | CreateReductionOp (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::ReductionOpType type=tt::target::ttnn::ReductionOpType::Sum, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dim_arg=0, bool keep_dim=false) |
inline ::flatbuffers::Offset< ReductionOp > | CreateReductionOpDirect (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::ReductionOpType type=tt::target::ttnn::ReductionOpType::Sum, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, const std::vector< int32_t > *dim_arg=nullptr, bool keep_dim=false) |
inline ::flatbuffers::Offset< ReductionArgMaxOp > | CreateReductionArgMaxOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Optional< int32_t > dim=::flatbuffers::nullopt, bool use_multicore=false, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0) |
inline ::flatbuffers::Offset< ReductionProdOp > | CreateReductionProdOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, bool all_dimensions=false, int64_t dim_arg=0, bool keep_dim=false, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memcfg=0) |
inline ::flatbuffers::Offset< SoftmaxOp > | CreateSoftmaxOp (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, int32_t dimension=0) |
const OpType(& | EnumValuesOpType ())[50] |
const char *const * | EnumNamesOpType () |
const char * | EnumNameOpType (OpType e) |
bool | VerifyOpType (::flatbuffers::Verifier &verifier, const void *obj, OpType type) |
bool | VerifyOpTypeVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< OpType > *types) |
template<> | |
const tt::target::ttnn::AllGatherOp * | Operation::type_as< tt::target::ttnn::AllGatherOp > () const |
template<> | |
const tt::target::ttnn::CollectivePermuteOp * | Operation::type_as< tt::target::ttnn::CollectivePermuteOp > () const |
template<> | |
const tt::target::ttnn::MeshShardOp * | Operation::type_as< tt::target::ttnn::MeshShardOp > () const |
template<> | |
const tt::target::ttnn::ReduceScatterOp * | Operation::type_as< tt::target::ttnn::ReduceScatterOp > () const |
template<> | |
const tt::target::ttnn::GetDeviceOp * | Operation::type_as< tt::target::ttnn::GetDeviceOp > () const |
template<> | |
const tt::target::ttnn::PrepareConv2dWeightsOp * | Operation::type_as< tt::target::ttnn::PrepareConv2dWeightsOp > () const |
template<> | |
const tt::target::ttnn::Conv2dOp * | Operation::type_as< tt::target::ttnn::Conv2dOp > () const |
template<> | |
const tt::target::ttnn::ConvTranspose2dOp * | Operation::type_as< tt::target::ttnn::ConvTranspose2dOp > () const |
template<> | |
const tt::target::ttnn::CpuOp * | Operation::type_as< tt::target::ttnn::CpuOp > () const |
template<> | |
const tt::target::ttnn::ArangeOp * | Operation::type_as< tt::target::ttnn::ArangeOp > () const |
template<> | |
const tt::target::ttnn::ConstantOp * | Operation::type_as< tt::target::ttnn::ConstantOp > () const |
template<> | |
const tt::target::ttnn::ConstructTensorOp * | Operation::type_as< tt::target::ttnn::ConstructTensorOp > () const |
template<> | |
const tt::target::ttnn::EmptyOp * | Operation::type_as< tt::target::ttnn::EmptyOp > () const |
template<> | |
const tt::target::ttnn::FullOp * | Operation::type_as< tt::target::ttnn::FullOp > () const |
template<> | |
const tt::target::ttnn::NamedFullOp * | Operation::type_as< tt::target::ttnn::NamedFullOp > () const |
template<> | |
const tt::target::ttnn::ConcatOp * | Operation::type_as< tt::target::ttnn::ConcatOp > () const |
template<> | |
const tt::target::ttnn::PadOp * | Operation::type_as< tt::target::ttnn::PadOp > () const |
template<> | |
const tt::target::ttnn::PermuteOp * | Operation::type_as< tt::target::ttnn::PermuteOp > () const |
template<> | |
const tt::target::ttnn::RepeatInterleaveOp * | Operation::type_as< tt::target::ttnn::RepeatInterleaveOp > () const |
template<> | |
const tt::target::ttnn::RepeatOp * | Operation::type_as< tt::target::ttnn::RepeatOp > () const |
template<> | |
const tt::target::ttnn::ReshapeOp * | Operation::type_as< tt::target::ttnn::ReshapeOp > () const |
template<> | |
const tt::target::ttnn::SliceOp * | Operation::type_as< tt::target::ttnn::SliceOp > () const |
template<> | |
const tt::target::ttnn::TransposeOp * | Operation::type_as< tt::target::ttnn::TransposeOp > () const |
template<> | |
const tt::target::ttnn::DeallocateOp * | Operation::type_as< tt::target::ttnn::DeallocateOp > () const |
template<> | |
const tt::target::ttnn::EltwiseBinaryOp * | Operation::type_as< tt::target::ttnn::EltwiseBinaryOp > () const |
template<> | |
const tt::target::ttnn::EltwiseBinaryCompositeOp * | Operation::type_as< tt::target::ttnn::EltwiseBinaryCompositeOp > () const |
template<> | |
const tt::target::ttnn::EltwiseTernaryWhereOp * | Operation::type_as< tt::target::ttnn::EltwiseTernaryWhereOp > () const |
template<> | |
const tt::target::ttnn::EltwiseQuantizationOp * | Operation::type_as< tt::target::ttnn::EltwiseQuantizationOp > () const |
template<> | |
const tt::target::ttnn::EltwiseUnaryOp * | Operation::type_as< tt::target::ttnn::EltwiseUnaryOp > () const |
template<> | |
const tt::target::ttnn::EltwiseUnaryCompositeOp * | Operation::type_as< tt::target::ttnn::EltwiseUnaryCompositeOp > () const |
template<> | |
const tt::target::ttnn::EmbeddingBackwardOp * | Operation::type_as< tt::target::ttnn::EmbeddingBackwardOp > () const |
template<> | |
const tt::target::ttnn::EmbeddingOp * | Operation::type_as< tt::target::ttnn::EmbeddingOp > () const |
template<> | |
const tt::target::ttnn::FillCacheOp * | Operation::type_as< tt::target::ttnn::FillCacheOp > () const |
template<> | |
const tt::target::ttnn::UpdateCacheOp * | Operation::type_as< tt::target::ttnn::UpdateCacheOp > () const |
template<> | |
const tt::target::ttnn::FromDeviceOp * | Operation::type_as< tt::target::ttnn::FromDeviceOp > () const |
template<> | |
const tt::target::ttnn::ToDeviceOp * | Operation::type_as< tt::target::ttnn::ToDeviceOp > () const |
template<> | |
const tt::target::ttnn::ToDTypeOp * | Operation::type_as< tt::target::ttnn::ToDTypeOp > () const |
template<> | |
const tt::target::ttnn::ToLayoutOp * | Operation::type_as< tt::target::ttnn::ToLayoutOp > () const |
template<> | |
const tt::target::ttnn::ToMemoryConfigOp * | Operation::type_as< tt::target::ttnn::ToMemoryConfigOp > () const |
template<> | |
const tt::target::ttnn::TypecastOp * | Operation::type_as< tt::target::ttnn::TypecastOp > () const |
template<> | |
const tt::target::ttnn::LinearOp * | Operation::type_as< tt::target::ttnn::LinearOp > () const |
template<> | |
const tt::target::ttnn::MatmulOp * | Operation::type_as< tt::target::ttnn::MatmulOp > () const |
template<> | |
const tt::target::ttnn::MorehCumSumOp * | Operation::type_as< tt::target::ttnn::MorehCumSumOp > () const |
template<> | |
const tt::target::ttnn::SoftmaxOp * | Operation::type_as< tt::target::ttnn::SoftmaxOp > () const |
template<> | |
const tt::target::ttnn::MaxPool2dOp * | Operation::type_as< tt::target::ttnn::MaxPool2dOp > () const |
template<> | |
const tt::target::ttnn::UpsampleOp * | Operation::type_as< tt::target::ttnn::UpsampleOp > () const |
template<> | |
const tt::target::ttnn::ReductionArgMaxOp * | Operation::type_as< tt::target::ttnn::ReductionArgMaxOp > () const |
template<> | |
const tt::target::ttnn::ReductionOp * | Operation::type_as< tt::target::ttnn::ReductionOp > () const |
template<> | |
const tt::target::ttnn::ReductionProdOp * | Operation::type_as< tt::target::ttnn::ReductionProdOp > () const |
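On the reader side, each type_as<T>() specialization above follows the usual FlatBuffers union-accessor behavior: it returns the concrete op table when the union currently holds that member and nullptr otherwise. A minimal dispatch sketch under that assumption:

```cpp
// Assumes the generated header is already included.
void dispatch(const tt::target::ttnn::Operation *op) {
  if (const auto *softmax = op->type_as<tt::target::ttnn::SoftmaxOp>()) {
    (void)softmax; // handle softmax
  } else if (const auto *matmul = op->type_as<tt::target::ttnn::MatmulOp>()) {
    (void)matmul; // handle matmul
  }
  // ...remaining OpType members elided.
}
```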
inline ::flatbuffers::Offset< Operation > | CreateOperation (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::OpType type_type=tt::target::ttnn::OpType::NONE, ::flatbuffers::Offset< void > type=0, ::flatbuffers::Offset<::flatbuffers::String > debug_info=0, ::flatbuffers::Offset<::flatbuffers::String > loc_info=0) |
inline ::flatbuffers::Offset< Operation > | CreateOperationDirect (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::OpType type_type=tt::target::ttnn::OpType::NONE, ::flatbuffers::Offset< void > type=0, const char *debug_info=nullptr, const char *loc_info=nullptr) |
inline ::flatbuffers::Offset< Program > | CreateProgram (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::String > name=0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>> inputs=0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>> outputs=0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::Operation >>> operations=0, ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::DynamicLib >>> dylibs=0, ::flatbuffers::Offset< tt::target::DebugInfo > debug_info=0) |
inline ::flatbuffers::Offset< Program > | CreateProgramDirect (::flatbuffers::FlatBufferBuilder &_fbb, const char *name=nullptr, const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> *inputs=nullptr, const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> *outputs=nullptr, const std::vector<::flatbuffers::Offset< tt::target::ttnn::Operation >> *operations=nullptr, const std::vector<::flatbuffers::Offset< tt::target::DynamicLib >> *dylibs=nullptr, ::flatbuffers::Offset< tt::target::DebugInfo > debug_info=0) |
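A sketch of wrapping a concrete op in an Operation (the OpType union selects the member) and collecting Operations into a Program. The dimension, debug/loc strings, and program name are placeholders, and the inputs/outputs/dylibs are left unset.

```cpp
// Assumes the generated header (and <vector>) are already included.
::flatbuffers::Offset<tt::target::ttnn::Program>
emitProgram(::flatbuffers::FlatBufferBuilder &fbb) {
  auto softmax = tt::target::ttnn::CreateSoftmaxOp(fbb, /*in=*/0, /*out=*/0,
                                                   /*dimension=*/1);
  auto operation = tt::target::ttnn::CreateOperationDirect(
      fbb, tt::target::ttnn::OpType::SoftmaxOp, softmax.Union(),
      /*debug_info=*/"ttnn.softmax", /*loc_info=*/"unknown");
  std::vector<::flatbuffers::Offset<tt::target::ttnn::Operation>> operations = {
      operation};
  return tt::target::ttnn::CreateProgramDirect(
      fbb, /*name=*/"main", /*inputs=*/nullptr, /*outputs=*/nullptr,
      &operations, /*dylibs=*/nullptr, /*debug_info=*/0);
}
```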
const TensorMemoryLayout(& | EnumValuesTensorMemoryLayout ())[5] |
const char *const * | EnumNamesTensorMemoryLayout () |
const char * | EnumNameTensorMemoryLayout (TensorMemoryLayout e) |
const StorageType(& | EnumValuesStorageType ())[5] |
const char *const * | EnumNamesStorageType () |
const char * | EnumNameStorageType (StorageType e) |
const MeshShardDirection(& | EnumValuesMeshShardDirection ())[2] |
const char *const * | EnumNamesMeshShardDirection () |
const char * | EnumNameMeshShardDirection (MeshShardDirection e) |
const MeshShardType(& | EnumValuesMeshShardType ())[4] |
const char *const * | EnumNamesMeshShardType () |
const char * | EnumNameMeshShardType (MeshShardType e) |
const DistributedTensorConfig(& | EnumValuesDistributedTensorConfig ())[5] |
const char *const * | EnumNamesDistributedTensorConfig () |
const char * | EnumNameDistributedTensorConfig (DistributedTensorConfig e) |
bool | VerifyDistributedTensorConfig (::flatbuffers::Verifier &verifier, const void *obj, DistributedTensorConfig type) |
bool | VerifyDistributedTensorConfigVector (::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< DistributedTensorConfig > *types) |
FLATBUFFERS_MANUALLY_ALIGNED_STRUCT (8) CoreCoord FLATBUFFERS_FINAL_CLASS | |
FLATBUFFERS_STRUCT_END (CoreCoord, 16) | |
FLATBUFFERS_STRUCT_END (CoreRange, 32) | |
inline ::flatbuffers::Offset< CoreRangeSet > | CreateCoreRangeSet (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< const tt::target::ttnn::CoreRange * >> core_ranges=0) |
inline ::flatbuffers::Offset< CoreRangeSet > | CreateCoreRangeSetDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< tt::target::ttnn::CoreRange > *core_ranges=nullptr) |
inline ::flatbuffers::Offset< ReplicateTensor > | CreateReplicateTensor (::flatbuffers::FlatBufferBuilder &_fbb, uint32_t replication_factor=0) |
inline ::flatbuffers::Offset< ShardTensor > | CreateShardTensor (::flatbuffers::FlatBufferBuilder &_fbb, uint32_t shard_dim=0) |
inline ::flatbuffers::Offset< ShardTensor2D > | CreateShardTensor2D (::flatbuffers::FlatBufferBuilder &_fbb, const tt::target::Dim2d *shard_mesh=nullptr) |
inline ::flatbuffers::Offset< AllGatherTensor > | CreateAllGatherTensor (::flatbuffers::FlatBufferBuilder &_fbb) |
template<> | |
const tt::target::ttnn::ReplicateTensor * | DistributionStrategy::strategy_as< tt::target::ttnn::ReplicateTensor > () const |
template<> | |
const tt::target::ttnn::ShardTensor * | DistributionStrategy::strategy_as< tt::target::ttnn::ShardTensor > () const |
template<> | |
const tt::target::ttnn::ShardTensor2D * | DistributionStrategy::strategy_as< tt::target::ttnn::ShardTensor2D > () const |
template<> | |
const tt::target::ttnn::AllGatherTensor * | DistributionStrategy::strategy_as< tt::target::ttnn::AllGatherTensor > () const |
inline ::flatbuffers::Offset< DistributionStrategy > | CreateDistributionStrategy (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::DistributedTensorConfig strategy_type=tt::target::ttnn::DistributedTensorConfig::NONE, ::flatbuffers::Offset< void > strategy=0) |
inline ::flatbuffers::Offset< ShardSpec > | CreateShardSpec (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< const tt::target::Dim2dRange * >> grid=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> shard_shape=0) |
inline ::flatbuffers::Offset< ShardSpec > | CreateShardSpecDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< tt::target::Dim2dRange > *grid=nullptr, const std::vector< int32_t > *shard_shape=nullptr) |
inline ::flatbuffers::Offset< MemoryConfig > | CreateMemoryConfig (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::TensorMemoryLayout tensor_memory_layout=tt::target::ttnn::TensorMemoryLayout::Interleaved, tt::target::BufferType buffer_type=tt::target::BufferType::DRAM, ::flatbuffers::Offset< tt::target::ttnn::ShardSpec > shard_spec=0) |
inline ::flatbuffers::Offset< MemoryDesc > | CreateMemoryDesc (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::StorageType storage_type=tt::target::ttnn::StorageType::Owned, const tt::target::Dim2d *tile_shape=nullptr, tt::target::DataType data_type=tt::target::DataType::Float32, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, uint64_t size=0) |
inline ::flatbuffers::Offset< LayoutDesc > | CreateLayoutDesc (::flatbuffers::FlatBufferBuilder &_fbb, tt::target::OOBVal oob_val=tt::target::OOBVal::Undef, ::flatbuffers::Offset< tt::target::ttnn::MemoryDesc > memory_desc=0, ::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy > strategy=0) |
inline ::flatbuffers::Offset< TensorDesc > | CreateTensorDesc (::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> shape=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> mesh_shape=0, ::flatbuffers::Offset< tt::target::ttnn::LayoutDesc > layout=0) |
inline ::flatbuffers::Offset< TensorDesc > | CreateTensorDescDirect (::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< int32_t > *shape=nullptr, const std::vector< int32_t > *mesh_shape=nullptr, ::flatbuffers::Offset< tt::target::ttnn::LayoutDesc > layout=0) |
inline ::flatbuffers::Offset< TensorRef > | CreateTensorRef (::flatbuffers::FlatBufferBuilder &_fbb, uint32_t global_id=0, uint64_t size=0, ::flatbuffers::Offset< tt::target::ttnn::TensorDesc > desc=0) |
inline ::flatbuffers::Offset<AllGatherOp> tt::target::ttnn::CreateAllGatherOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
int32_t | all_gather_dim = 0 , |
||
uint32_t | cluster_axis = 0 , |
||
uint32_t | num_links = 0 |
||
) |
inline ::flatbuffers::Offset<AllGatherTensor> tt::target::ttnn::CreateAllGatherTensor | ( | ::flatbuffers::FlatBufferBuilder & | _fbb | ) |
inline ::flatbuffers::Offset<ArangeOp> tt::target::ttnn::CreateArangeOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
float | start = 0.0f , |
||
float | end = 0.0f , |
||
float | step = 0.0f , |
||
::flatbuffers::Optional< tt::target::DataType > | dtype = ::flatbuffers::nullopt , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<ClampScalarOpParams> tt::target::ttnn::CreateClampScalarOpParams | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
float | min = 0.0f , |
||
float | max = 0.0f |
||
) |
inline ::flatbuffers::Offset<ClampTensorOpParams> tt::target::ttnn::CreateClampTensorOpParams | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | min = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | max = 0 |
||
) |
inline ::flatbuffers::Offset<CollectivePermuteOp> tt::target::ttnn::CreateCollectivePermuteOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> | source_target_pairs = 0 |
||
) |
inline ::flatbuffers::Offset<CollectivePermuteOp> tt::target::ttnn::CreateCollectivePermuteOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
const std::vector< int64_t > * | source_target_pairs = nullptr |
||
) |
inline ::flatbuffers::Offset<ConcatOp> tt::target::ttnn::CreateConcatOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>> | inputs = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
int32_t | dim = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 |
||
) |
inline ::flatbuffers::Offset<ConcatOp> tt::target::ttnn::CreateConcatOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> * | inputs = nullptr , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
int32_t | dim = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 |
||
) |
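The `...Direct` overloads accept host containers (`std::vector`, `const char *`) and serialize them into the builder internally, while the plain `Create...` functions expect offsets that have already been written. A minimal sketch of both styles for `ConcatOp`; the generated header name and the tensor ids/sizes below are assumptions for illustration only:

```cpp
#include <vector>
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

flatbuffers::Offset<ConcatOp> buildConcat(flatbuffers::FlatBufferBuilder &fbb) {
  // Placeholder tensor refs; real code would attach TensorDesc layouts.
  std::vector<flatbuffers::Offset<TensorRef>> ins = {
      CreateTensorRef(fbb, /*global_id=*/1, /*size=*/4096, /*desc=*/0),
      CreateTensorRef(fbb, /*global_id=*/2, /*size=*/4096, /*desc=*/0)};
  auto out = CreateTensorRef(fbb, /*global_id=*/3, /*size=*/8192, /*desc=*/0);

  // Direct overload: the vector of offsets is serialized for us.
  return CreateConcatOpDirect(fbb, &ins, out, /*dim=*/0, /*memory_config=*/0);

  // Equivalent non-Direct form:
  //   auto vec = fbb.CreateVector(ins);
  //   return CreateConcatOp(fbb, vec, out, /*dim=*/0, /*memory_config=*/0);
}
```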
inline ::flatbuffers::Offset<ConstantOp> tt::target::ttnn::CreateConstantOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< uint8_t >> | data = 0 |
||
) |
inline ::flatbuffers::Offset<ConstantOp> tt::target::ttnn::CreateConstantOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
const std::vector< uint8_t > * | data = nullptr |
||
) |
inline ::flatbuffers::Offset<ConstructTensorOp> tt::target::ttnn::CreateConstructTensorOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> | shape = 0 , |
||
tt::target::DataType | dtype = tt::target::DataType::Float32 , |
||
tt::target::TensorLayout | layout = tt::target::TensorLayout::RowMajor , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<ConstructTensorOp> tt::target::ttnn::CreateConstructTensorOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const std::vector< int64_t > * | shape = nullptr , |
||
tt::target::DataType | dtype = tt::target::DataType::Float32 , |
||
tt::target::TensorLayout | layout = tt::target::TensorLayout::RowMajor , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<Conv2dConfig> tt::target::ttnn::CreateConv2dConfig | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::DataType | dtype = tt::target::DataType::Float32 , |
||
tt::target::DataType | weights_dtype = tt::target::DataType::Float32 , |
||
::flatbuffers::Offset<::flatbuffers::String > | activation = 0 , |
||
uint32_t | input_channels_alignment = 0 , |
||
bool | deallocate_activation = false , |
||
bool | reallocate_halo_output = false , |
||
uint32_t | act_block_h_override = 0 , |
||
uint32_t | act_block_w_div = 0 , |
||
bool | reshard_if_not_optimal = false , |
||
bool | override_sharding_config = false , |
||
::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout > | shard_layout = ::flatbuffers::nullopt , |
||
::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > | core_grid = 0 , |
||
bool | transpose_shards = false , |
||
tt::target::TensorLayout | output_layout = tt::target::TensorLayout::RowMajor , |
||
bool | preprocess_weights_on_device = false , |
||
bool | always_preprocess_weights = false , |
||
bool | enable_act_double_buffer = false , |
||
bool | enable_weights_double_buffer = false , |
||
bool | enable_split_reader = false , |
||
bool | enable_subblock_padding = false |
||
) |
inline ::flatbuffers::Offset<Conv2dConfig> tt::target::ttnn::CreateConv2dConfigDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::DataType | dtype = tt::target::DataType::Float32 , |
||
tt::target::DataType | weights_dtype = tt::target::DataType::Float32 , |
||
const char * | activation = nullptr , |
||
uint32_t | input_channels_alignment = 0 , |
||
bool | deallocate_activation = false , |
||
bool | reallocate_halo_output = false , |
||
uint32_t | act_block_h_override = 0 , |
||
uint32_t | act_block_w_div = 0 , |
||
bool | reshard_if_not_optimal = false , |
||
bool | override_sharding_config = false , |
||
::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout > | shard_layout = ::flatbuffers::nullopt , |
||
::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > | core_grid = 0 , |
||
bool | transpose_shards = false , |
||
tt::target::TensorLayout | output_layout = tt::target::TensorLayout::RowMajor , |
||
bool | preprocess_weights_on_device = false , |
||
bool | always_preprocess_weights = false , |
||
bool | enable_act_double_buffer = false , |
||
bool | enable_weights_double_buffer = false , |
||
bool | enable_split_reader = false , |
||
bool | enable_subblock_padding = false |
||
) |
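Because every argument after `_fbb` is defaulted, callers only need to spell out a leading prefix of the parameter list. A minimal sketch building a `Conv2dConfig`; the `"relu"` activation string and the alignment value are illustrative, not values taken from this page:

```cpp
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

flatbuffers::Offset<tt::target::ttnn::Conv2dConfig>
buildConv2dConfig(flatbuffers::FlatBufferBuilder &fbb) {
  // Only the leading arguments are spelled out; the remaining fields
  // (deallocate_activation, shard_layout, ...) keep their documented defaults.
  return tt::target::ttnn::CreateConv2dConfigDirect(
      fbb,
      tt::target::DataType::Float32,      // dtype
      tt::target::DataType::Float32,      // weights_dtype
      "relu",                             // activation (placeholder)
      /*input_channels_alignment=*/32);   // illustrative value
}
```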
inline ::flatbuffers::Offset<Conv2dOp> tt::target::ttnn::CreateConv2dOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | input = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | weight = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | bias = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
uint32_t | in_channels = 0 , |
||
uint32_t | out_channels = 0 , |
||
uint32_t | batch_size = 0 , |
||
uint32_t | input_height = 0 , |
||
uint32_t | input_width = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | kernel_size = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | stride = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | padding = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | dilation = 0 , |
||
uint32_t | groups = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > | conv2d_config = 0 |
||
) |
inline ::flatbuffers::Offset<Conv2dOp> tt::target::ttnn::CreateConv2dOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | input = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | weight = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | bias = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
uint32_t | in_channels = 0 , |
||
uint32_t | out_channels = 0 , |
||
uint32_t | batch_size = 0 , |
||
uint32_t | input_height = 0 , |
||
uint32_t | input_width = 0 , |
||
const std::vector< int32_t > * | kernel_size = nullptr , |
||
const std::vector< int32_t > * | stride = nullptr , |
||
const std::vector< int32_t > * | padding = nullptr , |
||
const std::vector< int32_t > * | dilation = nullptr , |
||
uint32_t | groups = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > | conv2d_config = 0 |
||
) |
inline ::flatbuffers::Offset<ConvTranspose2dOp> tt::target::ttnn::CreateConvTranspose2dOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | input = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | weight = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | bias = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
uint32_t | in_channels = 0 , |
||
uint32_t | out_channels = 0 , |
||
uint32_t | batch_size = 0 , |
||
uint32_t | input_height = 0 , |
||
uint32_t | input_width = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | kernel_size = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | stride = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | padding = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | output_padding = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | dilation = 0 , |
||
uint32_t | groups = 0 |
||
) |
inline ::flatbuffers::Offset<ConvTranspose2dOp> tt::target::ttnn::CreateConvTranspose2dOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | input = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | weight = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | bias = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
uint32_t | in_channels = 0 , |
||
uint32_t | out_channels = 0 , |
||
uint32_t | batch_size = 0 , |
||
uint32_t | input_height = 0 , |
||
uint32_t | input_width = 0 , |
||
const std::vector< int32_t > * | kernel_size = nullptr , |
||
const std::vector< int32_t > * | stride = nullptr , |
||
const std::vector< int32_t > * | padding = nullptr , |
||
const std::vector< int32_t > * | output_padding = nullptr , |
||
const std::vector< int32_t > * | dilation = nullptr , |
||
uint32_t | groups = 0 |
||
) |
inline ::flatbuffers::Offset<CoreRangeSet> tt::target::ttnn::CreateCoreRangeSet | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset<::flatbuffers::Vector< const tt::target::ttnn::CoreRange * >> | core_ranges = 0 |
||
) |
inline ::flatbuffers::Offset<CoreRangeSet> tt::target::ttnn::CreateCoreRangeSetDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const std::vector< tt::target::ttnn::CoreRange > * | core_ranges = nullptr |
||
) |
inline ::flatbuffers::Offset<CpuOp> tt::target::ttnn::CreateCpuOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>> | ins = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset<::flatbuffers::String > | func_name = 0 , |
||
uint32_t | dylib_id = 0 |
||
) |
inline ::flatbuffers::Offset<CpuOp> tt::target::ttnn::CreateCpuOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> * | ins = nullptr , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
const char * | func_name = nullptr , |
||
uint32_t | dylib_id = 0 |
||
) |
inline ::flatbuffers::Offset<DeallocateOp> tt::target::ttnn::CreateDeallocateOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
bool | force = false |
||
) |
inline ::flatbuffers::Offset<DistributionStrategy> tt::target::ttnn::CreateDistributionStrategy | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::DistributedTensorConfig | strategy_type = tt::target::ttnn::DistributedTensorConfig::NONE , |
||
::flatbuffers::Offset< void > | strategy = 0 |
||
) |
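`DistributionStrategy` stores a union: the tag goes in `strategy_type` and the payload is passed as a type-erased `Offset<void>` via `.Union()`; on the read side the matching `strategy_as<...>()` accessor returns the typed table. A sketch, assuming `DistributedTensorConfig::ReplicateTensor` names the `ReplicateTensor` union member (the enum's values are not listed on this page):

```cpp
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

flatbuffers::Offset<DistributionStrategy>
buildReplicate(flatbuffers::FlatBufferBuilder &fbb, uint32_t factor) {
  auto rep = CreateReplicateTensor(fbb, factor);
  // Tag + type-erased payload form the union.
  return CreateDistributionStrategy(
      fbb, DistributedTensorConfig::ReplicateTensor, rep.Union());
}

// Read side (given a finished buffer):
//   const ReplicateTensor *r = strategy->strategy_as<ReplicateTensor>();
```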
inline ::flatbuffers::Offset<EltwiseBinaryCompositeOp> tt::target::ttnn::CreateEltwiseBinaryCompositeOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::EltwiseBinaryCompositeOpType | type = tt::target::ttnn::EltwiseBinaryCompositeOpType::Maximum , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | lhs = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | rhs = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<EltwiseBinaryOp> tt::target::ttnn::CreateEltwiseBinaryOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::EltwiseBinaryOpType | type = tt::target::ttnn::EltwiseBinaryOpType::Add , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | lhs = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | rhs = 0 , |
||
::flatbuffers::Optional< tt::target::DataType > | output_dtype = ::flatbuffers::nullopt , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
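A minimal sketch of an element-wise `Add`, leaving the optional output dtype and memory config at their defaults; the tensor refs are assumed to have been built already:

```cpp
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

flatbuffers::Offset<EltwiseBinaryOp>
buildAdd(flatbuffers::FlatBufferBuilder &fbb,
         flatbuffers::Offset<TensorRef> lhs,
         flatbuffers::Offset<TensorRef> rhs,
         flatbuffers::Offset<TensorRef> out) {
  return CreateEltwiseBinaryOp(
      fbb,
      EltwiseBinaryOpType::Add,
      lhs, rhs,
      /*output_dtype=*/flatbuffers::nullopt,
      /*memory_config=*/0,
      out);
}
```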
inline ::flatbuffers::Offset<EltwiseOpWithFloatParams> tt::target::ttnn::CreateEltwiseOpWithFloatParams | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
float | parameter = 0.0f |
||
) |
inline ::flatbuffers::Offset<EltwiseQuantizationOp> tt::target::ttnn::CreateEltwiseQuantizationOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::EltwiseQuantizationOpType | type = tt::target::ttnn::EltwiseQuantizationOpType::Quantize , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Optional< int32_t > | axis = ::flatbuffers::nullopt , |
||
::flatbuffers::Optional< tt::target::DataType > | output_dtype = ::flatbuffers::nullopt , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
tt::target::ttnn::EltwiseQuantizationOpParams | params_type = tt::target::ttnn::EltwiseQuantizationOpParams::NONE , |
||
::flatbuffers::Offset< void > | params = 0 |
||
) |
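Quantization ops pair the op kind with a params union: `Quantize`/`Dequantize` carry `QuantizeDequantizeOpParams`, while `Requantize` carries `RequantizeOpParams`. A sketch of a per-tensor quantize with placeholder scale and zero point:

```cpp
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

flatbuffers::Offset<EltwiseQuantizationOp>
buildQuantize(flatbuffers::FlatBufferBuilder &fbb,
              flatbuffers::Offset<TensorRef> in,
              flatbuffers::Offset<TensorRef> out) {
  auto qparams = CreateQuantizeDequantizeOpParams(fbb, /*scale=*/0.05f,
                                                  /*zero_point=*/128);
  return CreateEltwiseQuantizationOp(
      fbb,
      EltwiseQuantizationOpType::Quantize,
      in,
      /*axis=*/flatbuffers::nullopt,          // per-tensor: no axis
      /*output_dtype=*/flatbuffers::nullopt,
      /*memory_config=*/0,
      out,
      EltwiseQuantizationOpParams::QuantizeDequantizeOpParams,
      qparams.Union());
}
```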
inline ::flatbuffers::Offset<EltwiseTernaryWhereOp> tt::target::ttnn::CreateEltwiseTernaryWhereOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | first = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | second = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | third = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<EltwiseUnaryCompositeOp> tt::target::ttnn::CreateEltwiseUnaryCompositeOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::EltwiseUnaryCompositeOpType | type = tt::target::ttnn::EltwiseUnaryCompositeOpType::Cbrt , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
tt::target::ttnn::EltwiseUnaryCompositeOpParams | params_type = tt::target::ttnn::EltwiseUnaryCompositeOpParams::NONE , |
||
::flatbuffers::Offset< void > | params = 0 |
||
) |
inline ::flatbuffers::Offset<EltwiseUnaryOp> tt::target::ttnn::CreateEltwiseUnaryOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::EltwiseUnaryOpType | type = tt::target::ttnn::EltwiseUnaryOpType::Abs , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
tt::target::ttnn::EltwiseUnaryOpParams | params_type = tt::target::ttnn::EltwiseUnaryOpParams::NONE , |
||
::flatbuffers::Offset< void > | params = 0 |
||
) |
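Unary ops that need a scalar argument carry it through the `params` union. A sketch for `LeakyRelu` with a slope parameter; the 0.01 value is illustrative:

```cpp
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

flatbuffers::Offset<EltwiseUnaryOp>
buildLeakyRelu(flatbuffers::FlatBufferBuilder &fbb,
               flatbuffers::Offset<TensorRef> in,
               flatbuffers::Offset<TensorRef> out) {
  auto slope = CreateEltwiseOpWithFloatParams(fbb, /*parameter=*/0.01f);
  return CreateEltwiseUnaryOp(
      fbb,
      EltwiseUnaryOpType::LeakyRelu,
      in,
      /*memory_config=*/0,
      out,
      EltwiseUnaryOpParams::EltwiseOpWithFloatParams,  // params union tag
      slope.Union());
}
```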
inline ::flatbuffers::Offset<EmbeddingBackwardOp> tt::target::ttnn::CreateEmbeddingBackwardOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | input = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | weight = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in_grad = 0 , |
||
::flatbuffers::Optional< tt::target::DataType > | dtype = ::flatbuffers::nullopt , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<EmbeddingOp> tt::target::ttnn::CreateEmbeddingOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | input = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | weight = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<EmptyOp> tt::target::ttnn::CreateEmptyOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> | shape = 0 , |
||
tt::target::DataType | dtype = tt::target::DataType::Float32 , |
||
tt::target::TensorLayout | layout = tt::target::TensorLayout::RowMajor , |
||
uint32_t | num_shards = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy > | strategy = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<EmptyOp> tt::target::ttnn::CreateEmptyOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const std::vector< int64_t > * | shape = nullptr , |
||
tt::target::DataType | dtype = tt::target::DataType::Float32 , |
||
tt::target::TensorLayout | layout = tt::target::TensorLayout::RowMajor , |
||
uint32_t | num_shards = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy > | strategy = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<FillCacheOp> tt::target::ttnn::CreateFillCacheOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | cache = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | input = 0 , |
||
uint32_t | batch_offset = 0 |
||
) |
inline ::flatbuffers::Offset<FromDeviceOp> tt::target::ttnn::CreateFromDeviceOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<FullOp> tt::target::ttnn::CreateFullOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
float | fill_value = 0.0f , |
||
uint32_t | num_shards = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy > | strategy = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<GetDeviceOp> tt::target::ttnn::CreateGetDeviceOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const tt::target::Dim2d * | mesh = nullptr , |
||
const tt::target::Dim2d * | offset = nullptr , |
||
::flatbuffers::Offset<::flatbuffers::Vector< uint32_t >> | chip_ids = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<GetDeviceOp> tt::target::ttnn::CreateGetDeviceOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const tt::target::Dim2d * | mesh = nullptr , |
||
const tt::target::Dim2d * | offset = nullptr , |
||
const std::vector< uint32_t > * | chip_ids = nullptr , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | out = 0 |
||
) |
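A sketch of a `GetDeviceOp` that targets a single chip. The mesh/offset structs and the output `DeviceRef` are defined outside this page, so they are left at their defaults here; a complete op would fill them in:

```cpp
#include <vector>
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

flatbuffers::Offset<GetDeviceOp>
buildGetDevice(flatbuffers::FlatBufferBuilder &fbb) {
  std::vector<uint32_t> chip_ids = {0};   // placeholder device id
  return CreateGetDeviceOpDirect(
      fbb,
      /*mesh=*/nullptr,
      /*offset=*/nullptr,
      &chip_ids,
      /*out=*/0);                         // DeviceRef omitted for brevity
}
```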
inline ::flatbuffers::Offset<LayoutDesc> tt::target::ttnn::CreateLayoutDesc | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::OOBVal | oob_val = tt::target::OOBVal::Undef , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryDesc > | memory_desc = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::DistributionStrategy > | strategy = 0 |
||
) |
inline ::flatbuffers::Offset<LinearOp> tt::target::ttnn::CreateLinearOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | a = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | b = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | bias = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
bool | transpose_a = false , |
||
bool | transpose_b = false |
||
) |
inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCast1DProgramConfig> tt::target::ttnn::CreateMatmulMultiCoreReuseMultiCast1DProgramConfig | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const tt::target::ttnn::CoreCoord * | compute_with_storage_grid_size = nullptr , |
||
uint64_t | in0_block_w = 0 , |
||
uint64_t | out_subblock_h = 0 , |
||
uint64_t | out_subblock_w = 0 , |
||
uint64_t | out_block_h = 0 , |
||
uint64_t | out_block_w = 0 , |
||
uint64_t | per_core_m = 0 , |
||
uint64_t | per_core_n = 0 , |
||
bool | fuse_batch = false , |
||
::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam > | fused_activation = 0 , |
||
bool | mcast_in0 = false , |
||
bool | gather_in0 = false , |
||
::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > | hop_cores = 0 , |
||
uint64_t | num_global_cb_receivers = 0 |
||
) |
inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig> tt::target::ttnn::CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
uint64_t | in0_block_w = 0 , |
||
uint64_t | per_core_m = 0 , |
||
uint64_t | per_core_n = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam > | fused_activation = 0 |
||
) |
inline ::flatbuffers::Offset<MatmulMultiCoreReuseMultiCastProgramConfig> tt::target::ttnn::CreateMatmulMultiCoreReuseMultiCastProgramConfig | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const tt::target::ttnn::CoreCoord * | compute_with_storage_grid_size = nullptr , |
||
uint64_t | in0_block_w = 0 , |
||
uint64_t | out_subblock_h = 0 , |
||
uint64_t | out_subblock_w = 0 , |
||
uint64_t | out_block_h = 0 , |
||
uint64_t | out_block_w = 0 , |
||
uint64_t | per_core_m = 0 , |
||
uint64_t | per_core_n = 0 , |
||
bool | transpose_mcast = false , |
||
::flatbuffers::Offset< tt::target::ttnn::UnaryWithParam > | fused_activation = 0 , |
||
bool | fuse_batch = false |
||
) |
inline ::flatbuffers::Offset<MatmulMultiCoreReuseProgramConfig> tt::target::ttnn::CreateMatmulMultiCoreReuseProgramConfig | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const tt::target::ttnn::CoreCoord * | compute_with_storage_grid_size = nullptr , |
||
uint64_t | in0_block_w = 0 , |
||
uint64_t | out_subblock_h = 0 , |
||
uint64_t | out_subblock_w = 0 , |
||
uint64_t | per_core_m = 0 , |
||
uint64_t | per_core_n = 0 |
||
) |
inline ::flatbuffers::Offset<MatmulOp> tt::target::ttnn::CreateMatmulOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | a = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | b = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
bool | transpose_a = false , |
||
bool | transpose_b = false , |
||
tt::target::ttnn::MatmulProgramConfig | matmul_program_config_type = tt::target::ttnn::MatmulProgramConfig::NONE , |
||
::flatbuffers::Offset< void > | matmul_program_config = 0 |
||
) |
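The optional matmul program config is another tag/payload pair. A sketch attaching the DRAM-sharded variant (chosen here because it does not require a `CoreCoord` struct); the block and per-core counts are placeholders:

```cpp
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

flatbuffers::Offset<MatmulOp>
buildMatmul(flatbuffers::FlatBufferBuilder &fbb,
            flatbuffers::Offset<TensorRef> a,
            flatbuffers::Offset<TensorRef> b,
            flatbuffers::Offset<TensorRef> out) {
  auto cfg = CreateMatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig(
      fbb, /*in0_block_w=*/2, /*per_core_m=*/4, /*per_core_n=*/4,
      /*fused_activation=*/0);
  return CreateMatmulOp(
      fbb, a, b, out,
      /*transpose_a=*/false, /*transpose_b=*/false,
      MatmulProgramConfig::MatmulMultiCoreReuseMultiCastDRAMShardedProgramConfig,
      cfg.Union());
}
```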
inline ::flatbuffers::Offset<MaxPool2dOp> tt::target::ttnn::CreateMaxPool2dOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
uint32_t | batch_size = 0 , |
||
uint32_t | input_height = 0 , |
||
uint32_t | input_width = 0 , |
||
uint32_t | channels = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | kernel_size = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | stride = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | padding = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | dilation = 0 , |
||
bool | ceil_mode = false |
||
) |
inline ::flatbuffers::Offset<MaxPool2dOp> tt::target::ttnn::CreateMaxPool2dOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
uint32_t | batch_size = 0 , |
||
uint32_t | input_height = 0 , |
||
uint32_t | input_width = 0 , |
||
uint32_t | channels = 0 , |
||
const std::vector< int32_t > * | kernel_size = nullptr , |
||
const std::vector< int32_t > * | stride = nullptr , |
||
const std::vector< int32_t > * | padding = nullptr , |
||
const std::vector< int32_t > * | dilation = nullptr , |
||
bool | ceil_mode = false |
||
) |
inline ::flatbuffers::Offset<MemoryConfig> tt::target::ttnn::CreateMemoryConfig | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::TensorMemoryLayout | tensor_memory_layout = tt::target::ttnn::TensorMemoryLayout::Interleaved , |
||
tt::target::BufferType | buffer_type = tt::target::BufferType::DRAM , |
||
::flatbuffers::Offset< tt::target::ttnn::ShardSpec > | shard_spec = 0 |
||
) |
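A sketch of the common interleaved-DRAM memory config; both enum values shown are the documented defaults, and the shard spec is omitted because interleaved layouts do not need one:

```cpp
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

flatbuffers::Offset<MemoryConfig>
dramInterleaved(flatbuffers::FlatBufferBuilder &fbb) {
  return CreateMemoryConfig(
      fbb,
      TensorMemoryLayout::Interleaved,
      tt::target::BufferType::DRAM,
      /*shard_spec=*/0);
}
```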
inline ::flatbuffers::Offset<MemoryDesc> tt::target::ttnn::CreateMemoryDesc | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::StorageType | storage_type = tt::target::ttnn::StorageType::Owned , |
||
const tt::target::Dim2d * | tile_shape = nullptr , |
||
tt::target::DataType | data_type = tt::target::DataType::Float32 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 , |
||
uint64_t | size = 0 |
||
) |
inline ::flatbuffers::Offset<MeshShardOp> tt::target::ttnn::CreateMeshShardOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
tt::target::ttnn::MeshShardDirection | shard_direction = tt::target::ttnn::MeshShardDirection::FullToShardShape , |
||
tt::target::ttnn::MeshShardType | shard_type = tt::target::ttnn::MeshShardType::Identity , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> | shard_shape = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> | shard_dims = 0 |
||
) |
inline ::flatbuffers::Offset<MeshShardOp> tt::target::ttnn::CreateMeshShardOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
tt::target::ttnn::MeshShardDirection | shard_direction = tt::target::ttnn::MeshShardDirection::FullToShardShape , |
||
tt::target::ttnn::MeshShardType | shard_type = tt::target::ttnn::MeshShardType::Identity , |
||
const std::vector< int64_t > * | shard_shape = nullptr , |
||
const std::vector< int64_t > * | shard_dims = nullptr |
||
) |
inline ::flatbuffers::Offset<MorehCumSumOp> tt::target::ttnn::CreateMorehCumSumOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
int64_t | dim = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 |
||
) |
inline ::flatbuffers::Offset<NamedFullOp> tt::target::ttnn::CreateNamedFullOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::NamedFullOpType | type = tt::target::ttnn::NamedFullOpType::Zeros , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> | shape = 0 , |
||
::flatbuffers::Optional< tt::target::DataType > | dtype = ::flatbuffers::nullopt , |
||
::flatbuffers::Optional< tt::target::TensorLayout > | layout = ::flatbuffers::nullopt , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<NamedFullOp> tt::target::ttnn::CreateNamedFullOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::NamedFullOpType | type = tt::target::ttnn::NamedFullOpType::Zeros , |
||
const std::vector< int64_t > * | shape = nullptr , |
||
::flatbuffers::Optional< tt::target::DataType > | dtype = ::flatbuffers::nullopt , |
||
::flatbuffers::Optional< tt::target::TensorLayout > | layout = ::flatbuffers::nullopt , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<NonUniformScale2D> tt::target::ttnn::CreateNonUniformScale2D | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | scale = 0 |
||
) |
inline ::flatbuffers::Offset<NonUniformScale2D> tt::target::ttnn::CreateNonUniformScale2DDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const std::vector< int32_t > * | scale = nullptr |
||
) |
inline ::flatbuffers::Offset<Operation> tt::target::ttnn::CreateOperation | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::OpType | type_type = tt::target::ttnn::OpType::NONE , |
||
::flatbuffers::Offset< void > | type = 0 , |
||
::flatbuffers::Offset<::flatbuffers::String > | debug_info = 0 , |
||
::flatbuffers::Offset<::flatbuffers::String > | loc_info = 0 |
||
) |
inline ::flatbuffers::Offset<Operation> tt::target::ttnn::CreateOperationDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::OpType | type_type = tt::target::ttnn::OpType::NONE , |
||
::flatbuffers::Offset< void > | type = 0 , |
||
const char * | debug_info = nullptr , |
||
const char * | loc_info = nullptr |
||
) |
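Every concrete op table is wrapped in an `Operation` whose `type_type`/`type` pair forms the `OpType` union; readers recover the payload with the matching `type_as<...>()` specialization listed above. A sketch wrapping an eltwise binary op, with placeholder debug strings:

```cpp
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

flatbuffers::Offset<Operation>
wrapOp(flatbuffers::FlatBufferBuilder &fbb,
       flatbuffers::Offset<EltwiseBinaryOp> add_op) {
  return CreateOperationDirect(
      fbb,
      OpType::EltwiseBinaryOp,   // union tag
      add_op.Union(),            // type-erased payload
      "add",                     // debug_info (placeholder)
      "loc(unknown)");           // loc_info  (placeholder)
}

// Read side (accessor names follow the usual flatbuffers union convention):
//   if (op->type_type() == OpType::EltwiseBinaryOp)
//     const auto *bin = op->type_as<EltwiseBinaryOp>();
```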
inline ::flatbuffers::Offset<PadOp> tt::target::ttnn::CreatePadOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< uint32_t >> | padding = 0 , |
||
float | value = 0.0f , |
||
bool | use_multicore = false , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 |
||
) |
inline ::flatbuffers::Offset<PadOp> tt::target::ttnn::CreatePadOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
const std::vector< uint32_t > * | padding = nullptr , |
||
float | value = 0.0f , |
||
bool | use_multicore = false , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 |
||
) |
inline ::flatbuffers::Offset<PermuteOp> tt::target::ttnn::CreatePermuteOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> | permutation = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 , |
||
float | pad_value = 0.0f , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<PermuteOp> tt::target::ttnn::CreatePermuteOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
const std::vector< int64_t > * | permutation = nullptr , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 , |
||
float | pad_value = 0.0f , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<PrepareConv2dWeightsOp> tt::target::ttnn::CreatePrepareConv2dWeightsOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | weight_tensor = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | input_memory_config = 0 , |
||
tt::target::TensorLayout | input_tensor_layout = tt::target::TensorLayout::RowMajor , |
||
::flatbuffers::Offset<::flatbuffers::String > | weights_format = 0 , |
||
uint32_t | in_channels = 0 , |
||
uint32_t | out_channels = 0 , |
||
uint32_t | batch_size = 0 , |
||
uint32_t | input_height = 0 , |
||
uint32_t | input_width = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | kernel_size = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | stride = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | padding = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | dilation = 0 , |
||
bool | has_bias = false , |
||
uint32_t | groups = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > | conv2d_config = 0 |
||
) |
inline ::flatbuffers::Offset<PrepareConv2dWeightsOp> tt::target::ttnn::CreatePrepareConv2dWeightsOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | weight_tensor = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | input_memory_config = 0 , |
||
tt::target::TensorLayout | input_tensor_layout = tt::target::TensorLayout::RowMajor , |
||
const char * | weights_format = nullptr , |
||
uint32_t | in_channels = 0 , |
||
uint32_t | out_channels = 0 , |
||
uint32_t | batch_size = 0 , |
||
uint32_t | input_height = 0 , |
||
uint32_t | input_width = 0 , |
||
const std::vector< int32_t > * | kernel_size = nullptr , |
||
const std::vector< int32_t > * | stride = nullptr , |
||
const std::vector< int32_t > * | padding = nullptr , |
||
const std::vector< int32_t > * | dilation = nullptr , |
||
bool | has_bias = false , |
||
uint32_t | groups = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > | conv2d_config = 0 |
||
) |
inline ::flatbuffers::Offset<Program> tt::target::ttnn::CreateProgram | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset<::flatbuffers::String > | name = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>> | inputs = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >>> | outputs = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::Operation >>> | operations = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::DynamicLib >>> | dylibs = 0 , |
||
::flatbuffers::Offset< tt::target::DebugInfo > | debug_info = 0 |
||
) |
inline ::flatbuffers::Offset<Program> tt::target::ttnn::CreateProgramDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const char * | name = nullptr , |
||
const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> * | inputs = nullptr , |
||
const std::vector<::flatbuffers::Offset< tt::target::ttnn::TensorRef >> * | outputs = nullptr , |
||
const std::vector<::flatbuffers::Offset< tt::target::ttnn::Operation >> * | operations = nullptr , |
||
const std::vector<::flatbuffers::Offset< tt::target::DynamicLib >> * | dylibs = nullptr , |
||
::flatbuffers::Offset< tt::target::DebugInfo > | debug_info = 0 |
||
) |
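A sketch assembling a `Program` from previously built tensor refs and wrapped operations; dylibs and debug info are left empty and the program name is a placeholder:

```cpp
#include <vector>
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

flatbuffers::Offset<Program>
buildProgram(flatbuffers::FlatBufferBuilder &fbb,
             std::vector<flatbuffers::Offset<TensorRef>> &inputs,
             std::vector<flatbuffers::Offset<TensorRef>> &outputs,
             std::vector<flatbuffers::Offset<Operation>> &ops) {
  return CreateProgramDirect(
      fbb,
      "main",          // program name (placeholder)
      &inputs,
      &outputs,
      &ops,
      /*dylibs=*/nullptr,
      /*debug_info=*/0);
}
```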
inline ::flatbuffers::Offset<QuantizeDequantizeOpParams> tt::target::ttnn::CreateQuantizeDequantizeOpParams | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
float | scale = 0.0f , |
||
int32_t | zero_point = 0 |
||
) |
inline ::flatbuffers::Offset<ReduceScatterOp> tt::target::ttnn::CreateReduceScatterOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
int32_t | scatter_dim = 0 , |
||
uint32_t | reduce_type = 0 , |
||
uint32_t | cluster_axis = 0 , |
||
uint32_t | num_links = 0 |
||
) |
inline ::flatbuffers::Offset<ReductionArgMaxOp> tt::target::ttnn::CreateReductionArgMaxOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Optional< int32_t > | dim = ::flatbuffers::nullopt , |
||
bool | use_multicore = false , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 |
||
) |
inline ::flatbuffers::Offset<ReductionOp> tt::target::ttnn::CreateReductionOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::ReductionOpType | type = tt::target::ttnn::ReductionOpType::Sum , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | dim_arg = 0 , |
||
bool | keep_dim = false |
||
) |
inline ::flatbuffers::Offset<ReductionOp> tt::target::ttnn::CreateReductionOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::ReductionOpType | type = tt::target::ttnn::ReductionOpType::Sum , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
const std::vector< int32_t > * | dim_arg = nullptr , |
||
bool | keep_dim = false |
||
) |
inline ::flatbuffers::Offset<ReductionProdOp> tt::target::ttnn::CreateReductionProdOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
bool | all_dimensions = false , |
||
int64_t | dim_arg = 0 , |
||
bool | keep_dim = false , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 |
||
) |
inline ::flatbuffers::Offset<RepeatInterleaveOp> tt::target::ttnn::CreateRepeatInterleaveOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | input = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
uint32_t | repeats = 0 , |
||
int32_t | dim = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 |
||
) |
inline ::flatbuffers::Offset<RepeatOp> tt::target::ttnn::CreateRepeatOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> | repeat_dims = 0 |
||
) |
inline ::flatbuffers::Offset<RepeatOp> tt::target::ttnn::CreateRepeatOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
const std::vector< int64_t > * | repeat_dims = nullptr |
||
) |
inline ::flatbuffers::Offset<ReplicateTensor> tt::target::ttnn::CreateReplicateTensor | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
uint32_t | replication_factor = 0 |
||
) |
inline ::flatbuffers::Offset<RequantizeOpParams> tt::target::ttnn::CreateRequantizeOpParams | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
float | in_scale = 0.0f , |
||
int32_t | in_zero_point = 0 , |
||
float | out_scale = 0.0f , |
||
int32_t | out_zero_point = 0 |
||
) |
inline ::flatbuffers::Offset<ReshapeOp> tt::target::ttnn::CreateReshapeOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | shape = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 |
||
) |
inline ::flatbuffers::Offset<ReshapeOp> tt::target::ttnn::CreateReshapeOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
const std::vector< int32_t > * | shape = nullptr , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 |
||
) |
inline ::flatbuffers::Offset<ShardSpec> tt::target::ttnn::CreateShardSpec | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset<::flatbuffers::Vector< const tt::target::Dim2dRange * >> | grid = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | shard_shape = 0 |
||
) |
inline ::flatbuffers::Offset<ShardSpec> tt::target::ttnn::CreateShardSpecDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const std::vector< tt::target::Dim2dRange > * | grid = nullptr , |
||
const std::vector< int32_t > * | shard_shape = nullptr |
||
) |
inline ::flatbuffers::Offset<ShardTensor> tt::target::ttnn::CreateShardTensor | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
uint32_t | shard_dim = 0 |
||
) |
inline ::flatbuffers::Offset<ShardTensor2D> tt::target::ttnn::CreateShardTensor2D | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const tt::target::Dim2d * | shard_mesh = nullptr |
||
) |
inline ::flatbuffers::Offset<SliceOp> tt::target::ttnn::CreateSliceOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> | begins = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> | ends = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int64_t >> | step = 0 |
||
) |
inline ::flatbuffers::Offset<SliceOp> tt::target::ttnn::CreateSliceOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
const std::vector< int64_t > * | begins = nullptr , |
||
const std::vector< int64_t > * | ends = nullptr , |
||
const std::vector< int64_t > * | step = nullptr |
||
) |
inline ::flatbuffers::Offset<SoftmaxOp> tt::target::ttnn::CreateSoftmaxOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
int32_t | dimension = 0 |
||
) |
inline ::flatbuffers::Offset<TensorDesc> tt::target::ttnn::CreateTensorDesc | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | shape = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> | mesh_shape = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::LayoutDesc > | layout = 0 |
||
) |
inline ::flatbuffers::Offset<TensorDesc> tt::target::ttnn::CreateTensorDescDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const std::vector< int32_t > * | shape = nullptr , |
||
const std::vector< int32_t > * | mesh_shape = nullptr , |
||
::flatbuffers::Offset< tt::target::ttnn::LayoutDesc > | layout = 0 |
||
) |
inline ::flatbuffers::Offset<TensorRef> tt::target::ttnn::CreateTensorRef | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
uint32_t | global_id = 0 , |
||
uint64_t | size = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorDesc > | desc = 0 |
||
) |
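Tensor metadata is built bottom-up: memory config → memory desc → layout desc → tensor desc → tensor ref. A sketch with placeholder shape, id, and byte size; the mesh shape, tile shape, and distribution strategy are left at their defaults:

```cpp
#include <vector>
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

flatbuffers::Offset<TensorRef>
buildTensorRef(flatbuffers::FlatBufferBuilder &fbb) {
  auto memcfg = CreateMemoryConfig(fbb, TensorMemoryLayout::Interleaved,
                                   tt::target::BufferType::DRAM, 0);
  auto memdesc = CreateMemoryDesc(fbb, StorageType::Owned,
                                  /*tile_shape=*/nullptr,
                                  tt::target::DataType::Float32,
                                  memcfg, /*size=*/4096);
  auto layout = CreateLayoutDesc(fbb, tt::target::OOBVal::Undef, memdesc,
                                 /*strategy=*/0);
  std::vector<int32_t> shape = {1, 32, 32};           // placeholder shape
  auto desc = CreateTensorDescDirect(fbb, &shape, /*mesh_shape=*/nullptr,
                                     layout);
  return CreateTensorRef(fbb, /*global_id=*/7, /*size=*/4096, desc);
}
```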
inline ::flatbuffers::Offset<ToDeviceOp> tt::target::ttnn::CreateToDeviceOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<ToDTypeOp> tt::target::ttnn::CreateToDTypeOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
tt::target::DataType | dtype = tt::target::DataType::Float32 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<ToLayoutOp> tt::target::ttnn::CreateToLayoutOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
tt::target::TensorLayout | layout = tt::target::TensorLayout::RowMajor , |
||
::flatbuffers::Optional< tt::target::DataType > | dtype = ::flatbuffers::nullopt , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 , |
||
::flatbuffers::Offset< tt::target::DeviceRef > | device = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<ToMemoryConfigOp> tt::target::ttnn::CreateToMemoryConfigOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in0 = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memcfg = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<TransposeOp> tt::target::ttnn::CreateTransposeOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 , |
||
int32_t | dim0 = 0 , |
||
int32_t | dim1 = 0 |
||
) |
inline ::flatbuffers::Offset<TTNNBinary> tt::target::ttnn::CreateTTNNBinary | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const tt::target::Version * | version = nullptr , |
||
::flatbuffers::Offset<::flatbuffers::String > | ttmlir_git_hash = 0 , |
||
::flatbuffers::Offset< tt::target::SystemDesc > | system_desc = 0 , |
||
::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset< tt::target::ttnn::Program >>> | programs = 0 |
||
) |
inline ::flatbuffers::Offset<TTNNBinary> tt::target::ttnn::CreateTTNNBinaryDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
const tt::target::Version * | version = nullptr , |
||
const char * | ttmlir_git_hash = nullptr , |
||
::flatbuffers::Offset< tt::target::SystemDesc > | system_desc = 0 , |
||
const std::vector<::flatbuffers::Offset< tt::target::ttnn::Program >> * | programs = nullptr |
||
) |
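The root table is `TTNNBinary`. A sketch that serializes one program list and finishes the builder; the version and system descriptor are left null, the git hash string is a placeholder, and whether a file identifier is expected when finishing is not covered on this page, so the generic `Finish` is used:

```cpp
#include <vector>
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

void serialize(flatbuffers::FlatBufferBuilder &fbb,
               std::vector<flatbuffers::Offset<Program>> &programs) {
  auto binary = CreateTTNNBinaryDirect(
      fbb,
      /*version=*/nullptr,
      /*ttmlir_git_hash=*/"unknown",     // placeholder
      /*system_desc=*/0,
      &programs);
  fbb.Finish(binary);                    // generic finish; no file identifier
  // fbb.GetBufferPointer() / fbb.GetSize() now expose the serialized bytes.
}
```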
inline ::flatbuffers::Offset<TypecastOp> tt::target::ttnn::CreateTypecastOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
tt::target::DataType | dtype = tt::target::DataType::Float32 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<UnaryWithParam> tt::target::ttnn::CreateUnaryWithParam | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::UnaryOpType | op_type = tt::target::ttnn::UnaryOpType::Exp , |
||
::flatbuffers::Offset<::flatbuffers::Vector< double >> | params = 0 |
||
) |
inline ::flatbuffers::Offset<UnaryWithParam> tt::target::ttnn::CreateUnaryWithParamDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
tt::target::ttnn::UnaryOpType | op_type = tt::target::ttnn::UnaryOpType::Exp , |
||
const std::vector< double > * | params = nullptr |
||
) |
inline ::flatbuffers::Offset<UniformScale2D> tt::target::ttnn::CreateUniformScale2D | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
int32_t | scale = 0 |
||
) |
inline ::flatbuffers::Offset<UpdateCacheOp> tt::target::ttnn::CreateUpdateCacheOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | cache = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | input = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | update_index = 0 , |
||
uint32_t | batch_offset = 0 |
||
) |
inline ::flatbuffers::Offset<UpsampleOp> tt::target::ttnn::CreateUpsampleOp | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
tt::target::ttnn::Scale2D | scale_factor_type = tt::target::ttnn::Scale2D::NONE , |
||
::flatbuffers::Offset< void > | scale_factor = 0 , |
||
::flatbuffers::Offset<::flatbuffers::String > | mode = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
inline ::flatbuffers::Offset<UpsampleOp> tt::target::ttnn::CreateUpsampleOpDirect | ( | ::flatbuffers::FlatBufferBuilder & | _fbb, |
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | in = 0 , |
||
tt::target::ttnn::Scale2D | scale_factor_type = tt::target::ttnn::Scale2D::NONE , |
||
::flatbuffers::Offset< void > | scale_factor = 0 , |
||
const char * | mode = nullptr , |
||
::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > | memory_config = 0 , |
||
::flatbuffers::Offset< tt::target::ttnn::TensorRef > | out = 0 |
||
) |
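`UpsampleOp` takes its scale through the `Scale2D` union, so a uniform factor and a per-axis factor use different payload tables under the same field. A sketch of the uniform case; the mode string is a placeholder:

```cpp
#include "flatbuffers/flatbuffers.h"
// plus the generated TTNN schema header (exact path/name assumed)

using namespace tt::target::ttnn;

flatbuffers::Offset<UpsampleOp>
buildUpsample(flatbuffers::FlatBufferBuilder &fbb,
              flatbuffers::Offset<TensorRef> in,
              flatbuffers::Offset<TensorRef> out) {
  auto scale = CreateUniformScale2D(fbb, /*scale=*/2);
  return CreateUpsampleOpDirect(
      fbb, in,
      Scale2D::UniformScale2D,   // union tag
      scale.Union(),
      "nearest",                 // mode (placeholder string)
      /*memory_config=*/0,
      out);
}
```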
tt::target::ttnn::FLATBUFFERS_MANUALLY_ALIGNED_STRUCT (8)
tt::target::ttnn::FLATBUFFERS_STRUCT_END (CoreCoord, 16)
tt::target::ttnn::FLATBUFFERS_STRUCT_END (CoreRange, 32)