#ifndef FLATBUFFERS_GENERATED_NORMALIZATION_TT_TARGET_TTNN_H_
#define FLATBUFFERS_GENERATED_NORMALIZATION_TT_TARGET_TTNN_H_

#include "flatbuffers/flatbuffers.h"

static_assert(FLATBUFFERS_VERSION_MAJOR == 24 &&
              FLATBUFFERS_VERSION_MINOR == 3 &&
              FLATBUFFERS_VERSION_REVISION == 25,
             "Non-compatible flatbuffers version included");

namespace tt {
namespace target {
namespace ttnn {

struct SoftmaxOp;
struct SoftmaxOpBuilder;

struct BatchNormOp;
struct BatchNormOpBuilder;
struct SoftmaxOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
  typedef SoftmaxOpBuilder Builder;
  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
    return "tt.target.ttnn.SoftmaxOp";
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_IN = 4,
    VT_OUT = 6,
    VT_DIMENSION = 8
  };
  const tt::target::ttnn::TensorRef *in() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_IN);
  }
  const tt::target::ttnn::TensorRef *out() const {
    return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
  }
  int32_t dimension() const {
    return GetField<int32_t>(VT_DIMENSION, 0);
  }
  bool Verify(::flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_IN) &&
           verifier.VerifyTable(in()) &&
           VerifyOffset(verifier, VT_OUT) &&
           verifier.VerifyTable(out()) &&
           VerifyField<int32_t>(verifier, VT_DIMENSION, 4) &&
           verifier.EndTable();
  }
};

struct SoftmaxOpBuilder {
  typedef SoftmaxOp Table;
  ::flatbuffers::FlatBufferBuilder &fbb_;
  ::flatbuffers::uoffset_t start_;
  void add_in(::flatbuffers::Offset<tt::target::ttnn::TensorRef> in) {
    fbb_.AddOffset(SoftmaxOp::VT_IN, in);
  }
  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
    fbb_.AddOffset(SoftmaxOp::VT_OUT, out);
  }
  void add_dimension(int32_t dimension) {
    fbb_.AddElement<int32_t>(SoftmaxOp::VT_DIMENSION, dimension, 0);
  }
  explicit SoftmaxOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<SoftmaxOp> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = ::flatbuffers::Offset<SoftmaxOp>(end);
    return o;
  }
};

inline ::flatbuffers::Offset<SoftmaxOp> CreateSoftmaxOp(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
    int32_t dimension = 0) {
  SoftmaxOpBuilder builder_(_fbb);
  builder_.add_dimension(dimension);
  builder_.add_out(out);
  builder_.add_in(in);
  return builder_.Finish();
}
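// Illustrative usage sketch, not emitted by flatc: shows how a caller might
// drive the SoftmaxOp builder API above. The helper name is hypothetical and
// the TensorRef offsets are assumed to have been serialized elsewhere by the
// surrounding runtime code; only symbols defined in this header are used.
inline ::flatbuffers::Offset<SoftmaxOp> ExampleSerializeSoftmax(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out,
    int32_t dimension) {
  // One-shot convenience call; equivalent to constructing a SoftmaxOpBuilder,
  // calling add_in/add_out/add_dimension, and then Finish().
  return CreateSoftmaxOp(_fbb, in, out, dimension);
}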
105 return "tt.target.ttnn.BatchNormOp";
107 enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
119 const tt::target::ttnn::TensorRef *
input()
const {
120 return GetPointer<const tt::target::ttnn::TensorRef *>(
VT_INPUT);
123 return GetPointer<const tt::target::ttnn::TensorRef *>(
VT_RUNNING_MEAN);
126 return GetPointer<const tt::target::ttnn::TensorRef *>(
VT_RUNNING_VAR);
137 const tt::target::ttnn::TensorRef *
weight()
const {
138 return GetPointer<const tt::target::ttnn::TensorRef *>(
VT_WEIGHT);
140 const tt::target::ttnn::TensorRef *
bias()
const {
141 return GetPointer<const tt::target::ttnn::TensorRef *>(
VT_BIAS);
144 return GetPointer<const tt::target::ttnn::MemoryConfig *>(
VT_MEMORY_CONFIG);
146 const tt::target::ttnn::TensorRef *
out()
const {
147 return GetPointer<const tt::target::ttnn::TensorRef *>(
VT_OUT);
149 bool Verify(::flatbuffers::Verifier &verifier)
const {
150 return VerifyTableStart(verifier) &&
152 verifier.VerifyTable(input()) &&
154 verifier.VerifyTable(running_mean()) &&
156 verifier.VerifyTable(running_var()) &&
158 VerifyField<float>(verifier,
VT_EPSILON, 4) &&
161 verifier.VerifyTable(weight()) &&
162 VerifyOffset(verifier,
VT_BIAS) &&
163 verifier.VerifyTable(bias()) &&
165 verifier.VerifyTable(memory_config()) &&
166 VerifyOffset(verifier,
VT_OUT) &&
167 verifier.VerifyTable(out()) &&
174 ::flatbuffers::FlatBufferBuilder &
fbb_;
176 void add_input(::flatbuffers::Offset<tt::target::ttnn::TensorRef> input) {
182 void add_running_var(::flatbuffers::Offset<tt::target::ttnn::TensorRef> running_var) {
194 void add_weight(::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight) {
197 void add_bias(::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias) {
203 void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
210 ::flatbuffers::Offset<BatchNormOp>
Finish() {
212 auto o = ::flatbuffers::Offset<BatchNormOp>(end);
218 ::flatbuffers::FlatBufferBuilder &_fbb,
219 ::flatbuffers::Offset<tt::target::ttnn::TensorRef> input = 0,
220 ::flatbuffers::Offset<tt::target::ttnn::TensorRef> running_mean = 0,
221 ::flatbuffers::Offset<tt::target::ttnn::TensorRef> running_var = 0,
222 bool training =
false,
223 float epsilon = 0.0f,
224 float momentum = 0.0f,
225 ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight = 0,
226 ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
227 ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config = 0,
228 ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0) {
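// Illustrative usage sketch, not part of the generated header: a hypothetical
// caller-side helper showing how CreateBatchNormOp is typically invoked when
// serializing an inference-mode batch norm. The tensor and memory-config
// offsets are assumed to come from the surrounding serialization code, and
// the epsilon/momentum values are placeholders, not defaults mandated by the
// schema.
inline ::flatbuffers::Offset<tt::target::ttnn::BatchNormOp>
ExampleSerializeBatchNorm(
    ::flatbuffers::FlatBufferBuilder &_fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> input,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> running_mean,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> running_var,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias,
    ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
  // training = false selects inference semantics; epsilon and momentum mirror
  // common PyTorch-style values but are only illustrative here.
  return tt::target::ttnn::CreateBatchNormOp(
      _fbb, input, running_mean, running_var,
      /*training=*/false, /*epsilon=*/1e-5f, /*momentum=*/0.1f,
      weight, bias, memory_config, out);
}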