TT-MLIR
normalization_generated.h
Go to the documentation of this file.
1 // automatically generated by the FlatBuffers compiler, do not modify
2 
3 
4 #ifndef FLATBUFFERS_GENERATED_NORMALIZATION_TT_TARGET_TTNN_H_
5 #define FLATBUFFERS_GENERATED_NORMALIZATION_TT_TARGET_TTNN_H_
6 
7 #include "flatbuffers/flatbuffers.h"
8 
9 // Ensure the included flatbuffers.h is the same version as when this file was
10 // generated, otherwise it may not be compatible.
11 static_assert(FLATBUFFERS_VERSION_MAJOR == 24 &&
12  FLATBUFFERS_VERSION_MINOR == 3 &&
13  FLATBUFFERS_VERSION_REVISION == 25,
14  "Non-compatible flatbuffers version included");
15 
18 
19 namespace tt {
20 namespace target {
21 namespace ttnn {
22 
23 struct SoftmaxOp;
24 struct SoftmaxOpBuilder;
25 
26 struct BatchNormOp;
27 struct BatchNormOpBuilder;
28 
29 struct SoftmaxOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
30  typedef SoftmaxOpBuilder Builder;
31  struct Traits;
32  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
33  return "tt.target.ttnn.SoftmaxOp";
34  }
35  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
36  VT_IN = 4,
37  VT_OUT = 6,
38  VT_DIMENSION = 8
39  };
40  const tt::target::ttnn::TensorRef *in() const {
41  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_IN);
42  }
43  const tt::target::ttnn::TensorRef *out() const {
44  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
45  }
46  int32_t dimension() const {
47  return GetField<int32_t>(VT_DIMENSION, 0);
48  }
49  bool Verify(::flatbuffers::Verifier &verifier) const {
50  return VerifyTableStart(verifier) &&
51  VerifyOffset(verifier, VT_IN) &&
52  verifier.VerifyTable(in()) &&
53  VerifyOffset(verifier, VT_OUT) &&
54  verifier.VerifyTable(out()) &&
55  VerifyField<int32_t>(verifier, VT_DIMENSION, 4) &&
56  verifier.EndTable();
57  }
58 };
59 
60 struct SoftmaxOpBuilder {
61  typedef SoftmaxOp Table;
62  ::flatbuffers::FlatBufferBuilder &fbb_;
63  ::flatbuffers::uoffset_t start_;
64  void add_in(::flatbuffers::Offset<tt::target::ttnn::TensorRef> in) {
65  fbb_.AddOffset(SoftmaxOp::VT_IN, in);
66  }
67  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
68  fbb_.AddOffset(SoftmaxOp::VT_OUT, out);
69  }
70  void add_dimension(int32_t dimension) {
71  fbb_.AddElement<int32_t>(SoftmaxOp::VT_DIMENSION, dimension, 0);
72  }
73  explicit SoftmaxOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
74  : fbb_(_fbb) {
75  start_ = fbb_.StartTable();
76  }
77  ::flatbuffers::Offset<SoftmaxOp> Finish() {
78  const auto end = fbb_.EndTable(start_);
79  auto o = ::flatbuffers::Offset<SoftmaxOp>(end);
80  return o;
81  }
82 };
83 
84 inline ::flatbuffers::Offset<SoftmaxOp> CreateSoftmaxOp(
85  ::flatbuffers::FlatBufferBuilder &_fbb,
86  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
87  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
88  int32_t dimension = 0) {
89  SoftmaxOpBuilder builder_(_fbb);
90  builder_.add_dimension(dimension);
91  builder_.add_out(out);
92  builder_.add_in(in);
93  return builder_.Finish();
94 }
95 
96 struct SoftmaxOp::Traits {
97  using type = SoftmaxOp;
98  static auto constexpr Create = CreateSoftmaxOp;
99 };
100 
101 struct BatchNormOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
102  typedef BatchNormOpBuilder Builder;
103  struct Traits;
104  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
105  return "tt.target.ttnn.BatchNormOp";
106  }
107  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
108  VT_INPUT = 4,
109  VT_RUNNING_MEAN = 6,
110  VT_RUNNING_VAR = 8,
111  VT_TRAINING = 10,
112  VT_EPSILON = 12,
113  VT_MOMENTUM = 14,
114  VT_WEIGHT = 16,
115  VT_BIAS = 18,
116  VT_MEMORY_CONFIG = 20,
117  VT_OUT = 22
118  };
119  const tt::target::ttnn::TensorRef *input() const {
120  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_INPUT);
121  }
122  const tt::target::ttnn::TensorRef *running_mean() const {
123  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_RUNNING_MEAN);
124  }
125  const tt::target::ttnn::TensorRef *running_var() const {
126  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_RUNNING_VAR);
127  }
128  bool training() const {
129  return GetField<uint8_t>(VT_TRAINING, 0) != 0;
130  }
131  float epsilon() const {
132  return GetField<float>(VT_EPSILON, 0.0f);
133  }
134  float momentum() const {
135  return GetField<float>(VT_MOMENTUM, 0.0f);
136  }
137  const tt::target::ttnn::TensorRef *weight() const {
138  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_WEIGHT);
139  }
140  const tt::target::ttnn::TensorRef *bias() const {
141  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_BIAS);
142  }
143  const tt::target::ttnn::MemoryConfig *memory_config() const {
144  return GetPointer<const tt::target::ttnn::MemoryConfig *>(VT_MEMORY_CONFIG);
145  }
146  const tt::target::ttnn::TensorRef *out() const {
147  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
148  }
149  bool Verify(::flatbuffers::Verifier &verifier) const {
150  return VerifyTableStart(verifier) &&
151  VerifyOffset(verifier, VT_INPUT) &&
152  verifier.VerifyTable(input()) &&
153  VerifyOffset(verifier, VT_RUNNING_MEAN) &&
154  verifier.VerifyTable(running_mean()) &&
155  VerifyOffset(verifier, VT_RUNNING_VAR) &&
156  verifier.VerifyTable(running_var()) &&
157  VerifyField<uint8_t>(verifier, VT_TRAINING, 1) &&
158  VerifyField<float>(verifier, VT_EPSILON, 4) &&
159  VerifyField<float>(verifier, VT_MOMENTUM, 4) &&
160  VerifyOffset(verifier, VT_WEIGHT) &&
161  verifier.VerifyTable(weight()) &&
162  VerifyOffset(verifier, VT_BIAS) &&
163  verifier.VerifyTable(bias()) &&
164  VerifyOffset(verifier, VT_MEMORY_CONFIG) &&
165  verifier.VerifyTable(memory_config()) &&
166  VerifyOffset(verifier, VT_OUT) &&
167  verifier.VerifyTable(out()) &&
168  verifier.EndTable();
169  }
170 };
171 
172 struct BatchNormOpBuilder {
173  typedef BatchNormOp Table;
174  ::flatbuffers::FlatBufferBuilder &fbb_;
175  ::flatbuffers::uoffset_t start_;
176  void add_input(::flatbuffers::Offset<tt::target::ttnn::TensorRef> input) {
177  fbb_.AddOffset(BatchNormOp::VT_INPUT, input);
178  }
179  void add_running_mean(::flatbuffers::Offset<tt::target::ttnn::TensorRef> running_mean) {
180  fbb_.AddOffset(BatchNormOp::VT_RUNNING_MEAN, running_mean);
181  }
182  void add_running_var(::flatbuffers::Offset<tt::target::ttnn::TensorRef> running_var) {
183  fbb_.AddOffset(BatchNormOp::VT_RUNNING_VAR, running_var);
184  }
185  void add_training(bool training) {
186  fbb_.AddElement<uint8_t>(BatchNormOp::VT_TRAINING, static_cast<uint8_t>(training), 0);
187  }
188  void add_epsilon(float epsilon) {
189  fbb_.AddElement<float>(BatchNormOp::VT_EPSILON, epsilon, 0.0f);
190  }
191  void add_momentum(float momentum) {
192  fbb_.AddElement<float>(BatchNormOp::VT_MOMENTUM, momentum, 0.0f);
193  }
194  void add_weight(::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight) {
195  fbb_.AddOffset(BatchNormOp::VT_WEIGHT, weight);
196  }
197  void add_bias(::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias) {
198  fbb_.AddOffset(BatchNormOp::VT_BIAS, bias);
199  }
200  void add_memory_config(::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config) {
201  fbb_.AddOffset(BatchNormOp::VT_MEMORY_CONFIG, memory_config);
202  }
203  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
204  fbb_.AddOffset(BatchNormOp::VT_OUT, out);
205  }
206  explicit BatchNormOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
207  : fbb_(_fbb) {
208  start_ = fbb_.StartTable();
209  }
210  ::flatbuffers::Offset<BatchNormOp> Finish() {
211  const auto end = fbb_.EndTable(start_);
212  auto o = ::flatbuffers::Offset<BatchNormOp>(end);
213  return o;
214  }
215 };
216 
217 inline ::flatbuffers::Offset<BatchNormOp> CreateBatchNormOp(
218  ::flatbuffers::FlatBufferBuilder &_fbb,
219  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> input = 0,
220  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> running_mean = 0,
221  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> running_var = 0,
222  bool training = false,
223  float epsilon = 0.0f,
224  float momentum = 0.0f,
225  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight = 0,
226  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
227  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config = 0,
228  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0) {
229  BatchNormOpBuilder builder_(_fbb);
230  builder_.add_out(out);
231  builder_.add_memory_config(memory_config);
232  builder_.add_bias(bias);
233  builder_.add_weight(weight);
234  builder_.add_momentum(momentum);
235  builder_.add_epsilon(epsilon);
236  builder_.add_running_var(running_var);
237  builder_.add_running_mean(running_mean);
238  builder_.add_input(input);
239  builder_.add_training(training);
240  return builder_.Finish();
241 }
242 
243 struct BatchNormOp::Traits {
244  using type = BatchNormOp;
245  static auto constexpr Create = CreateBatchNormOp;
246 };
247 
248 } // namespace ttnn
249 } // namespace target
250 } // namespace tt
251 
252 #endif // FLATBUFFERS_GENERATED_NORMALIZATION_TT_TARGET_TTNN_H_
inline ::flatbuffers::Offset< BatchNormOp > CreateBatchNormOp(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > running_mean=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > running_var=0, bool training=false, float epsilon=0.0f, float momentum=0.0f, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
Definition: normalization_generated.h:217
inline ::flatbuffers::Offset< SoftmaxOp > CreateSoftmaxOp(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, int32_t dimension=0)
Definition: normalization_generated.h:84
Definition: debug_info_generated.h:18
VT_INPUT
Definition: normalization_generated.h:108
VT_WEIGHT
Definition: normalization_generated.h:114
VT_TRAINING
Definition: normalization_generated.h:111
VT_EPSILON
Definition: normalization_generated.h:112
VT_RUNNING_VAR
Definition: normalization_generated.h:110
VT_IN
Definition: normalization_generated.h:36
VT_MOMENTUM
Definition: normalization_generated.h:113
VT_BIAS
Definition: normalization_generated.h:115
VT_OUT
Definition: normalization_generated.h:37
VT_RUNNING_MEAN
Definition: normalization_generated.h:109
VT_MEMORY_CONFIG
Definition: normalization_generated.h:116
Definition: normalization_generated.h:172
void add_memory_config(::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config)
Definition: normalization_generated.h:200
void add_out(::flatbuffers::Offset< tt::target::ttnn::TensorRef > out)
Definition: normalization_generated.h:203
void add_epsilon(float epsilon)
Definition: normalization_generated.h:188
::flatbuffers::uoffset_t start_
Definition: normalization_generated.h:175
void add_momentum(float momentum)
Definition: normalization_generated.h:191
void add_running_mean(::flatbuffers::Offset< tt::target::ttnn::TensorRef > running_mean)
Definition: normalization_generated.h:179
BatchNormOp Table
Definition: normalization_generated.h:173
void add_training(bool training)
Definition: normalization_generated.h:185
void add_bias(::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias)
Definition: normalization_generated.h:197
BatchNormOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: normalization_generated.h:206
void add_input(::flatbuffers::Offset< tt::target::ttnn::TensorRef > input)
Definition: normalization_generated.h:176
void add_weight(::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight)
Definition: normalization_generated.h:194
::flatbuffers::Offset< BatchNormOp > Finish()
Definition: normalization_generated.h:210
::flatbuffers::FlatBufferBuilder & fbb_
Definition: normalization_generated.h:174
void add_running_var(::flatbuffers::Offset< tt::target::ttnn::TensorRef > running_var)
Definition: normalization_generated.h:182
Definition: normalization_generated.h:243
BatchNormOp type
Definition: normalization_generated.h:244
static constexpr auto Create
Definition: normalization_generated.h:245
Definition: binary_generated.h:31
const tt::target::ttnn::TensorRef * running_mean() const
Definition: normalization_generated.h:122
const tt::target::ttnn::TensorRef * bias() const
Definition: normalization_generated.h:140
const tt::target::ttnn::TensorRef * out() const
Definition: normalization_generated.h:43
const tt::target::ttnn::MemoryConfig * memory_config() const
Definition: normalization_generated.h:143
const tt::target::ttnn::TensorRef * running_var() const
Definition: normalization_generated.h:125
float momentum() const
Definition: normalization_generated.h:134
const tt::target::ttnn::TensorRef * input() const
Definition: normalization_generated.h:119
bool training() const
Definition: normalization_generated.h:128
const tt::target::ttnn::TensorRef * weight() const
Definition: normalization_generated.h:137
int32_t dimension() const
Definition: normalization_generated.h:46
const tt::target::ttnn::TensorRef * in() const
Definition: normalization_generated.h:40
bool Verify(::flatbuffers::Verifier &verifier) const
Definition: normalization_generated.h:49
SoftmaxOpBuilder Builder
Definition: normalization_generated.h:30
BatchNormOpBuilder Builder
Definition: normalization_generated.h:102
float epsilon() const
Definition: normalization_generated.h:131
static FLATBUFFERS_CONSTEXPR_CPP11 const char * GetFullyQualifiedName()
Definition: normalization_generated.h:32
Definition: normalization_generated.h:60
void add_dimension(int32_t dimension)
Definition: normalization_generated.h:70
::flatbuffers::uoffset_t start_
Definition: normalization_generated.h:63
void add_out(::flatbuffers::Offset< tt::target::ttnn::TensorRef > out)
Definition: normalization_generated.h:67
SoftmaxOp Table
Definition: normalization_generated.h:61
::flatbuffers::FlatBufferBuilder & fbb_
Definition: normalization_generated.h:62
void add_in(::flatbuffers::Offset< tt::target::ttnn::TensorRef > in)
Definition: normalization_generated.h:64
::flatbuffers::Offset< SoftmaxOp > Finish()
Definition: normalization_generated.h:77
SoftmaxOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: normalization_generated.h:73
Definition: normalization_generated.h:96
static constexpr auto Create
Definition: normalization_generated.h:98
SoftmaxOp type
Definition: normalization_generated.h:97