TT-MLIR
pool_generated.h
Go to the documentation of this file.
1 // automatically generated by the FlatBuffers compiler, do not modify
2 
3 
4 #ifndef FLATBUFFERS_GENERATED_POOL_TT_TARGET_TTNN_H_
5 #define FLATBUFFERS_GENERATED_POOL_TT_TARGET_TTNN_H_
6 
7 #include "flatbuffers/flatbuffers.h"
8 
9 // Ensure the included flatbuffers.h is the same version as when this file was
10 // generated, otherwise it may not be compatible.
11 static_assert(FLATBUFFERS_VERSION_MAJOR == 24 &&
12  FLATBUFFERS_VERSION_MINOR == 3 &&
13  FLATBUFFERS_VERSION_REVISION == 25,
14  "Non-compatible flatbuffers version included");
15 
18 
19 namespace tt {
20 namespace target {
21 namespace ttnn {
22 
23 struct Pool2dOp;
24 struct Pool2dOpBuilder;
25 
26 struct UniformScale2D;
27 struct UniformScale2DBuilder;
28 
29 struct NonUniformScale2D;
30 struct NonUniformScale2DBuilder;
31 
32 struct UpsampleOp;
33 struct UpsampleOpBuilder;
34 
// Discriminator selecting which pooling operation a Pool2dOp table encodes.
enum class Pool2dOpType : uint32_t {
  AvgPool2d = 0,
  MaxPool2d = 1,
  MIN = AvgPool2d,
  MAX = MaxPool2d
};
41 
42 inline const Pool2dOpType (&EnumValuesPool2dOpType())[2] {
43  static const Pool2dOpType values[] = {
46  };
47  return values;
48 }
49 
// Returns names parallel to EnumValuesPool2dOpType(); the extra slot is a
// nullptr sentinel terminating the table.
inline const char * const *EnumNamesPool2dOpType() {
  static const char * const names[3] = {
    "AvgPool2d",
    "MaxPool2d",
    nullptr
  };
  return names;
}
58 
59 inline const char *EnumNamePool2dOpType(Pool2dOpType e) {
60  if (::flatbuffers::IsOutRange(e, Pool2dOpType::AvgPool2d, Pool2dOpType::MaxPool2d)) return "";
61  const size_t index = static_cast<size_t>(e);
62  return EnumNamesPool2dOpType()[index];
63 }
64 
// Union discriminator for UpsampleOp's scale_factor field.
// NOTE(review): the NonUniformScale2D and MAX entries were dropped by the
// doc scrape; reconstructed from the 3-entry value/name tables below and
// the IsOutRange(..., Scale2D::NonUniformScale2D) bound in EnumNameScale2D.
enum class Scale2D : uint8_t {
  NONE = 0,
  UniformScale2D = 1,
  NonUniformScale2D = 2,
  MIN = NONE,
  MAX = NonUniformScale2D
};
72 
73 inline const Scale2D (&EnumValuesScale2D())[3] {
74  static const Scale2D values[] = {
78  };
79  return values;
80 }
81 
// Returns names parallel to EnumValuesScale2D(); terminated by a nullptr
// sentinel slot.
inline const char * const *EnumNamesScale2D() {
  static const char * const names[4] = {
    "NONE",
    "UniformScale2D",
    "NonUniformScale2D",
    nullptr
  };
  return names;
}
91 
92 inline const char *EnumNameScale2D(Scale2D e) {
93  if (::flatbuffers::IsOutRange(e, Scale2D::NONE, Scale2D::NonUniformScale2D)) return "";
94  const size_t index = static_cast<size_t>(e);
95  return EnumNamesScale2D()[index];
96 }
97 
98 template<typename T> struct Scale2DTraits {
100 };
101 
102 template<> struct Scale2DTraits<tt::target::ttnn::UniformScale2D> {
104 };
105 
106 template<> struct Scale2DTraits<tt::target::ttnn::NonUniformScale2D> {
108 };
109 
110 bool VerifyScale2D(::flatbuffers::Verifier &verifier, const void *obj, Scale2D type);
111 bool VerifyScale2DVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<Scale2D> *types);
112 
113 struct Pool2dOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
115  struct Traits;
116  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
117  return "tt.target.ttnn.Pool2dOp";
118  }
119  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
120  VT_TYPE = 4,
121  VT_IN = 6,
122  VT_OUT = 8,
128  VT_STRIDE = 20,
131  VT_CEIL_MODE = 26
132  };
134  return static_cast<tt::target::ttnn::Pool2dOpType>(GetField<uint32_t>(VT_TYPE, 0));
135  }
136  const tt::target::ttnn::TensorRef *in() const {
137  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_IN);
138  }
139  const tt::target::ttnn::TensorRef *out() const {
140  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
141  }
142  uint32_t batch_size() const {
143  return GetField<uint32_t>(VT_BATCH_SIZE, 0);
144  }
145  uint32_t input_height() const {
146  return GetField<uint32_t>(VT_INPUT_HEIGHT, 0);
147  }
148  uint32_t input_width() const {
149  return GetField<uint32_t>(VT_INPUT_WIDTH, 0);
150  }
151  uint32_t channels() const {
152  return GetField<uint32_t>(VT_CHANNELS, 0);
153  }
154  const ::flatbuffers::Vector<int32_t> *kernel_size() const {
155  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_KERNEL_SIZE);
156  }
157  const ::flatbuffers::Vector<int32_t> *stride() const {
158  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_STRIDE);
159  }
160  const ::flatbuffers::Vector<int32_t> *padding() const {
161  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_PADDING);
162  }
163  const ::flatbuffers::Vector<int32_t> *dilation() const {
164  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_DILATION);
165  }
166  bool ceil_mode() const {
167  return GetField<uint8_t>(VT_CEIL_MODE, 0) != 0;
168  }
169  bool Verify(::flatbuffers::Verifier &verifier) const {
170  return VerifyTableStart(verifier) &&
171  VerifyField<uint32_t>(verifier, VT_TYPE, 4) &&
172  VerifyOffset(verifier, VT_IN) &&
173  verifier.VerifyTable(in()) &&
174  VerifyOffset(verifier, VT_OUT) &&
175  verifier.VerifyTable(out()) &&
176  VerifyField<uint32_t>(verifier, VT_BATCH_SIZE, 4) &&
177  VerifyField<uint32_t>(verifier, VT_INPUT_HEIGHT, 4) &&
178  VerifyField<uint32_t>(verifier, VT_INPUT_WIDTH, 4) &&
179  VerifyField<uint32_t>(verifier, VT_CHANNELS, 4) &&
180  VerifyOffset(verifier, VT_KERNEL_SIZE) &&
181  verifier.VerifyVector(kernel_size()) &&
182  VerifyOffset(verifier, VT_STRIDE) &&
183  verifier.VerifyVector(stride()) &&
184  VerifyOffset(verifier, VT_PADDING) &&
185  verifier.VerifyVector(padding()) &&
186  VerifyOffset(verifier, VT_DILATION) &&
187  verifier.VerifyVector(dilation()) &&
188  VerifyField<uint8_t>(verifier, VT_CEIL_MODE, 1) &&
189  verifier.EndTable();
190  }
191 };
192 
194  typedef Pool2dOp Table;
195  ::flatbuffers::FlatBufferBuilder &fbb_;
196  ::flatbuffers::uoffset_t start_;
198  fbb_.AddElement<uint32_t>(Pool2dOp::VT_TYPE, static_cast<uint32_t>(type), 0);
199  }
200  void add_in(::flatbuffers::Offset<tt::target::ttnn::TensorRef> in) {
201  fbb_.AddOffset(Pool2dOp::VT_IN, in);
202  }
203  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
204  fbb_.AddOffset(Pool2dOp::VT_OUT, out);
205  }
206  void add_batch_size(uint32_t batch_size) {
207  fbb_.AddElement<uint32_t>(Pool2dOp::VT_BATCH_SIZE, batch_size, 0);
208  }
209  void add_input_height(uint32_t input_height) {
210  fbb_.AddElement<uint32_t>(Pool2dOp::VT_INPUT_HEIGHT, input_height, 0);
211  }
212  void add_input_width(uint32_t input_width) {
213  fbb_.AddElement<uint32_t>(Pool2dOp::VT_INPUT_WIDTH, input_width, 0);
214  }
215  void add_channels(uint32_t channels) {
216  fbb_.AddElement<uint32_t>(Pool2dOp::VT_CHANNELS, channels, 0);
217  }
218  void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size) {
219  fbb_.AddOffset(Pool2dOp::VT_KERNEL_SIZE, kernel_size);
220  }
221  void add_stride(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride) {
222  fbb_.AddOffset(Pool2dOp::VT_STRIDE, stride);
223  }
224  void add_padding(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding) {
225  fbb_.AddOffset(Pool2dOp::VT_PADDING, padding);
226  }
227  void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation) {
228  fbb_.AddOffset(Pool2dOp::VT_DILATION, dilation);
229  }
230  void add_ceil_mode(bool ceil_mode) {
231  fbb_.AddElement<uint8_t>(Pool2dOp::VT_CEIL_MODE, static_cast<uint8_t>(ceil_mode), 0);
232  }
233  explicit Pool2dOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
234  : fbb_(_fbb) {
235  start_ = fbb_.StartTable();
236  }
237  ::flatbuffers::Offset<Pool2dOp> Finish() {
238  const auto end = fbb_.EndTable(start_);
239  auto o = ::flatbuffers::Offset<Pool2dOp>(end);
240  return o;
241  }
242 };
243 
244 inline ::flatbuffers::Offset<Pool2dOp> CreatePool2dOp(
245  ::flatbuffers::FlatBufferBuilder &_fbb,
247  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
248  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
249  uint32_t batch_size = 0,
250  uint32_t input_height = 0,
251  uint32_t input_width = 0,
252  uint32_t channels = 0,
253  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size = 0,
254  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride = 0,
255  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding = 0,
256  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation = 0,
257  bool ceil_mode = false) {
258  Pool2dOpBuilder builder_(_fbb);
259  builder_.add_dilation(dilation);
260  builder_.add_padding(padding);
261  builder_.add_stride(stride);
262  builder_.add_kernel_size(kernel_size);
263  builder_.add_channels(channels);
264  builder_.add_input_width(input_width);
265  builder_.add_input_height(input_height);
266  builder_.add_batch_size(batch_size);
267  builder_.add_out(out);
268  builder_.add_in(in);
269  builder_.add_type(type);
270  builder_.add_ceil_mode(ceil_mode);
271  return builder_.Finish();
272 }
273 
275  using type = Pool2dOp;
276  static auto constexpr Create = CreatePool2dOp;
277 };
278 
279 inline ::flatbuffers::Offset<Pool2dOp> CreatePool2dOpDirect(
280  ::flatbuffers::FlatBufferBuilder &_fbb,
282  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
283  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
284  uint32_t batch_size = 0,
285  uint32_t input_height = 0,
286  uint32_t input_width = 0,
287  uint32_t channels = 0,
288  const std::vector<int32_t> *kernel_size = nullptr,
289  const std::vector<int32_t> *stride = nullptr,
290  const std::vector<int32_t> *padding = nullptr,
291  const std::vector<int32_t> *dilation = nullptr,
292  bool ceil_mode = false) {
293  auto kernel_size__ = kernel_size ? _fbb.CreateVector<int32_t>(*kernel_size) : 0;
294  auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
295  auto padding__ = padding ? _fbb.CreateVector<int32_t>(*padding) : 0;
296  auto dilation__ = dilation ? _fbb.CreateVector<int32_t>(*dilation) : 0;
298  _fbb,
299  type,
300  in,
301  out,
302  batch_size,
303  input_height,
304  input_width,
305  channels,
306  kernel_size__,
307  stride__,
308  padding__,
309  dilation__,
310  ceil_mode);
311 }
312 
313 struct UniformScale2D FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
315  struct Traits;
316  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
317  return "tt.target.ttnn.UniformScale2D";
318  }
319  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
320  VT_SCALE = 4
321  };
322  int32_t scale() const {
323  return GetField<int32_t>(VT_SCALE, 0);
324  }
325  bool Verify(::flatbuffers::Verifier &verifier) const {
326  return VerifyTableStart(verifier) &&
327  VerifyField<int32_t>(verifier, VT_SCALE, 4) &&
328  verifier.EndTable();
329  }
330 };
331 
334  ::flatbuffers::FlatBufferBuilder &fbb_;
335  ::flatbuffers::uoffset_t start_;
336  void add_scale(int32_t scale) {
337  fbb_.AddElement<int32_t>(UniformScale2D::VT_SCALE, scale, 0);
338  }
339  explicit UniformScale2DBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
340  : fbb_(_fbb) {
341  start_ = fbb_.StartTable();
342  }
343  ::flatbuffers::Offset<UniformScale2D> Finish() {
344  const auto end = fbb_.EndTable(start_);
345  auto o = ::flatbuffers::Offset<UniformScale2D>(end);
346  return o;
347  }
348 };
349 
350 inline ::flatbuffers::Offset<UniformScale2D> CreateUniformScale2D(
351  ::flatbuffers::FlatBufferBuilder &_fbb,
352  int32_t scale = 0) {
353  UniformScale2DBuilder builder_(_fbb);
354  builder_.add_scale(scale);
355  return builder_.Finish();
356 }
357 
360  static auto constexpr Create = CreateUniformScale2D;
361 };
362 
363 struct NonUniformScale2D FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
365  struct Traits;
366  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
367  return "tt.target.ttnn.NonUniformScale2D";
368  }
369  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
370  VT_SCALE = 4
371  };
372  const ::flatbuffers::Vector<int32_t> *scale() const {
373  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_SCALE);
374  }
375  bool Verify(::flatbuffers::Verifier &verifier) const {
376  return VerifyTableStart(verifier) &&
377  VerifyOffset(verifier, VT_SCALE) &&
378  verifier.VerifyVector(scale()) &&
379  verifier.EndTable();
380  }
381 };
382 
385  ::flatbuffers::FlatBufferBuilder &fbb_;
386  ::flatbuffers::uoffset_t start_;
387  void add_scale(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> scale) {
388  fbb_.AddOffset(NonUniformScale2D::VT_SCALE, scale);
389  }
390  explicit NonUniformScale2DBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
391  : fbb_(_fbb) {
392  start_ = fbb_.StartTable();
393  }
394  ::flatbuffers::Offset<NonUniformScale2D> Finish() {
395  const auto end = fbb_.EndTable(start_);
396  auto o = ::flatbuffers::Offset<NonUniformScale2D>(end);
397  return o;
398  }
399 };
400 
401 inline ::flatbuffers::Offset<NonUniformScale2D> CreateNonUniformScale2D(
402  ::flatbuffers::FlatBufferBuilder &_fbb,
403  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> scale = 0) {
404  NonUniformScale2DBuilder builder_(_fbb);
405  builder_.add_scale(scale);
406  return builder_.Finish();
407 }
408 
411  static auto constexpr Create = CreateNonUniformScale2D;
412 };
413 
414 inline ::flatbuffers::Offset<NonUniformScale2D> CreateNonUniformScale2DDirect(
415  ::flatbuffers::FlatBufferBuilder &_fbb,
416  const std::vector<int32_t> *scale = nullptr) {
417  auto scale__ = scale ? _fbb.CreateVector<int32_t>(*scale) : 0;
419  _fbb,
420  scale__);
421 }
422 
423 struct UpsampleOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
425  struct Traits;
426  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
427  return "tt.target.ttnn.UpsampleOp";
428  }
429  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
430  VT_IN = 4,
433  VT_MODE = 10,
435  VT_OUT = 14
436  };
437  const tt::target::ttnn::TensorRef *in() const {
438  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_IN);
439  }
441  return static_cast<tt::target::ttnn::Scale2D>(GetField<uint8_t>(VT_SCALE_FACTOR_TYPE, 0));
442  }
443  const void *scale_factor() const {
444  return GetPointer<const void *>(VT_SCALE_FACTOR);
445  }
446  template<typename T> const T *scale_factor_as() const;
447  const tt::target::ttnn::UniformScale2D *scale_factor_as_UniformScale2D() const {
448  return scale_factor_type() == tt::target::ttnn::Scale2D::UniformScale2D ? static_cast<const tt::target::ttnn::UniformScale2D *>(scale_factor()) : nullptr;
449  }
450  const tt::target::ttnn::NonUniformScale2D *scale_factor_as_NonUniformScale2D() const {
451  return scale_factor_type() == tt::target::ttnn::Scale2D::NonUniformScale2D ? static_cast<const tt::target::ttnn::NonUniformScale2D *>(scale_factor()) : nullptr;
452  }
453  const ::flatbuffers::String *mode() const {
454  return GetPointer<const ::flatbuffers::String *>(VT_MODE);
455  }
456  const tt::target::ttnn::MemoryConfig *memory_config() const {
457  return GetPointer<const tt::target::ttnn::MemoryConfig *>(VT_MEMORY_CONFIG);
458  }
459  const tt::target::ttnn::TensorRef *out() const {
460  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
461  }
462  bool Verify(::flatbuffers::Verifier &verifier) const {
463  return VerifyTableStart(verifier) &&
464  VerifyOffset(verifier, VT_IN) &&
465  verifier.VerifyTable(in()) &&
466  VerifyField<uint8_t>(verifier, VT_SCALE_FACTOR_TYPE, 1) &&
467  VerifyOffset(verifier, VT_SCALE_FACTOR) &&
468  VerifyScale2D(verifier, scale_factor(), scale_factor_type()) &&
469  VerifyOffset(verifier, VT_MODE) &&
470  verifier.VerifyString(mode()) &&
471  VerifyOffset(verifier, VT_MEMORY_CONFIG) &&
472  verifier.VerifyTable(memory_config()) &&
473  VerifyOffset(verifier, VT_OUT) &&
474  verifier.VerifyTable(out()) &&
475  verifier.EndTable();
476  }
477 };
478 
479 template<> inline const tt::target::ttnn::UniformScale2D *UpsampleOp::scale_factor_as<tt::target::ttnn::UniformScale2D>() const {
480  return scale_factor_as_UniformScale2D();
481 }
482 
483 template<> inline const tt::target::ttnn::NonUniformScale2D *UpsampleOp::scale_factor_as<tt::target::ttnn::NonUniformScale2D>() const {
484  return scale_factor_as_NonUniformScale2D();
485 }
486 
488  typedef UpsampleOp Table;
489  ::flatbuffers::FlatBufferBuilder &fbb_;
490  ::flatbuffers::uoffset_t start_;
491  void add_in(::flatbuffers::Offset<tt::target::ttnn::TensorRef> in) {
492  fbb_.AddOffset(UpsampleOp::VT_IN, in);
493  }
495  fbb_.AddElement<uint8_t>(UpsampleOp::VT_SCALE_FACTOR_TYPE, static_cast<uint8_t>(scale_factor_type), 0);
496  }
497  void add_scale_factor(::flatbuffers::Offset<void> scale_factor) {
498  fbb_.AddOffset(UpsampleOp::VT_SCALE_FACTOR, scale_factor);
499  }
500  void add_mode(::flatbuffers::Offset<::flatbuffers::String> mode) {
501  fbb_.AddOffset(UpsampleOp::VT_MODE, mode);
502  }
503  void add_memory_config(::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config) {
504  fbb_.AddOffset(UpsampleOp::VT_MEMORY_CONFIG, memory_config);
505  }
506  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
507  fbb_.AddOffset(UpsampleOp::VT_OUT, out);
508  }
509  explicit UpsampleOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
510  : fbb_(_fbb) {
511  start_ = fbb_.StartTable();
512  }
513  ::flatbuffers::Offset<UpsampleOp> Finish() {
514  const auto end = fbb_.EndTable(start_);
515  auto o = ::flatbuffers::Offset<UpsampleOp>(end);
516  return o;
517  }
518 };
519 
520 inline ::flatbuffers::Offset<UpsampleOp> CreateUpsampleOp(
521  ::flatbuffers::FlatBufferBuilder &_fbb,
522  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
524  ::flatbuffers::Offset<void> scale_factor = 0,
525  ::flatbuffers::Offset<::flatbuffers::String> mode = 0,
526  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config = 0,
527  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0) {
528  UpsampleOpBuilder builder_(_fbb);
529  builder_.add_out(out);
530  builder_.add_memory_config(memory_config);
531  builder_.add_mode(mode);
532  builder_.add_scale_factor(scale_factor);
533  builder_.add_in(in);
534  builder_.add_scale_factor_type(scale_factor_type);
535  return builder_.Finish();
536 }
537 
539  using type = UpsampleOp;
540  static auto constexpr Create = CreateUpsampleOp;
541 };
542 
543 inline ::flatbuffers::Offset<UpsampleOp> CreateUpsampleOpDirect(
544  ::flatbuffers::FlatBufferBuilder &_fbb,
545  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
547  ::flatbuffers::Offset<void> scale_factor = 0,
548  const char *mode = nullptr,
549  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config = 0,
550  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0) {
551  auto mode__ = mode ? _fbb.CreateString(mode) : 0;
553  _fbb,
554  in,
555  scale_factor_type,
556  scale_factor,
557  mode__,
558  memory_config,
559  out);
560 }
561 
562 inline bool VerifyScale2D(::flatbuffers::Verifier &verifier, const void *obj, Scale2D type) {
563  switch (type) {
564  case Scale2D::NONE: {
565  return true;
566  }
568  auto ptr = reinterpret_cast<const tt::target::ttnn::UniformScale2D *>(obj);
569  return verifier.VerifyTable(ptr);
570  }
572  auto ptr = reinterpret_cast<const tt::target::ttnn::NonUniformScale2D *>(obj);
573  return verifier.VerifyTable(ptr);
574  }
575  default: return true;
576  }
577 }
578 
579 inline bool VerifyScale2DVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<Scale2D> *types) {
580  if (!values || !types) return !values && !types;
581  if (values->size() != types->size()) return false;
582  for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
583  if (!VerifyScale2D(
584  verifier, values->Get(i), types->GetEnum<Scale2D>(i))) {
585  return false;
586  }
587  }
588  return true;
589 }
590 
591 } // namespace ttnn
592 } // namespace target
593 } // namespace tt
594 
595 #endif // FLATBUFFERS_GENERATED_POOL_TT_TARGET_TTNN_H_
VT_SCALE
Definition: eltwise_generated.h:991
inline ::flatbuffers::Offset< UniformScale2D > CreateUniformScale2D(::flatbuffers::FlatBufferBuilder &_fbb, int32_t scale=0)
Definition: pool_generated.h:350
const char * EnumNamePool2dOpType(Pool2dOpType e)
Definition: pool_generated.h:59
const char * EnumNameScale2D(Scale2D e)
Definition: pool_generated.h:92
bool VerifyScale2DVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset< void >> *values, const ::flatbuffers::Vector< Scale2D > *types)
Definition: pool_generated.h:579
const char *const * EnumNamesScale2D()
Definition: pool_generated.h:82
inline ::flatbuffers::Offset< Pool2dOp > CreatePool2dOp(::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::Pool2dOpType type=tt::target::ttnn::Pool2dOpType::AvgPool2d, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, uint32_t channels=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, bool ceil_mode=false)
Definition: pool_generated.h:244
inline ::flatbuffers::Offset< NonUniformScale2D > CreateNonUniformScale2DDirect(::flatbuffers::FlatBufferBuilder &_fbb, const std::vector< int32_t > *scale=nullptr)
Definition: pool_generated.h:414
const Scale2D(& EnumValuesScale2D())[3]
Definition: pool_generated.h:73
bool VerifyScale2D(::flatbuffers::Verifier &verifier, const void *obj, Scale2D type)
Definition: pool_generated.h:562
const char *const * EnumNamesPool2dOpType()
Definition: pool_generated.h:50
inline ::flatbuffers::Offset< NonUniformScale2D > CreateNonUniformScale2D(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> scale=0)
Definition: pool_generated.h:401
inline ::flatbuffers::Offset< UpsampleOp > CreateUpsampleOpDirect(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, tt::target::ttnn::Scale2D scale_factor_type=tt::target::ttnn::Scale2D::NONE, ::flatbuffers::Offset< void > scale_factor=0, const char *mode=nullptr, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
Definition: pool_generated.h:543
inline ::flatbuffers::Offset< UpsampleOp > CreateUpsampleOp(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, tt::target::ttnn::Scale2D scale_factor_type=tt::target::ttnn::Scale2D::NONE, ::flatbuffers::Offset< void > scale_factor=0, ::flatbuffers::Offset<::flatbuffers::String > mode=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0)
Definition: pool_generated.h:520
Pool2dOpType
Definition: pool_generated.h:35
Scale2D
Definition: pool_generated.h:65
inline ::flatbuffers::Offset< Pool2dOp > CreatePool2dOpDirect(::flatbuffers::FlatBufferBuilder &_fbb, tt::target::ttnn::Pool2dOpType type=tt::target::ttnn::Pool2dOpType::AvgPool2d, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > in=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, uint32_t channels=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *dilation=nullptr, bool ceil_mode=false)
Definition: pool_generated.h:279
const Pool2dOpType(& EnumValuesPool2dOpType())[2]
Definition: pool_generated.h:42
Definition: debug_info_generated.h:18
VT_DILATION
Definition: pool_generated.h:130
VT_PADDING
Definition: pool_generated.h:129
VT_SCALE_FACTOR_TYPE
Definition: pool_generated.h:431
VT_STRIDE
Definition: pool_generated.h:128
VT_CHANNELS
Definition: pool_generated.h:126
VT_MODE
Definition: pool_generated.h:433
VT_KERNEL_SIZE
Definition: pool_generated.h:127
VT_BATCH_SIZE
Definition: pool_generated.h:123
VT_SCALE_FACTOR
Definition: pool_generated.h:432
VT_TYPE
Definition: pool_generated.h:120
VT_INPUT_HEIGHT
Definition: pool_generated.h:124
VT_IN
Definition: pool_generated.h:121
VT_INPUT_WIDTH
Definition: pool_generated.h:125
VT_OUT
Definition: pool_generated.h:122
VT_MEMORY_CONFIG
Definition: pool_generated.h:434
Definition: debug_info_generated.h:36
Definition: binary_generated.h:31
const tt::target::ttnn::UniformScale2D * scale_factor_as_UniformScale2D() const
Definition: pool_generated.h:447
uint32_t batch_size() const
Definition: pool_generated.h:142
const tt::target::ttnn::NonUniformScale2D * scale_factor_as_NonUniformScale2D() const
Definition: pool_generated.h:450
const ::flatbuffers::Vector< int32_t > * scale() const
Definition: pool_generated.h:372
uint32_t input_width() const
Definition: pool_generated.h:148
const tt::target::ttnn::TensorRef * out() const
Definition: pool_generated.h:139
const tt::target::ttnn::MemoryConfig * memory_config() const
Definition: pool_generated.h:456
bool ceil_mode() const
Definition: pool_generated.h:166
tt::target::ttnn::Scale2D scale_factor_type() const
Definition: pool_generated.h:440
uint32_t channels() const
Definition: pool_generated.h:151
tt::target::ttnn::Pool2dOpType type() const
Definition: pool_generated.h:133
int32_t scale() const
Definition: pool_generated.h:322
UpsampleOpBuilder Builder
Definition: pool_generated.h:424
const ::flatbuffers::Vector< int32_t > * dilation() const
Definition: pool_generated.h:163
uint32_t input_height() const
Definition: pool_generated.h:145
const ::flatbuffers::Vector< int32_t > * stride() const
Definition: pool_generated.h:157
const ::flatbuffers::Vector< int32_t > * padding() const
Definition: pool_generated.h:160
const tt::target::ttnn::TensorRef * in() const
Definition: pool_generated.h:136
const ::flatbuffers::Vector< int32_t > * kernel_size() const
Definition: pool_generated.h:154
bool Verify(::flatbuffers::Verifier &verifier) const
Definition: pool_generated.h:169
const void * scale_factor() const
Definition: pool_generated.h:443
Pool2dOpBuilder Builder
Definition: pool_generated.h:114
static FLATBUFFERS_CONSTEXPR_CPP11 const char * GetFullyQualifiedName()
Definition: pool_generated.h:116
NonUniformScale2DBuilder Builder
Definition: pool_generated.h:364
const ::flatbuffers::String * mode() const
Definition: pool_generated.h:453
UniformScale2DBuilder Builder
Definition: pool_generated.h:314
Definition: pool_generated.h:383
void add_scale(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> scale)
Definition: pool_generated.h:387
::flatbuffers::Offset< NonUniformScale2D > Finish()
Definition: pool_generated.h:394
::flatbuffers::uoffset_t start_
Definition: pool_generated.h:386
::flatbuffers::FlatBufferBuilder & fbb_
Definition: pool_generated.h:385
NonUniformScale2D Table
Definition: pool_generated.h:384
NonUniformScale2DBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: pool_generated.h:390
Definition: pool_generated.h:409
static constexpr auto Create
Definition: pool_generated.h:411
NonUniformScale2D type
Definition: pool_generated.h:410
Definition: pool_generated.h:193
void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation)
Definition: pool_generated.h:227
void add_padding(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding)
Definition: pool_generated.h:224
::flatbuffers::uoffset_t start_
Definition: pool_generated.h:196
Pool2dOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: pool_generated.h:233
::flatbuffers::FlatBufferBuilder & fbb_
Definition: pool_generated.h:195
void add_batch_size(uint32_t batch_size)
Definition: pool_generated.h:206
void add_input_height(uint32_t input_height)
Definition: pool_generated.h:209
void add_in(::flatbuffers::Offset< tt::target::ttnn::TensorRef > in)
Definition: pool_generated.h:200
Pool2dOp Table
Definition: pool_generated.h:194
void add_out(::flatbuffers::Offset< tt::target::ttnn::TensorRef > out)
Definition: pool_generated.h:203
::flatbuffers::Offset< Pool2dOp > Finish()
Definition: pool_generated.h:237
void add_stride(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride)
Definition: pool_generated.h:221
void add_input_width(uint32_t input_width)
Definition: pool_generated.h:212
void add_type(tt::target::ttnn::Pool2dOpType type)
Definition: pool_generated.h:197
void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size)
Definition: pool_generated.h:218
void add_channels(uint32_t channels)
Definition: pool_generated.h:215
void add_ceil_mode(bool ceil_mode)
Definition: pool_generated.h:230
Definition: pool_generated.h:274
Pool2dOp type
Definition: pool_generated.h:275
static constexpr auto Create
Definition: pool_generated.h:276
Definition: pool_generated.h:98
static const Scale2D enum_value
Definition: pool_generated.h:99
Definition: pool_generated.h:332
::flatbuffers::Offset< UniformScale2D > Finish()
Definition: pool_generated.h:343
UniformScale2DBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: pool_generated.h:339
void add_scale(int32_t scale)
Definition: pool_generated.h:336
::flatbuffers::uoffset_t start_
Definition: pool_generated.h:335
UniformScale2D Table
Definition: pool_generated.h:333
::flatbuffers::FlatBufferBuilder & fbb_
Definition: pool_generated.h:334
Definition: pool_generated.h:358
static constexpr auto Create
Definition: pool_generated.h:360
UniformScale2D type
Definition: pool_generated.h:359
Definition: pool_generated.h:487
void add_in(::flatbuffers::Offset< tt::target::ttnn::TensorRef > in)
Definition: pool_generated.h:491
::flatbuffers::FlatBufferBuilder & fbb_
Definition: pool_generated.h:489
::flatbuffers::Offset< UpsampleOp > Finish()
Definition: pool_generated.h:513
void add_mode(::flatbuffers::Offset<::flatbuffers::String > mode)
Definition: pool_generated.h:500
UpsampleOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: pool_generated.h:509
void add_scale_factor_type(tt::target::ttnn::Scale2D scale_factor_type)
Definition: pool_generated.h:494
void add_scale_factor(::flatbuffers::Offset< void > scale_factor)
Definition: pool_generated.h:497
UpsampleOp Table
Definition: pool_generated.h:488
void add_memory_config(::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config)
Definition: pool_generated.h:503
::flatbuffers::uoffset_t start_
Definition: pool_generated.h:490
void add_out(::flatbuffers::Offset< tt::target::ttnn::TensorRef > out)
Definition: pool_generated.h:506
Definition: pool_generated.h:538
UpsampleOp type
Definition: pool_generated.h:539
static constexpr auto Create
Definition: pool_generated.h:540