TT-MLIR
pool_generated.h
1 // automatically generated by the FlatBuffers compiler, do not modify
2 
3 
4 #ifndef FLATBUFFERS_GENERATED_POOL_TT_TARGET_TTNN_H_
5 #define FLATBUFFERS_GENERATED_POOL_TT_TARGET_TTNN_H_
6 
7 #include "flatbuffers/flatbuffers.h"
8 
9 // Ensure the included flatbuffers.h is the same version as when this file was
10 // generated, otherwise it may not be compatible.
11 static_assert(FLATBUFFERS_VERSION_MAJOR == 24 &&
12  FLATBUFFERS_VERSION_MINOR == 3 &&
13  FLATBUFFERS_VERSION_REVISION == 25,
14  "Non-compatible flatbuffers version included");
15 
18 
19 namespace tt {
20 namespace target {
21 namespace ttnn {
22 
23 struct Pool2dOp;
24 struct Pool2dOpBuilder;
25 
26 struct UniformScale2D;
27 struct UniformScale2DBuilder;
28 
29 struct NonUniformScale2D;
30 struct NonUniformScale2DBuilder;
31 
32 struct UpsampleOp;
33 struct UpsampleOpBuilder;
34 
35 enum class Pool2dOpType : uint32_t {
36  AvgPool2d = 0,
37  MaxPool2d = 1,
38  MIN = AvgPool2d,
39  MAX = MaxPool2d
40 };
41 
42 inline const Pool2dOpType (&EnumValuesPool2dOpType())[2] {
43  static const Pool2dOpType values[] = {
44  Pool2dOpType::AvgPool2d,
45  Pool2dOpType::MaxPool2d
46  };
47  return values;
48 }
49 
50 inline const char * const *EnumNamesPool2dOpType() {
51  static const char * const names[3] = {
52  "AvgPool2d",
53  "MaxPool2d",
54  nullptr
55  };
56  return names;
57 }
58 
59 inline const char *EnumNamePool2dOpType(Pool2dOpType e) {
60  if (::flatbuffers::IsOutRange(e, Pool2dOpType::AvgPool2d, Pool2dOpType::MaxPool2d)) return "";
61  const size_t index = static_cast<size_t>(e);
62  return EnumNamesPool2dOpType()[index];
63 }
64 
65 enum class Scale2D : uint8_t {
66  NONE = 0,
67  UniformScale2D = 1,
68  NonUniformScale2D = 2,
69  MIN = NONE,
70  MAX = NonUniformScale2D
71 };
72 
73 inline const Scale2D (&EnumValuesScale2D())[3] {
74  static const Scale2D values[] = {
75  Scale2D::NONE,
76  Scale2D::UniformScale2D,
77  Scale2D::NonUniformScale2D
78  };
79  return values;
80 }
81 
82 inline const char * const *EnumNamesScale2D() {
83  static const char * const names[4] = {
84  "NONE",
85  "UniformScale2D",
86  "NonUniformScale2D",
87  nullptr
88  };
89  return names;
90 }
91 
92 inline const char *EnumNameScale2D(Scale2D e) {
93  if (::flatbuffers::IsOutRange(e, Scale2D::NONE, Scale2D::NonUniformScale2D)) return "";
94  const size_t index = static_cast<size_t>(e);
95  return EnumNamesScale2D()[index];
96 }
97 
98 template<typename T> struct Scale2DTraits {
99  static const Scale2D enum_value = Scale2D::NONE;
100 };
101 
102 template<> struct Scale2DTraits<tt::target::ttnn::UniformScale2D> {
103  static const Scale2D enum_value = Scale2D::UniformScale2D;
104 };
105 
106 template<> struct Scale2DTraits<tt::target::ttnn::NonUniformScale2D> {
107  static const Scale2D enum_value = Scale2D::NonUniformScale2D;
108 };
109 
110 bool VerifyScale2D(::flatbuffers::Verifier &verifier, const void *obj, Scale2D type);
111 bool VerifyScale2DVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<Scale2D> *types);
112 
113 struct Pool2dOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
114  typedef Pool2dOpBuilder Builder;
115  struct Traits;
116  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
117  return "tt.target.ttnn.Pool2dOp";
118  }
119  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
120  VT_TYPE = 4,
121  VT_IN = 6,
122  VT_OUT = 8,
123  VT_BATCH_SIZE = 10,
124  VT_INPUT_HEIGHT = 12,
125  VT_INPUT_WIDTH = 14,
126  VT_CHANNELS = 16,
127  VT_KERNEL_SIZE = 18,
128  VT_STRIDE = 20,
129  VT_PADDING = 22,
130  VT_DILATION = 24,
131  VT_MEMORY_CONFIG = 26,
132  VT_APPLIED_SHARD_SCHEME = 28,
133  VT_CEIL_MODE = 30,
134  VT_IN_PLACE_HALO = 32
135  };
136  tt::target::ttnn::Pool2dOpType type() const {
137  return static_cast<tt::target::ttnn::Pool2dOpType>(GetField<uint32_t>(VT_TYPE, 0));
138  }
139  const tt::target::ttnn::TensorRef *in() const {
140  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_IN);
141  }
142  const tt::target::ttnn::TensorRef *out() const {
143  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
144  }
145  uint32_t batch_size() const {
146  return GetField<uint32_t>(VT_BATCH_SIZE, 0);
147  }
148  uint32_t input_height() const {
149  return GetField<uint32_t>(VT_INPUT_HEIGHT, 0);
150  }
151  uint32_t input_width() const {
152  return GetField<uint32_t>(VT_INPUT_WIDTH, 0);
153  }
154  uint32_t channels() const {
155  return GetField<uint32_t>(VT_CHANNELS, 0);
156  }
157  const ::flatbuffers::Vector<int32_t> *kernel_size() const {
158  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_KERNEL_SIZE);
159  }
160  const ::flatbuffers::Vector<int32_t> *stride() const {
161  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_STRIDE);
162  }
163  const ::flatbuffers::Vector<int32_t> *padding() const {
164  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_PADDING);
165  }
166  const ::flatbuffers::Vector<int32_t> *dilation() const {
167  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_DILATION);
168  }
169  const tt::target::ttnn::MemoryConfig *memory_config() const {
170  return GetPointer<const tt::target::ttnn::MemoryConfig *>(VT_MEMORY_CONFIG);
171  }
172  ::flatbuffers::Optional<tt::target::ttnn::TensorMemoryLayout> applied_shard_scheme() const {
173  return GetOptional<uint16_t, tt::target::ttnn::TensorMemoryLayout>(VT_APPLIED_SHARD_SCHEME);
174  }
175  bool ceil_mode() const {
176  return GetField<uint8_t>(VT_CEIL_MODE, 0) != 0;
177  }
178  bool in_place_halo() const {
179  return GetField<uint8_t>(VT_IN_PLACE_HALO, 0) != 0;
180  }
181  bool Verify(::flatbuffers::Verifier &verifier) const {
182  return VerifyTableStart(verifier) &&
183  VerifyField<uint32_t>(verifier, VT_TYPE, 4) &&
184  VerifyOffset(verifier, VT_IN) &&
185  verifier.VerifyTable(in()) &&
186  VerifyOffset(verifier, VT_OUT) &&
187  verifier.VerifyTable(out()) &&
188  VerifyField<uint32_t>(verifier, VT_BATCH_SIZE, 4) &&
189  VerifyField<uint32_t>(verifier, VT_INPUT_HEIGHT, 4) &&
190  VerifyField<uint32_t>(verifier, VT_INPUT_WIDTH, 4) &&
191  VerifyField<uint32_t>(verifier, VT_CHANNELS, 4) &&
192  VerifyOffset(verifier, VT_KERNEL_SIZE) &&
193  verifier.VerifyVector(kernel_size()) &&
194  VerifyOffset(verifier, VT_STRIDE) &&
195  verifier.VerifyVector(stride()) &&
196  VerifyOffset(verifier, VT_PADDING) &&
197  verifier.VerifyVector(padding()) &&
198  VerifyOffset(verifier, VT_DILATION) &&
199  verifier.VerifyVector(dilation()) &&
200  VerifyOffset(verifier, VT_MEMORY_CONFIG) &&
201  verifier.VerifyTable(memory_config()) &&
202  VerifyField<uint16_t>(verifier, VT_APPLIED_SHARD_SCHEME, 2) &&
203  VerifyField<uint8_t>(verifier, VT_CEIL_MODE, 1) &&
204  VerifyField<uint8_t>(verifier, VT_IN_PLACE_HALO, 1) &&
205  verifier.EndTable();
206  }
207 };
208 
209 struct Pool2dOpBuilder {
210  typedef Pool2dOp Table;
211  ::flatbuffers::FlatBufferBuilder &fbb_;
212  ::flatbuffers::uoffset_t start_;
213  void add_type(tt::target::ttnn::Pool2dOpType type) {
214  fbb_.AddElement<uint32_t>(Pool2dOp::VT_TYPE, static_cast<uint32_t>(type), 0);
215  }
216  void add_in(::flatbuffers::Offset<tt::target::ttnn::TensorRef> in) {
217  fbb_.AddOffset(Pool2dOp::VT_IN, in);
218  }
219  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
220  fbb_.AddOffset(Pool2dOp::VT_OUT, out);
221  }
222  void add_batch_size(uint32_t batch_size) {
223  fbb_.AddElement<uint32_t>(Pool2dOp::VT_BATCH_SIZE, batch_size, 0);
224  }
225  void add_input_height(uint32_t input_height) {
226  fbb_.AddElement<uint32_t>(Pool2dOp::VT_INPUT_HEIGHT, input_height, 0);
227  }
228  void add_input_width(uint32_t input_width) {
229  fbb_.AddElement<uint32_t>(Pool2dOp::VT_INPUT_WIDTH, input_width, 0);
230  }
231  void add_channels(uint32_t channels) {
232  fbb_.AddElement<uint32_t>(Pool2dOp::VT_CHANNELS, channels, 0);
233  }
234  void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size) {
235  fbb_.AddOffset(Pool2dOp::VT_KERNEL_SIZE, kernel_size);
236  }
237  void add_stride(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride) {
238  fbb_.AddOffset(Pool2dOp::VT_STRIDE, stride);
239  }
240  void add_padding(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding) {
241  fbb_.AddOffset(Pool2dOp::VT_PADDING, padding);
242  }
243  void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation) {
244  fbb_.AddOffset(Pool2dOp::VT_DILATION, dilation);
245  }
246  void add_memory_config(::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config) {
247  fbb_.AddOffset(Pool2dOp::VT_MEMORY_CONFIG, memory_config);
248  }
249  void add_applied_shard_scheme(tt::target::ttnn::TensorMemoryLayout applied_shard_scheme) {
250  fbb_.AddElement<uint16_t>(Pool2dOp::VT_APPLIED_SHARD_SCHEME, static_cast<uint16_t>(applied_shard_scheme));
251  }
252  void add_ceil_mode(bool ceil_mode) {
253  fbb_.AddElement<uint8_t>(Pool2dOp::VT_CEIL_MODE, static_cast<uint8_t>(ceil_mode), 0);
254  }
255  void add_in_place_halo(bool in_place_halo) {
256  fbb_.AddElement<uint8_t>(Pool2dOp::VT_IN_PLACE_HALO, static_cast<uint8_t>(in_place_halo), 0);
257  }
258  explicit Pool2dOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
259  : fbb_(_fbb) {
260  start_ = fbb_.StartTable();
261  }
262  ::flatbuffers::Offset<Pool2dOp> Finish() {
263  const auto end = fbb_.EndTable(start_);
264  auto o = ::flatbuffers::Offset<Pool2dOp>(end);
265  return o;
266  }
267 };
268 
269 inline ::flatbuffers::Offset<Pool2dOp> CreatePool2dOp(
270  ::flatbuffers::FlatBufferBuilder &_fbb,
271  tt::target::ttnn::Pool2dOpType type = tt::target::ttnn::Pool2dOpType::AvgPool2d,
272  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
273  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
274  uint32_t batch_size = 0,
275  uint32_t input_height = 0,
276  uint32_t input_width = 0,
277  uint32_t channels = 0,
278  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size = 0,
279  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride = 0,
280  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding = 0,
281  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation = 0,
282  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config = 0,
283  ::flatbuffers::Optional<tt::target::ttnn::TensorMemoryLayout> applied_shard_scheme = ::flatbuffers::nullopt,
284  bool ceil_mode = false,
285  bool in_place_halo = false) {
286  Pool2dOpBuilder builder_(_fbb);
287  builder_.add_memory_config(memory_config);
288  builder_.add_dilation(dilation);
289  builder_.add_padding(padding);
290  builder_.add_stride(stride);
291  builder_.add_kernel_size(kernel_size);
292  builder_.add_channels(channels);
293  builder_.add_input_width(input_width);
294  builder_.add_input_height(input_height);
295  builder_.add_batch_size(batch_size);
296  builder_.add_out(out);
297  builder_.add_in(in);
298  builder_.add_type(type);
299  if(applied_shard_scheme) { builder_.add_applied_shard_scheme(*applied_shard_scheme); }
300  builder_.add_in_place_halo(in_place_halo);
301  builder_.add_ceil_mode(ceil_mode);
302  return builder_.Finish();
303 }
304 
305 struct Pool2dOp::Traits {
306  using type = Pool2dOp;
307  static auto constexpr Create = CreatePool2dOp;
308 };
309 
310 inline ::flatbuffers::Offset<Pool2dOp> CreatePool2dOpDirect(
311  ::flatbuffers::FlatBufferBuilder &_fbb,
312  tt::target::ttnn::Pool2dOpType type = tt::target::ttnn::Pool2dOpType::AvgPool2d,
313  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
314  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
315  uint32_t batch_size = 0,
316  uint32_t input_height = 0,
317  uint32_t input_width = 0,
318  uint32_t channels = 0,
319  const std::vector<int32_t> *kernel_size = nullptr,
320  const std::vector<int32_t> *stride = nullptr,
321  const std::vector<int32_t> *padding = nullptr,
322  const std::vector<int32_t> *dilation = nullptr,
323  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config = 0,
324  ::flatbuffers::Optional<tt::target::ttnn::TensorMemoryLayout> applied_shard_scheme = ::flatbuffers::nullopt,
325  bool ceil_mode = false,
326  bool in_place_halo = false) {
327  auto kernel_size__ = kernel_size ? _fbb.CreateVector<int32_t>(*kernel_size) : 0;
328  auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
329  auto padding__ = padding ? _fbb.CreateVector<int32_t>(*padding) : 0;
330  auto dilation__ = dilation ? _fbb.CreateVector<int32_t>(*dilation) : 0;
331  return tt::target::ttnn::CreatePool2dOp(
332  _fbb,
333  type,
334  in,
335  out,
336  batch_size,
337  input_height,
338  input_width,
339  channels,
340  kernel_size__,
341  stride__,
342  padding__,
343  dilation__,
344  memory_config,
345  applied_shard_scheme,
346  ceil_mode,
347  in_place_halo);
348 }
349 
350 struct UniformScale2D FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
351  typedef UniformScale2DBuilder Builder;
352  struct Traits;
353  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
354  return "tt.target.ttnn.UniformScale2D";
355  }
356  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
357  VT_SCALE = 4
358  };
359  int32_t scale() const {
360  return GetField<int32_t>(VT_SCALE, 0);
361  }
362  bool Verify(::flatbuffers::Verifier &verifier) const {
363  return VerifyTableStart(verifier) &&
364  VerifyField<int32_t>(verifier, VT_SCALE, 4) &&
365  verifier.EndTable();
366  }
367 };
368 
369 struct UniformScale2DBuilder {
370  typedef UniformScale2D Table;
371  ::flatbuffers::FlatBufferBuilder &fbb_;
372  ::flatbuffers::uoffset_t start_;
373  void add_scale(int32_t scale) {
374  fbb_.AddElement<int32_t>(UniformScale2D::VT_SCALE, scale, 0);
375  }
376  explicit UniformScale2DBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
377  : fbb_(_fbb) {
378  start_ = fbb_.StartTable();
379  }
380  ::flatbuffers::Offset<UniformScale2D> Finish() {
381  const auto end = fbb_.EndTable(start_);
382  auto o = ::flatbuffers::Offset<UniformScale2D>(end);
383  return o;
384  }
385 };
386 
387 inline ::flatbuffers::Offset<UniformScale2D> CreateUniformScale2D(
388  ::flatbuffers::FlatBufferBuilder &_fbb,
389  int32_t scale = 0) {
390  UniformScale2DBuilder builder_(_fbb);
391  builder_.add_scale(scale);
392  return builder_.Finish();
393 }
394 
395 struct UniformScale2D::Traits {
396  using type = UniformScale2D;
397  static auto constexpr Create = CreateUniformScale2D;
398 };
399 
400 struct NonUniformScale2D FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
401  typedef NonUniformScale2DBuilder Builder;
402  struct Traits;
403  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
404  return "tt.target.ttnn.NonUniformScale2D";
405  }
406  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
407  VT_SCALE = 4
408  };
409  const ::flatbuffers::Vector<int32_t> *scale() const {
410  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_SCALE);
411  }
412  bool Verify(::flatbuffers::Verifier &verifier) const {
413  return VerifyTableStart(verifier) &&
414  VerifyOffset(verifier, VT_SCALE) &&
415  verifier.VerifyVector(scale()) &&
416  verifier.EndTable();
417  }
418 };
419 
420 struct NonUniformScale2DBuilder {
421  typedef NonUniformScale2D Table;
422  ::flatbuffers::FlatBufferBuilder &fbb_;
423  ::flatbuffers::uoffset_t start_;
424  void add_scale(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> scale) {
425  fbb_.AddOffset(NonUniformScale2D::VT_SCALE, scale);
426  }
427  explicit NonUniformScale2DBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
428  : fbb_(_fbb) {
429  start_ = fbb_.StartTable();
430  }
431  ::flatbuffers::Offset<NonUniformScale2D> Finish() {
432  const auto end = fbb_.EndTable(start_);
433  auto o = ::flatbuffers::Offset<NonUniformScale2D>(end);
434  return o;
435  }
436 };
437 
438 inline ::flatbuffers::Offset<NonUniformScale2D> CreateNonUniformScale2D(
439  ::flatbuffers::FlatBufferBuilder &_fbb,
440  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> scale = 0) {
441  NonUniformScale2DBuilder builder_(_fbb);
442  builder_.add_scale(scale);
443  return builder_.Finish();
444 }
445 
446 struct NonUniformScale2D::Traits {
447  using type = NonUniformScale2D;
448  static auto constexpr Create = CreateNonUniformScale2D;
449 };
450 
451 inline ::flatbuffers::Offset<NonUniformScale2D> CreateNonUniformScale2DDirect(
452  ::flatbuffers::FlatBufferBuilder &_fbb,
453  const std::vector<int32_t> *scale = nullptr) {
454  auto scale__ = scale ? _fbb.CreateVector<int32_t>(*scale) : 0;
455  return tt::target::ttnn::CreateNonUniformScale2D(
456  _fbb,
457  scale__);
458 }
459 
460 struct UpsampleOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
461  typedef UpsampleOpBuilder Builder;
462  struct Traits;
463  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
464  return "tt.target.ttnn.UpsampleOp";
465  }
466  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
467  VT_IN = 4,
468  VT_SCALE_FACTOR_TYPE = 6,
469  VT_SCALE_FACTOR = 8,
470  VT_MODE = 10,
471  VT_MEMORY_CONFIG = 12,
472  VT_OUT = 14
473  };
474  const tt::target::ttnn::TensorRef *in() const {
475  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_IN);
476  }
477  tt::target::ttnn::Scale2D scale_factor_type() const {
478  return static_cast<tt::target::ttnn::Scale2D>(GetField<uint8_t>(VT_SCALE_FACTOR_TYPE, 0));
479  }
480  const void *scale_factor() const {
481  return GetPointer<const void *>(VT_SCALE_FACTOR);
482  }
483  template<typename T> const T *scale_factor_as() const;
484  const tt::target::ttnn::UniformScale2D *scale_factor_as_UniformScale2D() const {
485  return scale_factor_type() == tt::target::ttnn::Scale2D::UniformScale2D ? static_cast<const tt::target::ttnn::UniformScale2D *>(scale_factor()) : nullptr;
486  }
487  const tt::target::ttnn::NonUniformScale2D *scale_factor_as_NonUniformScale2D() const {
488  return scale_factor_type() == tt::target::ttnn::Scale2D::NonUniformScale2D ? static_cast<const tt::target::ttnn::NonUniformScale2D *>(scale_factor()) : nullptr;
489  }
490  const ::flatbuffers::String *mode() const {
491  return GetPointer<const ::flatbuffers::String *>(VT_MODE);
492  }
493  const tt::target::ttnn::MemoryConfig *memory_config() const {
494  return GetPointer<const tt::target::ttnn::MemoryConfig *>(VT_MEMORY_CONFIG);
495  }
496  const tt::target::ttnn::TensorRef *out() const {
497  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
498  }
499  bool Verify(::flatbuffers::Verifier &verifier) const {
500  return VerifyTableStart(verifier) &&
501  VerifyOffset(verifier, VT_IN) &&
502  verifier.VerifyTable(in()) &&
503  VerifyField<uint8_t>(verifier, VT_SCALE_FACTOR_TYPE, 1) &&
504  VerifyOffset(verifier, VT_SCALE_FACTOR) &&
505  VerifyScale2D(verifier, scale_factor(), scale_factor_type()) &&
506  VerifyOffset(verifier, VT_MODE) &&
507  verifier.VerifyString(mode()) &&
508  VerifyOffset(verifier, VT_MEMORY_CONFIG) &&
509  verifier.VerifyTable(memory_config()) &&
510  VerifyOffset(verifier, VT_OUT) &&
511  verifier.VerifyTable(out()) &&
512  verifier.EndTable();
513  }
514 };
515 
516 template<> inline const tt::target::ttnn::UniformScale2D *UpsampleOp::scale_factor_as<tt::target::ttnn::UniformScale2D>() const {
517  return scale_factor_as_UniformScale2D();
518 }
519 
520 template<> inline const tt::target::ttnn::NonUniformScale2D *UpsampleOp::scale_factor_as<tt::target::ttnn::NonUniformScale2D>() const {
521  return scale_factor_as_NonUniformScale2D();
522 }
523 
524 struct UpsampleOpBuilder {
525  typedef UpsampleOp Table;
526  ::flatbuffers::FlatBufferBuilder &fbb_;
527  ::flatbuffers::uoffset_t start_;
528  void add_in(::flatbuffers::Offset<tt::target::ttnn::TensorRef> in) {
529  fbb_.AddOffset(UpsampleOp::VT_IN, in);
530  }
531  void add_scale_factor_type(tt::target::ttnn::Scale2D scale_factor_type) {
532  fbb_.AddElement<uint8_t>(UpsampleOp::VT_SCALE_FACTOR_TYPE, static_cast<uint8_t>(scale_factor_type), 0);
533  }
534  void add_scale_factor(::flatbuffers::Offset<void> scale_factor) {
535  fbb_.AddOffset(UpsampleOp::VT_SCALE_FACTOR, scale_factor);
536  }
537  void add_mode(::flatbuffers::Offset<::flatbuffers::String> mode) {
538  fbb_.AddOffset(UpsampleOp::VT_MODE, mode);
539  }
540  void add_memory_config(::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config) {
541  fbb_.AddOffset(UpsampleOp::VT_MEMORY_CONFIG, memory_config);
542  }
543  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
544  fbb_.AddOffset(UpsampleOp::VT_OUT, out);
545  }
546  explicit UpsampleOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
547  : fbb_(_fbb) {
548  start_ = fbb_.StartTable();
549  }
550  ::flatbuffers::Offset<UpsampleOp> Finish() {
551  const auto end = fbb_.EndTable(start_);
552  auto o = ::flatbuffers::Offset<UpsampleOp>(end);
553  return o;
554  }
555 };
556 
557 inline ::flatbuffers::Offset<UpsampleOp> CreateUpsampleOp(
558  ::flatbuffers::FlatBufferBuilder &_fbb,
559  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
560  tt::target::ttnn::Scale2D scale_factor_type = tt::target::ttnn::Scale2D::NONE,
561  ::flatbuffers::Offset<void> scale_factor = 0,
562  ::flatbuffers::Offset<::flatbuffers::String> mode = 0,
563  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config = 0,
564  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0) {
565  UpsampleOpBuilder builder_(_fbb);
566  builder_.add_out(out);
567  builder_.add_memory_config(memory_config);
568  builder_.add_mode(mode);
569  builder_.add_scale_factor(scale_factor);
570  builder_.add_in(in);
571  builder_.add_scale_factor_type(scale_factor_type);
572  return builder_.Finish();
573 }
574 
575 struct UpsampleOp::Traits {
576  using type = UpsampleOp;
577  static auto constexpr Create = CreateUpsampleOp;
578 };
579 
580 inline ::flatbuffers::Offset<UpsampleOp> CreateUpsampleOpDirect(
581  ::flatbuffers::FlatBufferBuilder &_fbb,
582  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> in = 0,
583  tt::target::ttnn::Scale2D scale_factor_type = tt::target::ttnn::Scale2D::NONE,
584  ::flatbuffers::Offset<void> scale_factor = 0,
585  const char *mode = nullptr,
586  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config = 0,
587  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0) {
588  auto mode__ = mode ? _fbb.CreateString(mode) : 0;
589  return tt::target::ttnn::CreateUpsampleOp(
590  _fbb,
591  in,
592  scale_factor_type,
593  scale_factor,
594  mode__,
595  memory_config,
596  out);
597 }
598 
599 inline bool VerifyScale2D(::flatbuffers::Verifier &verifier, const void *obj, Scale2D type) {
600  switch (type) {
601  case Scale2D::NONE: {
602  return true;
603  }
604  case Scale2D::UniformScale2D: {
605  auto ptr = reinterpret_cast<const tt::target::ttnn::UniformScale2D *>(obj);
606  return verifier.VerifyTable(ptr);
607  }
608  case Scale2D::NonUniformScale2D: {
609  auto ptr = reinterpret_cast<const tt::target::ttnn::NonUniformScale2D *>(obj);
610  return verifier.VerifyTable(ptr);
611  }
612  default: return true;
613  }
614 }
615 
616 inline bool VerifyScale2DVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<Scale2D> *types) {
617  if (!values || !types) return !values && !types;
618  if (values->size() != types->size()) return false;
619  for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
620  if (!VerifyScale2D(
621  verifier, values->Get(i), types->GetEnum<Scale2D>(i))) {
622  return false;
623  }
624  }
625  return true;
626 }
627 
628 } // namespace ttnn
629 } // namespace target
630 } // namespace tt
631 
632 #endif // FLATBUFFERS_GENERATED_POOL_TT_TARGET_TTNN_H_
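
For reference, a minimal usage sketch of the builders and accessors declared above: it serializes an UpsampleOp whose scale_factor union holds a UniformScale2D, then reads it back through the typed accessor. The include path, the "nearest" mode string, and the omitted TensorRef/MemoryConfig tables (left as 0) are assumptions made for illustration only, not part of the generated header.

// Sketch only: adjust the include path to your build layout.
#include "pool_generated.h"

#include "flatbuffers/flatbuffers.h"

int main() {
  ::flatbuffers::FlatBufferBuilder fbb;

  // Build the Scale2D union member first, then pass its type tag together
  // with the type-erased offset when creating the enclosing UpsampleOp table.
  auto scale = tt::target::ttnn::CreateUniformScale2D(fbb, /*scale=*/2);
  auto op = tt::target::ttnn::CreateUpsampleOpDirect(
      fbb,
      /*in=*/0,  // TensorRef omitted in this sketch
      tt::target::ttnn::Scale2D::UniformScale2D,
      scale.Union(),
      /*mode=*/"nearest",
      /*memory_config=*/0,  // MemoryConfig omitted in this sketch
      /*out=*/0);
  fbb.Finish(op);

  // Read the table back and resolve the union through the typed accessor.
  const auto *parsed =
      ::flatbuffers::GetRoot<tt::target::ttnn::UpsampleOp>(fbb.GetBufferPointer());
  const auto *uniform = parsed->scale_factor_as_UniformScale2D();
  return (uniform && uniform->scale() == 2) ? 0 : 1;
}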