// TT-MLIR — conv_generated.h
// (Header recovered from a Doxygen "documentation of this file" page;
//  embedded page line numbers have been stripped.)
// automatically generated by the FlatBuffers compiler, do not modify


#ifndef FLATBUFFERS_GENERATED_CONV_TT_TARGET_TTNN_H_
#define FLATBUFFERS_GENERATED_CONV_TT_TARGET_TTNN_H_

#include "flatbuffers/flatbuffers.h"

// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 24 &&
              FLATBUFFERS_VERSION_MINOR == 3 &&
              FLATBUFFERS_VERSION_REVISION == 25,
              "Non-compatible flatbuffers version included");
18 
19 namespace tt {
20 namespace target {
21 namespace ttnn {
22 
23 struct Conv2dConfig;
24 struct Conv2dConfigBuilder;
25 
27 struct PrepareConv2dWeightsOpBuilder;
28 
29 struct Conv2dOp;
30 struct Conv2dOpBuilder;
31 
32 struct ConvTranspose2dOp;
33 struct ConvTranspose2dOpBuilder;
34 
35 struct Conv2dConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
37  struct Traits;
38  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
39  VT_DTYPE = 4,
58  VT_ENABLE_SUBBLOCK_PADDING = 42
59  };
61  return static_cast<tt::target::DataType>(GetField<uint16_t>(VT_DTYPE, 0));
62  }
64  return static_cast<tt::target::DataType>(GetField<uint16_t>(VT_WEIGHTS_DTYPE, 0));
65  }
66  const ::flatbuffers::String *activation() const {
67  return GetPointer<const ::flatbuffers::String *>(VT_ACTIVATION);
68  }
69  uint32_t input_channels_alignment() const {
70  return GetField<uint32_t>(VT_INPUT_CHANNELS_ALIGNMENT, 0);
71  }
72  bool deallocate_activation() const {
73  return GetField<uint8_t>(VT_DEALLOCATE_ACTIVATION, 0) != 0;
74  }
75  bool reallocate_halo_output() const {
76  return GetField<uint8_t>(VT_REALLOCATE_HALO_OUTPUT, 0) != 0;
77  }
78  uint32_t act_block_h_override() const {
79  return GetField<uint32_t>(VT_ACT_BLOCK_H_OVERRIDE, 0);
80  }
81  uint32_t act_block_w_div() const {
82  return GetField<uint32_t>(VT_ACT_BLOCK_W_DIV, 0);
83  }
84  bool reshard_if_not_optimal() const {
85  return GetField<uint8_t>(VT_RESHARD_IF_NOT_OPTIMAL, 0) != 0;
86  }
87  bool override_sharding_config() const {
88  return GetField<uint8_t>(VT_OVERRIDE_SHARDING_CONFIG, 0) != 0;
89  }
90  ::flatbuffers::Optional<tt::target::ttnn::TensorMemoryLayout> shard_layout() const {
91  return GetOptional<uint16_t, tt::target::ttnn::TensorMemoryLayout>(VT_SHARD_LAYOUT);
92  }
93  const tt::target::ttnn::CoreRangeSet *core_grid() const {
94  return GetPointer<const tt::target::ttnn::CoreRangeSet *>(VT_CORE_GRID);
95  }
96  bool transpose_shards() const {
97  return GetField<uint8_t>(VT_TRANSPOSE_SHARDS, 0) != 0;
98  }
100  return static_cast<tt::target::TensorLayout>(GetField<uint16_t>(VT_OUTPUT_LAYOUT, 0));
101  }
103  return GetField<uint8_t>(VT_PREPROCESS_WEIGHTS_ON_DEVICE, 0) != 0;
104  }
106  return GetField<uint8_t>(VT_ALWAYS_PREPROCESS_WEIGHTS, 0) != 0;
107  }
109  return GetField<uint8_t>(VT_ENABLE_ACT_DOUBLE_BUFFER, 0) != 0;
110  }
112  return GetField<uint8_t>(VT_ENABLE_WEIGHTS_DOUBLE_BUFFER, 0) != 0;
113  }
114  bool enable_split_reader() const {
115  return GetField<uint8_t>(VT_ENABLE_SPLIT_READER, 0) != 0;
116  }
117  bool enable_subblock_padding() const {
118  return GetField<uint8_t>(VT_ENABLE_SUBBLOCK_PADDING, 0) != 0;
119  }
120  bool Verify(::flatbuffers::Verifier &verifier) const {
121  return VerifyTableStart(verifier) &&
122  VerifyField<uint16_t>(verifier, VT_DTYPE, 2) &&
123  VerifyField<uint16_t>(verifier, VT_WEIGHTS_DTYPE, 2) &&
124  VerifyOffset(verifier, VT_ACTIVATION) &&
125  verifier.VerifyString(activation()) &&
126  VerifyField<uint32_t>(verifier, VT_INPUT_CHANNELS_ALIGNMENT, 4) &&
127  VerifyField<uint8_t>(verifier, VT_DEALLOCATE_ACTIVATION, 1) &&
128  VerifyField<uint8_t>(verifier, VT_REALLOCATE_HALO_OUTPUT, 1) &&
129  VerifyField<uint32_t>(verifier, VT_ACT_BLOCK_H_OVERRIDE, 4) &&
130  VerifyField<uint32_t>(verifier, VT_ACT_BLOCK_W_DIV, 4) &&
131  VerifyField<uint8_t>(verifier, VT_RESHARD_IF_NOT_OPTIMAL, 1) &&
132  VerifyField<uint8_t>(verifier, VT_OVERRIDE_SHARDING_CONFIG, 1) &&
133  VerifyField<uint16_t>(verifier, VT_SHARD_LAYOUT, 2) &&
134  VerifyOffset(verifier, VT_CORE_GRID) &&
135  verifier.VerifyTable(core_grid()) &&
136  VerifyField<uint8_t>(verifier, VT_TRANSPOSE_SHARDS, 1) &&
137  VerifyField<uint16_t>(verifier, VT_OUTPUT_LAYOUT, 2) &&
138  VerifyField<uint8_t>(verifier, VT_PREPROCESS_WEIGHTS_ON_DEVICE, 1) &&
139  VerifyField<uint8_t>(verifier, VT_ALWAYS_PREPROCESS_WEIGHTS, 1) &&
140  VerifyField<uint8_t>(verifier, VT_ENABLE_ACT_DOUBLE_BUFFER, 1) &&
141  VerifyField<uint8_t>(verifier, VT_ENABLE_WEIGHTS_DOUBLE_BUFFER, 1) &&
142  VerifyField<uint8_t>(verifier, VT_ENABLE_SPLIT_READER, 1) &&
143  VerifyField<uint8_t>(verifier, VT_ENABLE_SUBBLOCK_PADDING, 1) &&
144  verifier.EndTable();
145  }
146 };
147 
149  typedef Conv2dConfig Table;
150  ::flatbuffers::FlatBufferBuilder &fbb_;
151  ::flatbuffers::uoffset_t start_;
153  fbb_.AddElement<uint16_t>(Conv2dConfig::VT_DTYPE, static_cast<uint16_t>(dtype), 0);
154  }
156  fbb_.AddElement<uint16_t>(Conv2dConfig::VT_WEIGHTS_DTYPE, static_cast<uint16_t>(weights_dtype), 0);
157  }
158  void add_activation(::flatbuffers::Offset<::flatbuffers::String> activation) {
159  fbb_.AddOffset(Conv2dConfig::VT_ACTIVATION, activation);
160  }
161  void add_input_channels_alignment(uint32_t input_channels_alignment) {
162  fbb_.AddElement<uint32_t>(Conv2dConfig::VT_INPUT_CHANNELS_ALIGNMENT, input_channels_alignment, 0);
163  }
164  void add_deallocate_activation(bool deallocate_activation) {
165  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_DEALLOCATE_ACTIVATION, static_cast<uint8_t>(deallocate_activation), 0);
166  }
167  void add_reallocate_halo_output(bool reallocate_halo_output) {
168  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_REALLOCATE_HALO_OUTPUT, static_cast<uint8_t>(reallocate_halo_output), 0);
169  }
170  void add_act_block_h_override(uint32_t act_block_h_override) {
171  fbb_.AddElement<uint32_t>(Conv2dConfig::VT_ACT_BLOCK_H_OVERRIDE, act_block_h_override, 0);
172  }
173  void add_act_block_w_div(uint32_t act_block_w_div) {
174  fbb_.AddElement<uint32_t>(Conv2dConfig::VT_ACT_BLOCK_W_DIV, act_block_w_div, 0);
175  }
176  void add_reshard_if_not_optimal(bool reshard_if_not_optimal) {
177  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_RESHARD_IF_NOT_OPTIMAL, static_cast<uint8_t>(reshard_if_not_optimal), 0);
178  }
179  void add_override_sharding_config(bool override_sharding_config) {
180  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_OVERRIDE_SHARDING_CONFIG, static_cast<uint8_t>(override_sharding_config), 0);
181  }
183  fbb_.AddElement<uint16_t>(Conv2dConfig::VT_SHARD_LAYOUT, static_cast<uint16_t>(shard_layout));
184  }
185  void add_core_grid(::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> core_grid) {
186  fbb_.AddOffset(Conv2dConfig::VT_CORE_GRID, core_grid);
187  }
188  void add_transpose_shards(bool transpose_shards) {
189  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_TRANSPOSE_SHARDS, static_cast<uint8_t>(transpose_shards), 0);
190  }
192  fbb_.AddElement<uint16_t>(Conv2dConfig::VT_OUTPUT_LAYOUT, static_cast<uint16_t>(output_layout), 0);
193  }
194  void add_preprocess_weights_on_device(bool preprocess_weights_on_device) {
195  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_PREPROCESS_WEIGHTS_ON_DEVICE, static_cast<uint8_t>(preprocess_weights_on_device), 0);
196  }
197  void add_always_preprocess_weights(bool always_preprocess_weights) {
198  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_ALWAYS_PREPROCESS_WEIGHTS, static_cast<uint8_t>(always_preprocess_weights), 0);
199  }
200  void add_enable_act_double_buffer(bool enable_act_double_buffer) {
201  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_ENABLE_ACT_DOUBLE_BUFFER, static_cast<uint8_t>(enable_act_double_buffer), 0);
202  }
203  void add_enable_weights_double_buffer(bool enable_weights_double_buffer) {
204  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_ENABLE_WEIGHTS_DOUBLE_BUFFER, static_cast<uint8_t>(enable_weights_double_buffer), 0);
205  }
206  void add_enable_split_reader(bool enable_split_reader) {
207  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_ENABLE_SPLIT_READER, static_cast<uint8_t>(enable_split_reader), 0);
208  }
209  void add_enable_subblock_padding(bool enable_subblock_padding) {
210  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_ENABLE_SUBBLOCK_PADDING, static_cast<uint8_t>(enable_subblock_padding), 0);
211  }
212  explicit Conv2dConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
213  : fbb_(_fbb) {
214  start_ = fbb_.StartTable();
215  }
216  ::flatbuffers::Offset<Conv2dConfig> Finish() {
217  const auto end = fbb_.EndTable(start_);
218  auto o = ::flatbuffers::Offset<Conv2dConfig>(end);
219  return o;
220  }
221 };
222 
223 inline ::flatbuffers::Offset<Conv2dConfig> CreateConv2dConfig(
224  ::flatbuffers::FlatBufferBuilder &_fbb,
227  ::flatbuffers::Offset<::flatbuffers::String> activation = 0,
228  uint32_t input_channels_alignment = 0,
229  bool deallocate_activation = false,
230  bool reallocate_halo_output = false,
231  uint32_t act_block_h_override = 0,
232  uint32_t act_block_w_div = 0,
233  bool reshard_if_not_optimal = false,
234  bool override_sharding_config = false,
235  ::flatbuffers::Optional<tt::target::ttnn::TensorMemoryLayout> shard_layout = ::flatbuffers::nullopt,
236  ::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> core_grid = 0,
237  bool transpose_shards = false,
239  bool preprocess_weights_on_device = false,
240  bool always_preprocess_weights = false,
241  bool enable_act_double_buffer = false,
242  bool enable_weights_double_buffer = false,
243  bool enable_split_reader = false,
244  bool enable_subblock_padding = false) {
245  Conv2dConfigBuilder builder_(_fbb);
246  builder_.add_core_grid(core_grid);
247  builder_.add_act_block_w_div(act_block_w_div);
248  builder_.add_act_block_h_override(act_block_h_override);
249  builder_.add_input_channels_alignment(input_channels_alignment);
250  builder_.add_activation(activation);
251  builder_.add_output_layout(output_layout);
252  if(shard_layout) { builder_.add_shard_layout(*shard_layout); }
253  builder_.add_weights_dtype(weights_dtype);
254  builder_.add_dtype(dtype);
255  builder_.add_enable_subblock_padding(enable_subblock_padding);
256  builder_.add_enable_split_reader(enable_split_reader);
257  builder_.add_enable_weights_double_buffer(enable_weights_double_buffer);
258  builder_.add_enable_act_double_buffer(enable_act_double_buffer);
259  builder_.add_always_preprocess_weights(always_preprocess_weights);
260  builder_.add_preprocess_weights_on_device(preprocess_weights_on_device);
261  builder_.add_transpose_shards(transpose_shards);
262  builder_.add_override_sharding_config(override_sharding_config);
263  builder_.add_reshard_if_not_optimal(reshard_if_not_optimal);
264  builder_.add_reallocate_halo_output(reallocate_halo_output);
265  builder_.add_deallocate_activation(deallocate_activation);
266  return builder_.Finish();
267 }
268 
270  using type = Conv2dConfig;
271  static auto constexpr Create = CreateConv2dConfig;
272 };
273 
274 inline ::flatbuffers::Offset<Conv2dConfig> CreateConv2dConfigDirect(
275  ::flatbuffers::FlatBufferBuilder &_fbb,
278  const char *activation = nullptr,
279  uint32_t input_channels_alignment = 0,
280  bool deallocate_activation = false,
281  bool reallocate_halo_output = false,
282  uint32_t act_block_h_override = 0,
283  uint32_t act_block_w_div = 0,
284  bool reshard_if_not_optimal = false,
285  bool override_sharding_config = false,
286  ::flatbuffers::Optional<tt::target::ttnn::TensorMemoryLayout> shard_layout = ::flatbuffers::nullopt,
287  ::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> core_grid = 0,
288  bool transpose_shards = false,
290  bool preprocess_weights_on_device = false,
291  bool always_preprocess_weights = false,
292  bool enable_act_double_buffer = false,
293  bool enable_weights_double_buffer = false,
294  bool enable_split_reader = false,
295  bool enable_subblock_padding = false) {
296  auto activation__ = activation ? _fbb.CreateString(activation) : 0;
298  _fbb,
299  dtype,
300  weights_dtype,
301  activation__,
302  input_channels_alignment,
303  deallocate_activation,
304  reallocate_halo_output,
305  act_block_h_override,
306  act_block_w_div,
307  reshard_if_not_optimal,
308  override_sharding_config,
309  shard_layout,
310  core_grid,
311  transpose_shards,
312  output_layout,
313  preprocess_weights_on_device,
314  always_preprocess_weights,
315  enable_act_double_buffer,
316  enable_weights_double_buffer,
317  enable_split_reader,
318  enable_subblock_padding);
319 }
320 
321 struct PrepareConv2dWeightsOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
323  struct Traits;
324  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
326  VT_OUT = 6,
336  VT_STRIDE = 26,
340  VT_GROUPS = 34,
341  VT_DEVICE = 36,
342  VT_CONV2D_CONFIG = 38
343  };
344  const tt::target::ttnn::TensorRef *weight_tensor() const {
345  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_WEIGHT_TENSOR);
346  }
347  const tt::target::ttnn::TensorRef *out() const {
348  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
349  }
350  const tt::target::ttnn::MemoryConfig *input_memory_config() const {
351  return GetPointer<const tt::target::ttnn::MemoryConfig *>(VT_INPUT_MEMORY_CONFIG);
352  }
354  return static_cast<tt::target::TensorLayout>(GetField<uint16_t>(VT_INPUT_TENSOR_LAYOUT, 0));
355  }
356  const ::flatbuffers::String *weights_format() const {
357  return GetPointer<const ::flatbuffers::String *>(VT_WEIGHTS_FORMAT);
358  }
359  uint32_t in_channels() const {
360  return GetField<uint32_t>(VT_IN_CHANNELS, 0);
361  }
362  uint32_t out_channels() const {
363  return GetField<uint32_t>(VT_OUT_CHANNELS, 0);
364  }
365  uint32_t batch_size() const {
366  return GetField<uint32_t>(VT_BATCH_SIZE, 0);
367  }
368  uint32_t input_height() const {
369  return GetField<uint32_t>(VT_INPUT_HEIGHT, 0);
370  }
371  uint32_t input_width() const {
372  return GetField<uint32_t>(VT_INPUT_WIDTH, 0);
373  }
374  const ::flatbuffers::Vector<int32_t> *kernel_size() const {
375  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_KERNEL_SIZE);
376  }
377  const ::flatbuffers::Vector<int32_t> *stride() const {
378  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_STRIDE);
379  }
380  const ::flatbuffers::Vector<int32_t> *padding() const {
381  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_PADDING);
382  }
383  const ::flatbuffers::Vector<int32_t> *dilation() const {
384  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_DILATION);
385  }
386  bool has_bias() const {
387  return GetField<uint8_t>(VT_HAS_BIAS, 0) != 0;
388  }
389  uint32_t groups() const {
390  return GetField<uint32_t>(VT_GROUPS, 0);
391  }
392  const tt::target::DeviceRef *device() const {
393  return GetPointer<const tt::target::DeviceRef *>(VT_DEVICE);
394  }
395  const tt::target::ttnn::Conv2dConfig *conv2d_config() const {
396  return GetPointer<const tt::target::ttnn::Conv2dConfig *>(VT_CONV2D_CONFIG);
397  }
398  bool Verify(::flatbuffers::Verifier &verifier) const {
399  return VerifyTableStart(verifier) &&
400  VerifyOffset(verifier, VT_WEIGHT_TENSOR) &&
401  verifier.VerifyTable(weight_tensor()) &&
402  VerifyOffset(verifier, VT_OUT) &&
403  verifier.VerifyTable(out()) &&
404  VerifyOffset(verifier, VT_INPUT_MEMORY_CONFIG) &&
405  verifier.VerifyTable(input_memory_config()) &&
406  VerifyField<uint16_t>(verifier, VT_INPUT_TENSOR_LAYOUT, 2) &&
407  VerifyOffset(verifier, VT_WEIGHTS_FORMAT) &&
408  verifier.VerifyString(weights_format()) &&
409  VerifyField<uint32_t>(verifier, VT_IN_CHANNELS, 4) &&
410  VerifyField<uint32_t>(verifier, VT_OUT_CHANNELS, 4) &&
411  VerifyField<uint32_t>(verifier, VT_BATCH_SIZE, 4) &&
412  VerifyField<uint32_t>(verifier, VT_INPUT_HEIGHT, 4) &&
413  VerifyField<uint32_t>(verifier, VT_INPUT_WIDTH, 4) &&
414  VerifyOffset(verifier, VT_KERNEL_SIZE) &&
415  verifier.VerifyVector(kernel_size()) &&
416  VerifyOffset(verifier, VT_STRIDE) &&
417  verifier.VerifyVector(stride()) &&
418  VerifyOffset(verifier, VT_PADDING) &&
419  verifier.VerifyVector(padding()) &&
420  VerifyOffset(verifier, VT_DILATION) &&
421  verifier.VerifyVector(dilation()) &&
422  VerifyField<uint8_t>(verifier, VT_HAS_BIAS, 1) &&
423  VerifyField<uint32_t>(verifier, VT_GROUPS, 4) &&
424  VerifyOffset(verifier, VT_DEVICE) &&
425  verifier.VerifyTable(device()) &&
426  VerifyOffset(verifier, VT_CONV2D_CONFIG) &&
427  verifier.VerifyTable(conv2d_config()) &&
428  verifier.EndTable();
429  }
430 };
431 
434  ::flatbuffers::FlatBufferBuilder &fbb_;
435  ::flatbuffers::uoffset_t start_;
436  void add_weight_tensor(::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight_tensor) {
437  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_WEIGHT_TENSOR, weight_tensor);
438  }
439  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
440  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_OUT, out);
441  }
442  void add_input_memory_config(::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> input_memory_config) {
443  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_INPUT_MEMORY_CONFIG, input_memory_config);
444  }
446  fbb_.AddElement<uint16_t>(PrepareConv2dWeightsOp::VT_INPUT_TENSOR_LAYOUT, static_cast<uint16_t>(input_tensor_layout), 0);
447  }
448  void add_weights_format(::flatbuffers::Offset<::flatbuffers::String> weights_format) {
449  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_WEIGHTS_FORMAT, weights_format);
450  }
451  void add_in_channels(uint32_t in_channels) {
452  fbb_.AddElement<uint32_t>(PrepareConv2dWeightsOp::VT_IN_CHANNELS, in_channels, 0);
453  }
454  void add_out_channels(uint32_t out_channels) {
455  fbb_.AddElement<uint32_t>(PrepareConv2dWeightsOp::VT_OUT_CHANNELS, out_channels, 0);
456  }
457  void add_batch_size(uint32_t batch_size) {
458  fbb_.AddElement<uint32_t>(PrepareConv2dWeightsOp::VT_BATCH_SIZE, batch_size, 0);
459  }
460  void add_input_height(uint32_t input_height) {
461  fbb_.AddElement<uint32_t>(PrepareConv2dWeightsOp::VT_INPUT_HEIGHT, input_height, 0);
462  }
463  void add_input_width(uint32_t input_width) {
464  fbb_.AddElement<uint32_t>(PrepareConv2dWeightsOp::VT_INPUT_WIDTH, input_width, 0);
465  }
466  void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size) {
467  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_KERNEL_SIZE, kernel_size);
468  }
469  void add_stride(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride) {
470  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_STRIDE, stride);
471  }
472  void add_padding(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding) {
473  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_PADDING, padding);
474  }
475  void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation) {
476  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_DILATION, dilation);
477  }
478  void add_has_bias(bool has_bias) {
479  fbb_.AddElement<uint8_t>(PrepareConv2dWeightsOp::VT_HAS_BIAS, static_cast<uint8_t>(has_bias), 0);
480  }
481  void add_groups(uint32_t groups) {
482  fbb_.AddElement<uint32_t>(PrepareConv2dWeightsOp::VT_GROUPS, groups, 0);
483  }
484  void add_device(::flatbuffers::Offset<tt::target::DeviceRef> device) {
485  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_DEVICE, device);
486  }
487  void add_conv2d_config(::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> conv2d_config) {
488  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_CONV2D_CONFIG, conv2d_config);
489  }
490  explicit PrepareConv2dWeightsOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
491  : fbb_(_fbb) {
492  start_ = fbb_.StartTable();
493  }
494  ::flatbuffers::Offset<PrepareConv2dWeightsOp> Finish() {
495  const auto end = fbb_.EndTable(start_);
496  auto o = ::flatbuffers::Offset<PrepareConv2dWeightsOp>(end);
497  return o;
498  }
499 };
500 
501 inline ::flatbuffers::Offset<PrepareConv2dWeightsOp> CreatePrepareConv2dWeightsOp(
502  ::flatbuffers::FlatBufferBuilder &_fbb,
503  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight_tensor = 0,
504  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
505  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> input_memory_config = 0,
507  ::flatbuffers::Offset<::flatbuffers::String> weights_format = 0,
508  uint32_t in_channels = 0,
509  uint32_t out_channels = 0,
510  uint32_t batch_size = 0,
511  uint32_t input_height = 0,
512  uint32_t input_width = 0,
513  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size = 0,
514  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride = 0,
515  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding = 0,
516  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation = 0,
517  bool has_bias = false,
518  uint32_t groups = 0,
519  ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
520  ::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> conv2d_config = 0) {
521  PrepareConv2dWeightsOpBuilder builder_(_fbb);
522  builder_.add_conv2d_config(conv2d_config);
523  builder_.add_device(device);
524  builder_.add_groups(groups);
525  builder_.add_dilation(dilation);
526  builder_.add_padding(padding);
527  builder_.add_stride(stride);
528  builder_.add_kernel_size(kernel_size);
529  builder_.add_input_width(input_width);
530  builder_.add_input_height(input_height);
531  builder_.add_batch_size(batch_size);
532  builder_.add_out_channels(out_channels);
533  builder_.add_in_channels(in_channels);
534  builder_.add_weights_format(weights_format);
535  builder_.add_input_memory_config(input_memory_config);
536  builder_.add_out(out);
537  builder_.add_weight_tensor(weight_tensor);
538  builder_.add_input_tensor_layout(input_tensor_layout);
539  builder_.add_has_bias(has_bias);
540  return builder_.Finish();
541 }
542 
545  static auto constexpr Create = CreatePrepareConv2dWeightsOp;
546 };
547 
548 inline ::flatbuffers::Offset<PrepareConv2dWeightsOp> CreatePrepareConv2dWeightsOpDirect(
549  ::flatbuffers::FlatBufferBuilder &_fbb,
550  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight_tensor = 0,
551  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
552  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> input_memory_config = 0,
554  const char *weights_format = nullptr,
555  uint32_t in_channels = 0,
556  uint32_t out_channels = 0,
557  uint32_t batch_size = 0,
558  uint32_t input_height = 0,
559  uint32_t input_width = 0,
560  const std::vector<int32_t> *kernel_size = nullptr,
561  const std::vector<int32_t> *stride = nullptr,
562  const std::vector<int32_t> *padding = nullptr,
563  const std::vector<int32_t> *dilation = nullptr,
564  bool has_bias = false,
565  uint32_t groups = 0,
566  ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
567  ::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> conv2d_config = 0) {
568  auto weights_format__ = weights_format ? _fbb.CreateString(weights_format) : 0;
569  auto kernel_size__ = kernel_size ? _fbb.CreateVector<int32_t>(*kernel_size) : 0;
570  auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
571  auto padding__ = padding ? _fbb.CreateVector<int32_t>(*padding) : 0;
572  auto dilation__ = dilation ? _fbb.CreateVector<int32_t>(*dilation) : 0;
574  _fbb,
575  weight_tensor,
576  out,
577  input_memory_config,
578  input_tensor_layout,
579  weights_format__,
580  in_channels,
581  out_channels,
582  batch_size,
583  input_height,
584  input_width,
585  kernel_size__,
586  stride__,
587  padding__,
588  dilation__,
589  has_bias,
590  groups,
591  device,
592  conv2d_config);
593 }
594 
595 struct Conv2dOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
597  struct Traits;
598  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
599  VT_INPUT = 4,
601  VT_BIAS = 8,
602  VT_OUT = 10,
603  VT_DEVICE = 12,
604  VT_IN_CHANNELS = 14,
605  VT_OUT_CHANNELS = 16,
606  VT_BATCH_SIZE = 18,
607  VT_INPUT_HEIGHT = 20,
608  VT_INPUT_WIDTH = 22,
609  VT_KERNEL_SIZE = 24,
610  VT_STRIDE = 26,
611  VT_PADDING = 28,
612  VT_DILATION = 30,
613  VT_GROUPS = 32,
614  VT_CONV2D_CONFIG = 34
615  };
616  const tt::target::ttnn::TensorRef *input() const {
617  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_INPUT);
618  }
619  const tt::target::ttnn::TensorRef *weight() const {
620  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_WEIGHT);
621  }
622  const tt::target::ttnn::TensorRef *bias() const {
623  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_BIAS);
624  }
625  const tt::target::ttnn::TensorRef *out() const {
626  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
627  }
628  const tt::target::DeviceRef *device() const {
629  return GetPointer<const tt::target::DeviceRef *>(VT_DEVICE);
630  }
631  uint32_t in_channels() const {
632  return GetField<uint32_t>(VT_IN_CHANNELS, 0);
633  }
634  uint32_t out_channels() const {
635  return GetField<uint32_t>(VT_OUT_CHANNELS, 0);
636  }
637  uint32_t batch_size() const {
638  return GetField<uint32_t>(VT_BATCH_SIZE, 0);
639  }
640  uint32_t input_height() const {
641  return GetField<uint32_t>(VT_INPUT_HEIGHT, 0);
642  }
643  uint32_t input_width() const {
644  return GetField<uint32_t>(VT_INPUT_WIDTH, 0);
645  }
646  const ::flatbuffers::Vector<int32_t> *kernel_size() const {
647  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_KERNEL_SIZE);
648  }
649  const ::flatbuffers::Vector<int32_t> *stride() const {
650  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_STRIDE);
651  }
652  const ::flatbuffers::Vector<int32_t> *padding() const {
653  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_PADDING);
654  }
655  const ::flatbuffers::Vector<int32_t> *dilation() const {
656  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_DILATION);
657  }
658  uint32_t groups() const {
659  return GetField<uint32_t>(VT_GROUPS, 0);
660  }
661  const tt::target::ttnn::Conv2dConfig *conv2d_config() const {
662  return GetPointer<const tt::target::ttnn::Conv2dConfig *>(VT_CONV2D_CONFIG);
663  }
664  bool Verify(::flatbuffers::Verifier &verifier) const {
665  return VerifyTableStart(verifier) &&
666  VerifyOffset(verifier, VT_INPUT) &&
667  verifier.VerifyTable(input()) &&
668  VerifyOffset(verifier, VT_WEIGHT) &&
669  verifier.VerifyTable(weight()) &&
670  VerifyOffset(verifier, VT_BIAS) &&
671  verifier.VerifyTable(bias()) &&
672  VerifyOffset(verifier, VT_OUT) &&
673  verifier.VerifyTable(out()) &&
674  VerifyOffset(verifier, VT_DEVICE) &&
675  verifier.VerifyTable(device()) &&
676  VerifyField<uint32_t>(verifier, VT_IN_CHANNELS, 4) &&
677  VerifyField<uint32_t>(verifier, VT_OUT_CHANNELS, 4) &&
678  VerifyField<uint32_t>(verifier, VT_BATCH_SIZE, 4) &&
679  VerifyField<uint32_t>(verifier, VT_INPUT_HEIGHT, 4) &&
680  VerifyField<uint32_t>(verifier, VT_INPUT_WIDTH, 4) &&
681  VerifyOffset(verifier, VT_KERNEL_SIZE) &&
682  verifier.VerifyVector(kernel_size()) &&
683  VerifyOffset(verifier, VT_STRIDE) &&
684  verifier.VerifyVector(stride()) &&
685  VerifyOffset(verifier, VT_PADDING) &&
686  verifier.VerifyVector(padding()) &&
687  VerifyOffset(verifier, VT_DILATION) &&
688  verifier.VerifyVector(dilation()) &&
689  VerifyField<uint32_t>(verifier, VT_GROUPS, 4) &&
690  VerifyOffset(verifier, VT_CONV2D_CONFIG) &&
691  verifier.VerifyTable(conv2d_config()) &&
692  verifier.EndTable();
693  }
694 };
695 
697  typedef Conv2dOp Table;
698  ::flatbuffers::FlatBufferBuilder &fbb_;
699  ::flatbuffers::uoffset_t start_;
700  void add_input(::flatbuffers::Offset<tt::target::ttnn::TensorRef> input) {
701  fbb_.AddOffset(Conv2dOp::VT_INPUT, input);
702  }
703  void add_weight(::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight) {
704  fbb_.AddOffset(Conv2dOp::VT_WEIGHT, weight);
705  }
706  void add_bias(::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias) {
707  fbb_.AddOffset(Conv2dOp::VT_BIAS, bias);
708  }
709  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
710  fbb_.AddOffset(Conv2dOp::VT_OUT, out);
711  }
712  void add_device(::flatbuffers::Offset<tt::target::DeviceRef> device) {
713  fbb_.AddOffset(Conv2dOp::VT_DEVICE, device);
714  }
715  void add_in_channels(uint32_t in_channels) {
716  fbb_.AddElement<uint32_t>(Conv2dOp::VT_IN_CHANNELS, in_channels, 0);
717  }
718  void add_out_channels(uint32_t out_channels) {
719  fbb_.AddElement<uint32_t>(Conv2dOp::VT_OUT_CHANNELS, out_channels, 0);
720  }
721  void add_batch_size(uint32_t batch_size) {
722  fbb_.AddElement<uint32_t>(Conv2dOp::VT_BATCH_SIZE, batch_size, 0);
723  }
724  void add_input_height(uint32_t input_height) {
725  fbb_.AddElement<uint32_t>(Conv2dOp::VT_INPUT_HEIGHT, input_height, 0);
726  }
727  void add_input_width(uint32_t input_width) {
728  fbb_.AddElement<uint32_t>(Conv2dOp::VT_INPUT_WIDTH, input_width, 0);
729  }
730  void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size) {
731  fbb_.AddOffset(Conv2dOp::VT_KERNEL_SIZE, kernel_size);
732  }
733  void add_stride(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride) {
734  fbb_.AddOffset(Conv2dOp::VT_STRIDE, stride);
735  }
736  void add_padding(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding) {
737  fbb_.AddOffset(Conv2dOp::VT_PADDING, padding);
738  }
739  void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation) {
740  fbb_.AddOffset(Conv2dOp::VT_DILATION, dilation);
741  }
742  void add_groups(uint32_t groups) {
743  fbb_.AddElement<uint32_t>(Conv2dOp::VT_GROUPS, groups, 0);
744  }
745  void add_conv2d_config(::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> conv2d_config) {
746  fbb_.AddOffset(Conv2dOp::VT_CONV2D_CONFIG, conv2d_config);
747  }
748  explicit Conv2dOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
749  : fbb_(_fbb) {
750  start_ = fbb_.StartTable();
751  }
752  ::flatbuffers::Offset<Conv2dOp> Finish() {
753  const auto end = fbb_.EndTable(start_);
754  auto o = ::flatbuffers::Offset<Conv2dOp>(end);
755  return o;
756  }
757 };
758 
759 inline ::flatbuffers::Offset<Conv2dOp> CreateConv2dOp(
760  ::flatbuffers::FlatBufferBuilder &_fbb,
761  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> input = 0,
762  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight = 0,
763  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
764  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
765  ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
766  uint32_t in_channels = 0,
767  uint32_t out_channels = 0,
768  uint32_t batch_size = 0,
769  uint32_t input_height = 0,
770  uint32_t input_width = 0,
771  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size = 0,
772  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride = 0,
773  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding = 0,
774  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation = 0,
775  uint32_t groups = 0,
776  ::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> conv2d_config = 0) {
777  Conv2dOpBuilder builder_(_fbb);
778  builder_.add_conv2d_config(conv2d_config);
779  builder_.add_groups(groups);
780  builder_.add_dilation(dilation);
781  builder_.add_padding(padding);
782  builder_.add_stride(stride);
783  builder_.add_kernel_size(kernel_size);
784  builder_.add_input_width(input_width);
785  builder_.add_input_height(input_height);
786  builder_.add_batch_size(batch_size);
787  builder_.add_out_channels(out_channels);
788  builder_.add_in_channels(in_channels);
789  builder_.add_device(device);
790  builder_.add_out(out);
791  builder_.add_bias(bias);
792  builder_.add_weight(weight);
793  builder_.add_input(input);
794  return builder_.Finish();
795 }
796 
798  using type = Conv2dOp;
799  static auto constexpr Create = CreateConv2dOp;
800 };
801 
// "Direct" variant of CreateConv2dOp: accepts plain std::vector<int32_t>
// pointers, serializes each non-null vector into `_fbb` itself, then
// delegates to CreateConv2dOp. A nullptr vector yields a 0 offset, i.e. the
// corresponding field is left absent from the buffer.
802 inline ::flatbuffers::Offset<Conv2dOp> CreateConv2dOpDirect(
803  ::flatbuffers::FlatBufferBuilder &_fbb,
804  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> input = 0,
805  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight = 0,
806  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
807  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
808  ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
809  uint32_t in_channels = 0,
810  uint32_t out_channels = 0,
811  uint32_t batch_size = 0,
812  uint32_t input_height = 0,
813  uint32_t input_width = 0,
814  const std::vector<int32_t> *kernel_size = nullptr,
815  const std::vector<int32_t> *stride = nullptr,
816  const std::vector<int32_t> *padding = nullptr,
817  const std::vector<int32_t> *dilation = nullptr,
818  uint32_t groups = 0,
819  ::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> conv2d_config = 0) {
820  auto kernel_size__ = kernel_size ? _fbb.CreateVector<int32_t>(*kernel_size) : 0;
821  auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
822  auto padding__ = padding ? _fbb.CreateVector<int32_t>(*padding) : 0;
823  auto dilation__ = dilation ? _fbb.CreateVector<int32_t>(*dilation) : 0;
// NOTE(review): the doc extraction dropped original line 824, which (per the
// argument list that follows) is the call line
// `return tt::target::ttnn::CreateConv2dOp(` — confirm against the real
// generated header.
825  _fbb,
826  input,
827  weight,
828  bias,
829  out,
830  device,
831  in_channels,
832  out_channels,
833  batch_size,
834  input_height,
835  input_width,
836  kernel_size__,
837  stride__,
838  padding__,
839  dilation__,
840  groups,
841  conv2d_config);
842 }
843 
// Read-side accessor table for a serialized ConvTranspose2dOp. Wraps a raw
// FlatBuffers table: pointer/vector getters return nullptr when the field is
// absent; scalar getters fall back to the 0 default.
// NOTE(review): the doc extraction dropped original line 845 (the
// `typedef ConvTranspose2dOpBuilder Builder;` alias, per the member index
// below).
844 struct ConvTranspose2dOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
846  struct Traits;
// Byte offsets of each field's slot in the table vtable.
847  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
848  VT_INPUT = 4,
849  VT_WEIGHT = 6,
850  VT_BIAS = 8,
851  VT_OUT = 10,
852  VT_DEVICE = 12,
853  VT_IN_CHANNELS = 14,
854  VT_OUT_CHANNELS = 16,
855  VT_BATCH_SIZE = 18,
856  VT_INPUT_HEIGHT = 20,
857  VT_INPUT_WIDTH = 22,
858  VT_KERNEL_SIZE = 24,
859  VT_STRIDE = 26,
860  VT_PADDING = 28,
// NOTE(review): original line 861 (`VT_OUTPUT_PADDING = 30,` per the member
// index below) was dropped by the doc extraction — the enum in the real
// header is contiguous.
862  VT_DILATION = 32,
863  VT_GROUPS = 34
864  };
865  const tt::target::ttnn::TensorRef *input() const {
866  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_INPUT);
867  }
868  const tt::target::ttnn::TensorRef *weight() const {
869  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_WEIGHT);
870  }
871  const tt::target::ttnn::TensorRef *bias() const {
872  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_BIAS);
873  }
874  const tt::target::ttnn::TensorRef *out() const {
875  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
876  }
877  const tt::target::DeviceRef *device() const {
878  return GetPointer<const tt::target::DeviceRef *>(VT_DEVICE);
879  }
880  uint32_t in_channels() const {
881  return GetField<uint32_t>(VT_IN_CHANNELS, 0);
882  }
883  uint32_t out_channels() const {
884  return GetField<uint32_t>(VT_OUT_CHANNELS, 0);
885  }
886  uint32_t batch_size() const {
887  return GetField<uint32_t>(VT_BATCH_SIZE, 0);
888  }
889  uint32_t input_height() const {
890  return GetField<uint32_t>(VT_INPUT_HEIGHT, 0);
891  }
892  uint32_t input_width() const {
893  return GetField<uint32_t>(VT_INPUT_WIDTH, 0);
894  }
895  const ::flatbuffers::Vector<int32_t> *kernel_size() const {
896  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_KERNEL_SIZE);
897  }
898  const ::flatbuffers::Vector<int32_t> *stride() const {
899  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_STRIDE);
900  }
901  const ::flatbuffers::Vector<int32_t> *padding() const {
902  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_PADDING);
903  }
904  const ::flatbuffers::Vector<int32_t> *output_padding() const {
905  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_OUTPUT_PADDING);
906  }
907  const ::flatbuffers::Vector<int32_t> *dilation() const {
908  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_DILATION);
909  }
910  uint32_t groups() const {
911  return GetField<uint32_t>(VT_GROUPS, 0);
912  }
// Structural validation for untrusted buffers: checks the table header, then
// each offset field and the tables/vectors it points to, and each scalar's
// alignment/size (the trailing `4` is sizeof(uint32_t)).
913  bool Verify(::flatbuffers::Verifier &verifier) const {
914  return VerifyTableStart(verifier) &&
915  VerifyOffset(verifier, VT_INPUT) &&
916  verifier.VerifyTable(input()) &&
917  VerifyOffset(verifier, VT_WEIGHT) &&
918  verifier.VerifyTable(weight()) &&
919  VerifyOffset(verifier, VT_BIAS) &&
920  verifier.VerifyTable(bias()) &&
921  VerifyOffset(verifier, VT_OUT) &&
922  verifier.VerifyTable(out()) &&
923  VerifyOffset(verifier, VT_DEVICE) &&
924  verifier.VerifyTable(device()) &&
925  VerifyField<uint32_t>(verifier, VT_IN_CHANNELS, 4) &&
926  VerifyField<uint32_t>(verifier, VT_OUT_CHANNELS, 4) &&
927  VerifyField<uint32_t>(verifier, VT_BATCH_SIZE, 4) &&
928  VerifyField<uint32_t>(verifier, VT_INPUT_HEIGHT, 4) &&
929  VerifyField<uint32_t>(verifier, VT_INPUT_WIDTH, 4) &&
930  VerifyOffset(verifier, VT_KERNEL_SIZE) &&
931  verifier.VerifyVector(kernel_size()) &&
932  VerifyOffset(verifier, VT_STRIDE) &&
933  verifier.VerifyVector(stride()) &&
934  VerifyOffset(verifier, VT_PADDING) &&
935  verifier.VerifyVector(padding()) &&
936  VerifyOffset(verifier, VT_OUTPUT_PADDING) &&
937  verifier.VerifyVector(output_padding()) &&
938  VerifyOffset(verifier, VT_DILATION) &&
939  verifier.VerifyVector(dilation()) &&
940  VerifyField<uint32_t>(verifier, VT_GROUPS, 4) &&
941  verifier.EndTable();
942  }
943 };
944 
// Builder for serializing one ConvTranspose2dOp table. Each add_* records one
// field into the table started in the constructor; Finish() closes the table
// and returns its offset. AddOffset skips null (0) offsets and AddElement
// skips values equal to the default, so unset fields take no buffer space.
// NOTE(review): the doc extraction dropped original lines 945-946
// (`struct ConvTranspose2dOpBuilder {` and `typedef ConvTranspose2dOp Table;`
// per the member index below), so the struct's opening is missing here.
947  ::flatbuffers::FlatBufferBuilder &fbb_;
948  ::flatbuffers::uoffset_t start_;
949  void add_input(::flatbuffers::Offset<tt::target::ttnn::TensorRef> input) {
950  fbb_.AddOffset(ConvTranspose2dOp::VT_INPUT, input);
951  }
952  void add_weight(::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight) {
953  fbb_.AddOffset(ConvTranspose2dOp::VT_WEIGHT, weight);
954  }
955  void add_bias(::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias) {
956  fbb_.AddOffset(ConvTranspose2dOp::VT_BIAS, bias);
957  }
958  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
959  fbb_.AddOffset(ConvTranspose2dOp::VT_OUT, out);
960  }
961  void add_device(::flatbuffers::Offset<tt::target::DeviceRef> device) {
962  fbb_.AddOffset(ConvTranspose2dOp::VT_DEVICE, device);
963  }
964  void add_in_channels(uint32_t in_channels) {
965  fbb_.AddElement<uint32_t>(ConvTranspose2dOp::VT_IN_CHANNELS, in_channels, 0);
966  }
967  void add_out_channels(uint32_t out_channels) {
968  fbb_.AddElement<uint32_t>(ConvTranspose2dOp::VT_OUT_CHANNELS, out_channels, 0);
969  }
970  void add_batch_size(uint32_t batch_size) {
971  fbb_.AddElement<uint32_t>(ConvTranspose2dOp::VT_BATCH_SIZE, batch_size, 0);
972  }
973  void add_input_height(uint32_t input_height) {
974  fbb_.AddElement<uint32_t>(ConvTranspose2dOp::VT_INPUT_HEIGHT, input_height, 0);
975  }
976  void add_input_width(uint32_t input_width) {
977  fbb_.AddElement<uint32_t>(ConvTranspose2dOp::VT_INPUT_WIDTH, input_width, 0);
978  }
979  void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size) {
980  fbb_.AddOffset(ConvTranspose2dOp::VT_KERNEL_SIZE, kernel_size);
981  }
982  void add_stride(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride) {
983  fbb_.AddOffset(ConvTranspose2dOp::VT_STRIDE, stride);
984  }
985  void add_padding(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding) {
986  fbb_.AddOffset(ConvTranspose2dOp::VT_PADDING, padding);
987  }
988  void add_output_padding(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> output_padding) {
989  fbb_.AddOffset(ConvTranspose2dOp::VT_OUTPUT_PADDING, output_padding);
990  }
991  void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation) {
992  fbb_.AddOffset(ConvTranspose2dOp::VT_DILATION, dilation);
993  }
994  void add_groups(uint32_t groups) {
995  fbb_.AddElement<uint32_t>(ConvTranspose2dOp::VT_GROUPS, groups, 0);
996  }
997  explicit ConvTranspose2dOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
998  : fbb_(_fbb) {
999  start_ = fbb_.StartTable();
1000  }
1001  ::flatbuffers::Offset<ConvTranspose2dOp> Finish() {
1002  const auto end = fbb_.EndTable(start_);
1003  auto o = ::flatbuffers::Offset<ConvTranspose2dOp>(end);
1004  return o;
1005  }
1006 };
1007 
// Convenience factory for a ConvTranspose2dOp table: records the given child
// offsets and scalar fields into `_fbb` and returns the finished table's
// offset. All offset parameters must already have been serialized into the
// SAME FlatBufferBuilder; an offset of 0 means "field absent".
1008 inline ::flatbuffers::Offset<ConvTranspose2dOp> CreateConvTranspose2dOp(
1009  ::flatbuffers::FlatBufferBuilder &_fbb,
1010  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> input = 0,
1011  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight = 0,
1012  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
1013  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
1014  ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
1015  uint32_t in_channels = 0,
1016  uint32_t out_channels = 0,
1017  uint32_t batch_size = 0,
1018  uint32_t input_height = 0,
1019  uint32_t input_width = 0,
1020  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size = 0,
1021  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride = 0,
1022  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding = 0,
1023  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> output_padding = 0,
1024  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation = 0,
1025  uint32_t groups = 0) {
1026  ConvTranspose2dOpBuilder builder_(_fbb);
// Fields are added in reverse of declaration order — flatc emits size-sorted
// adds to minimize vtable/alignment padding; do not reorder by hand.
1027  builder_.add_groups(groups);
1028  builder_.add_dilation(dilation);
1029  builder_.add_output_padding(output_padding);
1030  builder_.add_padding(padding);
1031  builder_.add_stride(stride);
1032  builder_.add_kernel_size(kernel_size);
1033  builder_.add_input_width(input_width);
1034  builder_.add_input_height(input_height);
1035  builder_.add_batch_size(batch_size);
1036  builder_.add_out_channels(out_channels);
1037  builder_.add_in_channels(in_channels);
1038  builder_.add_device(device);
1039  builder_.add_out(out);
1040  builder_.add_bias(bias);
1041  builder_.add_weight(weight);
1042  builder_.add_input(input);
1043  return builder_.Finish();
1044 }
1045 
// Tail of ConvTranspose2dOp::Traits (table type + Create factory for generic
// FlatBuffers helpers). NOTE(review): the doc extraction dropped original
// lines 1046-1047 (`struct ConvTranspose2dOp::Traits {` and
// `using type = ConvTranspose2dOp;` per the member index below), so only the
// Create member and the closing brace survive in this listing.
1048  static auto constexpr Create = CreateConvTranspose2dOp;
1049 };
1050 
// "Direct" variant of CreateConvTranspose2dOp: accepts plain
// std::vector<int32_t> pointers, serializes each non-null vector into `_fbb`
// itself, then delegates to CreateConvTranspose2dOp. A nullptr vector yields
// a 0 offset, i.e. the corresponding field is left absent from the buffer.
1051 inline ::flatbuffers::Offset<ConvTranspose2dOp> CreateConvTranspose2dOpDirect(
1052  ::flatbuffers::FlatBufferBuilder &_fbb,
1053  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> input = 0,
1054  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight = 0,
1055  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
1056  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
1057  ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
1058  uint32_t in_channels = 0,
1059  uint32_t out_channels = 0,
1060  uint32_t batch_size = 0,
1061  uint32_t input_height = 0,
1062  uint32_t input_width = 0,
1063  const std::vector<int32_t> *kernel_size = nullptr,
1064  const std::vector<int32_t> *stride = nullptr,
1065  const std::vector<int32_t> *padding = nullptr,
1066  const std::vector<int32_t> *output_padding = nullptr,
1067  const std::vector<int32_t> *dilation = nullptr,
1068  uint32_t groups = 0) {
1069  auto kernel_size__ = kernel_size ? _fbb.CreateVector<int32_t>(*kernel_size) : 0;
1070  auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
1071  auto padding__ = padding ? _fbb.CreateVector<int32_t>(*padding) : 0;
1072  auto output_padding__ = output_padding ? _fbb.CreateVector<int32_t>(*output_padding) : 0;
1073  auto dilation__ = dilation ? _fbb.CreateVector<int32_t>(*dilation) : 0;
// NOTE(review): the doc extraction dropped original line 1074, which (per the
// argument list that follows) is the call line
// `return tt::target::ttnn::CreateConvTranspose2dOp(` — confirm against the
// real generated header.
1075  _fbb,
1076  input,
1077  weight,
1078  bias,
1079  out,
1080  device,
1081  in_channels,
1082  out_channels,
1083  batch_size,
1084  input_height,
1085  input_width,
1086  kernel_size__,
1087  stride__,
1088  padding__,
1089  output_padding__,
1090  dilation__,
1091  groups);
1092 }
1093 
1094 } // namespace ttnn
1095 } // namespace target
1096 } // namespace tt
1097 
1098 #endif // FLATBUFFERS_GENERATED_CONV_TT_TARGET_TTNN_H_
VT_PREPROCESS_WEIGHTS_ON_DEVICE
Definition: conv_generated.h:53
VT_INPUT
Definition: conv_generated.h:599
VT_DTYPE
Definition: conv_generated.h:39
VT_REALLOCATE_HALO_OUTPUT
Definition: conv_generated.h:44
VT_DILATION
Definition: conv_generated.h:338
VT_WEIGHT_TENSOR
Definition: conv_generated.h:325
VT_ACT_BLOCK_W_DIV
Definition: conv_generated.h:46
VT_PADDING
Definition: conv_generated.h:337
VT_WEIGHT
Definition: conv_generated.h:600
VT_ENABLE_WEIGHTS_DOUBLE_BUFFER
Definition: conv_generated.h:56
VT_OVERRIDE_SHARDING_CONFIG
Definition: conv_generated.h:48
VT_STRIDE
Definition: conv_generated.h:336
VT_KERNEL_SIZE
Definition: conv_generated.h:335
VT_BATCH_SIZE
Definition: conv_generated.h:332
VT_CORE_GRID
Definition: conv_generated.h:50
VT_GROUPS
Definition: conv_generated.h:340
VT_WEIGHTS_FORMAT
Definition: conv_generated.h:329
VT_ENABLE_SPLIT_READER
Definition: conv_generated.h:57
VT_DEALLOCATE_ACTIVATION
Definition: conv_generated.h:43
VT_TRANSPOSE_SHARDS
Definition: conv_generated.h:51
VT_INPUT_CHANNELS_ALIGNMENT
Definition: conv_generated.h:42
VT_SHARD_LAYOUT
Definition: conv_generated.h:49
VT_OUTPUT_LAYOUT
Definition: conv_generated.h:52
VT_WEIGHTS_DTYPE
Definition: conv_generated.h:40
VT_INPUT_HEIGHT
Definition: conv_generated.h:333
VT_INPUT_MEMORY_CONFIG
Definition: conv_generated.h:327
VT_ENABLE_ACT_DOUBLE_BUFFER
Definition: conv_generated.h:55
VT_IN_CHANNELS
Definition: conv_generated.h:330
VT_DEVICE
Definition: conv_generated.h:341
VT_INPUT_WIDTH
Definition: conv_generated.h:334
VT_ACT_BLOCK_H_OVERRIDE
Definition: conv_generated.h:45
VT_BIAS
Definition: conv_generated.h:601
VT_OUT
Definition: conv_generated.h:326
VT_RESHARD_IF_NOT_OPTIMAL
Definition: conv_generated.h:47
VT_OUTPUT_PADDING
Definition: conv_generated.h:861
VT_ACTIVATION
Definition: conv_generated.h:41
VT_OUT_CHANNELS
Definition: conv_generated.h:331
VT_ALWAYS_PREPROCESS_WEIGHTS
Definition: conv_generated.h:54
VT_INPUT_TENSOR_LAYOUT
Definition: conv_generated.h:328
VT_HAS_BIAS
Definition: conv_generated.h:339
inline ::flatbuffers::Offset< Conv2dOp > CreateConv2dOp(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, uint32_t groups=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0)
Definition: conv_generated.h:759
inline ::flatbuffers::Offset< Conv2dOp > CreateConv2dOpDirect(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *dilation=nullptr, uint32_t groups=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0)
Definition: conv_generated.h:802
inline ::flatbuffers::Offset< Conv2dConfig > CreateConv2dConfigDirect(::flatbuffers::FlatBufferBuilder &_fbb, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::DataType weights_dtype=tt::target::DataType::Float32, const char *activation=nullptr, uint32_t input_channels_alignment=0, bool deallocate_activation=false, bool reallocate_halo_output=false, uint32_t act_block_h_override=0, uint32_t act_block_w_div=0, bool reshard_if_not_optimal=false, bool override_sharding_config=false, ::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout > shard_layout=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > core_grid=0, bool transpose_shards=false, tt::target::TensorLayout output_layout=tt::target::TensorLayout::RowMajor, bool preprocess_weights_on_device=false, bool always_preprocess_weights=false, bool enable_act_double_buffer=false, bool enable_weights_double_buffer=false, bool enable_split_reader=false, bool enable_subblock_padding=false)
Definition: conv_generated.h:274
inline ::flatbuffers::Offset< PrepareConv2dWeightsOp > CreatePrepareConv2dWeightsOp(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight_tensor=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > input_memory_config=0, tt::target::TensorLayout input_tensor_layout=tt::target::TensorLayout::RowMajor, ::flatbuffers::Offset<::flatbuffers::String > weights_format=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, bool has_bias=false, uint32_t groups=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0)
Definition: conv_generated.h:501
inline ::flatbuffers::Offset< PrepareConv2dWeightsOp > CreatePrepareConv2dWeightsOpDirect(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight_tensor=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > input_memory_config=0, tt::target::TensorLayout input_tensor_layout=tt::target::TensorLayout::RowMajor, const char *weights_format=nullptr, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *dilation=nullptr, bool has_bias=false, uint32_t groups=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0)
Definition: conv_generated.h:548
inline ::flatbuffers::Offset< ConvTranspose2dOp > CreateConvTranspose2dOpDirect(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *output_padding=nullptr, const std::vector< int32_t > *dilation=nullptr, uint32_t groups=0)
Definition: conv_generated.h:1051
TensorMemoryLayout
Definition: types_generated.h:62
inline ::flatbuffers::Offset< Conv2dConfig > CreateConv2dConfig(::flatbuffers::FlatBufferBuilder &_fbb, tt::target::DataType dtype=tt::target::DataType::Float32, tt::target::DataType weights_dtype=tt::target::DataType::Float32, ::flatbuffers::Offset<::flatbuffers::String > activation=0, uint32_t input_channels_alignment=0, bool deallocate_activation=false, bool reallocate_halo_output=false, uint32_t act_block_h_override=0, uint32_t act_block_w_div=0, bool reshard_if_not_optimal=false, bool override_sharding_config=false, ::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout > shard_layout=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > core_grid=0, bool transpose_shards=false, tt::target::TensorLayout output_layout=tt::target::TensorLayout::RowMajor, bool preprocess_weights_on_device=false, bool always_preprocess_weights=false, bool enable_act_double_buffer=false, bool enable_weights_double_buffer=false, bool enable_split_reader=false, bool enable_subblock_padding=false)
Definition: conv_generated.h:223
inline ::flatbuffers::Offset< ConvTranspose2dOp > CreateConvTranspose2dOp(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> output_padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, uint32_t groups=0)
Definition: conv_generated.h:1008
TensorLayout
Definition: types_generated.h:250
DataType
Definition: types_generated.h:81
Definition: debug_info_generated.h:18
Definition: debug_info_generated.h:36
Definition: conv_generated.h:148
void add_enable_subblock_padding(bool enable_subblock_padding)
Definition: conv_generated.h:209
::flatbuffers::FlatBufferBuilder & fbb_
Definition: conv_generated.h:150
void add_activation(::flatbuffers::Offset<::flatbuffers::String > activation)
Definition: conv_generated.h:158
void add_enable_split_reader(bool enable_split_reader)
Definition: conv_generated.h:206
void add_reallocate_halo_output(bool reallocate_halo_output)
Definition: conv_generated.h:167
::flatbuffers::uoffset_t start_
Definition: conv_generated.h:151
void add_weights_dtype(tt::target::DataType weights_dtype)
Definition: conv_generated.h:155
void add_enable_act_double_buffer(bool enable_act_double_buffer)
Definition: conv_generated.h:200
void add_shard_layout(tt::target::ttnn::TensorMemoryLayout shard_layout)
Definition: conv_generated.h:182
void add_core_grid(::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > core_grid)
Definition: conv_generated.h:185
void add_transpose_shards(bool transpose_shards)
Definition: conv_generated.h:188
void add_override_sharding_config(bool override_sharding_config)
Definition: conv_generated.h:179
void add_preprocess_weights_on_device(bool preprocess_weights_on_device)
Definition: conv_generated.h:194
void add_enable_weights_double_buffer(bool enable_weights_double_buffer)
Definition: conv_generated.h:203
::flatbuffers::Offset< Conv2dConfig > Finish()
Definition: conv_generated.h:216
void add_reshard_if_not_optimal(bool reshard_if_not_optimal)
Definition: conv_generated.h:176
void add_dtype(tt::target::DataType dtype)
Definition: conv_generated.h:152
void add_input_channels_alignment(uint32_t input_channels_alignment)
Definition: conv_generated.h:161
void add_act_block_h_override(uint32_t act_block_h_override)
Definition: conv_generated.h:170
void add_deallocate_activation(bool deallocate_activation)
Definition: conv_generated.h:164
Conv2dConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: conv_generated.h:212
void add_always_preprocess_weights(bool always_preprocess_weights)
Definition: conv_generated.h:197
Conv2dConfig Table
Definition: conv_generated.h:149
void add_act_block_w_div(uint32_t act_block_w_div)
Definition: conv_generated.h:173
void add_output_layout(tt::target::TensorLayout output_layout)
Definition: conv_generated.h:191
Definition: conv_generated.h:269
static constexpr auto Create
Definition: conv_generated.h:271
Conv2dConfig type
Definition: conv_generated.h:270
Definition: conv_generated.h:696
void add_in_channels(uint32_t in_channels)
Definition: conv_generated.h:715
void add_conv2d_config(::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config)
Definition: conv_generated.h:745
void add_input_width(uint32_t input_width)
Definition: conv_generated.h:727
void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation)
Definition: conv_generated.h:739
Conv2dOp Table
Definition: conv_generated.h:697
void add_out_channels(uint32_t out_channels)
Definition: conv_generated.h:718
void add_padding(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding)
Definition: conv_generated.h:736
void add_stride(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride)
Definition: conv_generated.h:733
::flatbuffers::Offset< Conv2dOp > Finish()
Definition: conv_generated.h:752
void add_device(::flatbuffers::Offset< tt::target::DeviceRef > device)
Definition: conv_generated.h:712
Conv2dOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: conv_generated.h:748
void add_groups(uint32_t groups)
Definition: conv_generated.h:742
void add_out(::flatbuffers::Offset< tt::target::ttnn::TensorRef > out)
Definition: conv_generated.h:709
void add_weight(::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight)
Definition: conv_generated.h:703
void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size)
Definition: conv_generated.h:730
void add_input(::flatbuffers::Offset< tt::target::ttnn::TensorRef > input)
Definition: conv_generated.h:700
::flatbuffers::uoffset_t start_
Definition: conv_generated.h:699
void add_batch_size(uint32_t batch_size)
Definition: conv_generated.h:721
void add_input_height(uint32_t input_height)
Definition: conv_generated.h:724
void add_bias(::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias)
Definition: conv_generated.h:706
::flatbuffers::FlatBufferBuilder & fbb_
Definition: conv_generated.h:698
Definition: conv_generated.h:797
static constexpr auto Create
Definition: conv_generated.h:799
Conv2dOp type
Definition: conv_generated.h:798
Definition: conv_generated.h:945
void add_input(::flatbuffers::Offset< tt::target::ttnn::TensorRef > input)
Definition: conv_generated.h:949
void add_groups(uint32_t groups)
Definition: conv_generated.h:994
void add_device(::flatbuffers::Offset< tt::target::DeviceRef > device)
Definition: conv_generated.h:961
void add_out(::flatbuffers::Offset< tt::target::ttnn::TensorRef > out)
Definition: conv_generated.h:958
ConvTranspose2dOp Table
Definition: conv_generated.h:946
void add_input_height(uint32_t input_height)
Definition: conv_generated.h:973
void add_weight(::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight)
Definition: conv_generated.h:952
::flatbuffers::Offset< ConvTranspose2dOp > Finish()
Definition: conv_generated.h:1001
ConvTranspose2dOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: conv_generated.h:997
::flatbuffers::uoffset_t start_
Definition: conv_generated.h:948
void add_in_channels(uint32_t in_channels)
Definition: conv_generated.h:964
void add_stride(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride)
Definition: conv_generated.h:982
void add_batch_size(uint32_t batch_size)
Definition: conv_generated.h:970
void add_out_channels(uint32_t out_channels)
Definition: conv_generated.h:967
void add_bias(::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias)
Definition: conv_generated.h:955
void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size)
Definition: conv_generated.h:979
::flatbuffers::FlatBufferBuilder & fbb_
Definition: conv_generated.h:947
void add_padding(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding)
Definition: conv_generated.h:985
void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation)
Definition: conv_generated.h:991
void add_output_padding(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> output_padding)
Definition: conv_generated.h:988
void add_input_width(uint32_t input_width)
Definition: conv_generated.h:976
Definition: conv_generated.h:1046
ConvTranspose2dOp type
Definition: conv_generated.h:1047
static constexpr auto Create
Definition: conv_generated.h:1048
tt::target::DataType dtype() const
Definition: conv_generated.h:60
const tt::target::ttnn::TensorRef * bias() const
Definition: conv_generated.h:622
const tt::target::ttnn::TensorRef * weight_tensor() const
Definition: conv_generated.h:344
uint32_t in_channels() const
Definition: conv_generated.h:359
bool reshard_if_not_optimal() const
Definition: conv_generated.h:84
uint32_t batch_size() const
Definition: conv_generated.h:365
const tt::target::ttnn::MemoryConfig * input_memory_config() const
Definition: conv_generated.h:350
ConvTranspose2dOpBuilder Builder
Definition: conv_generated.h:845
uint32_t input_width() const
Definition: conv_generated.h:371
::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout > shard_layout() const
Definition: conv_generated.h:90
const tt::target::ttnn::TensorRef * out() const
Definition: conv_generated.h:347
bool transpose_shards() const
Definition: conv_generated.h:96
tt::target::TensorLayout output_layout() const
Definition: conv_generated.h:99
const tt::target::ttnn::Conv2dConfig * conv2d_config() const
Definition: conv_generated.h:395
bool preprocess_weights_on_device() const
Definition: conv_generated.h:102
bool has_bias() const
Definition: conv_generated.h:386
bool reallocate_halo_output() const
Definition: conv_generated.h:75
const ::flatbuffers::Vector< int32_t > * dilation() const
Definition: conv_generated.h:383
const tt::target::ttnn::TensorRef * input() const
Definition: conv_generated.h:616
bool always_preprocess_weights() const
Definition: conv_generated.h:105
uint32_t act_block_h_override() const
Definition: conv_generated.h:78
const ::flatbuffers::String * activation() const
Definition: conv_generated.h:66
uint32_t input_height() const
Definition: conv_generated.h:368
const ::flatbuffers::Vector< int32_t > * stride() const
Definition: conv_generated.h:377
bool override_sharding_config() const
Definition: conv_generated.h:87
const ::flatbuffers::Vector< int32_t > * padding() const
Definition: conv_generated.h:380
const tt::target::ttnn::TensorRef * weight() const
Definition: conv_generated.h:619
Conv2dConfigBuilder Builder
Definition: conv_generated.h:36
const ::flatbuffers::Vector< int32_t > * output_padding() const
Definition: conv_generated.h:904
Conv2dOpBuilder Builder
Definition: conv_generated.h:596
const ::flatbuffers::Vector< int32_t > * kernel_size() const
Definition: conv_generated.h:374
bool Verify(::flatbuffers::Verifier &verifier) const
Definition: conv_generated.h:120
bool enable_weights_double_buffer() const
Definition: conv_generated.h:111
tt::target::TensorLayout input_tensor_layout() const
Definition: conv_generated.h:353
uint32_t groups() const
Definition: conv_generated.h:389
bool enable_subblock_padding() const
Definition: conv_generated.h:117
uint32_t input_channels_alignment() const
Definition: conv_generated.h:69
const ::flatbuffers::String * weights_format() const
Definition: conv_generated.h:356
uint32_t out_channels() const
Definition: conv_generated.h:362
const tt::target::ttnn::CoreRangeSet * core_grid() const
Definition: conv_generated.h:93
uint32_t act_block_w_div() const
Definition: conv_generated.h:81
tt::target::DataType weights_dtype() const
Definition: conv_generated.h:63
bool deallocate_activation() const
Definition: conv_generated.h:72
const tt::target::DeviceRef * device() const
Definition: conv_generated.h:392
bool enable_split_reader() const
Definition: conv_generated.h:114
bool enable_act_double_buffer() const
Definition: conv_generated.h:108
PrepareConv2dWeightsOpBuilder Builder
Definition: conv_generated.h:322
Definition: conv_generated.h:432
void add_input_height(uint32_t input_height)
Definition: conv_generated.h:460
void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation)
Definition: conv_generated.h:475
void add_input_tensor_layout(tt::target::TensorLayout input_tensor_layout)
Definition: conv_generated.h:445
PrepareConv2dWeightsOp Table
Definition: conv_generated.h:433
PrepareConv2dWeightsOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: conv_generated.h:490
::flatbuffers::FlatBufferBuilder & fbb_
Definition: conv_generated.h:434
void add_has_bias(bool has_bias)
Definition: conv_generated.h:478
void add_in_channels(uint32_t in_channels)
Definition: conv_generated.h:451
void add_padding(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding)
Definition: conv_generated.h:472
void add_out(::flatbuffers::Offset< tt::target::ttnn::TensorRef > out)
Definition: conv_generated.h:439
void add_device(::flatbuffers::Offset< tt::target::DeviceRef > device)
Definition: conv_generated.h:484
void add_weights_format(::flatbuffers::Offset<::flatbuffers::String > weights_format)
Definition: conv_generated.h:448
void add_conv2d_config(::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config)
Definition: conv_generated.h:487
void add_out_channels(uint32_t out_channels)
Definition: conv_generated.h:454
void add_input_memory_config(::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > input_memory_config)
Definition: conv_generated.h:442
void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size)
Definition: conv_generated.h:466
void add_stride(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride)
Definition: conv_generated.h:469
void add_groups(uint32_t groups)
Definition: conv_generated.h:481
void add_batch_size(uint32_t batch_size)
Definition: conv_generated.h:457
void add_weight_tensor(::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight_tensor)
Definition: conv_generated.h:436
::flatbuffers::Offset< PrepareConv2dWeightsOp > Finish()
Definition: conv_generated.h:494
::flatbuffers::uoffset_t start_
Definition: conv_generated.h:435
void add_input_width(uint32_t input_width)
Definition: conv_generated.h:463
Definition: conv_generated.h:543
static constexpr auto Create
Definition: conv_generated.h:545
PrepareConv2dWeightsOp type
Definition: conv_generated.h:544