TT-MLIR
conv_generated.h
1 // automatically generated by the FlatBuffers compiler, do not modify
2 
3 
4 #ifndef FLATBUFFERS_GENERATED_CONV_TT_TARGET_TTNN_H_
5 #define FLATBUFFERS_GENERATED_CONV_TT_TARGET_TTNN_H_
6 
7 #include "flatbuffers/flatbuffers.h"
8 
9 // Ensure the included flatbuffers.h is the same version as when this file was
10 // generated, otherwise it may not be compatible.
11 static_assert(FLATBUFFERS_VERSION_MAJOR == 24 &&
12  FLATBUFFERS_VERSION_MINOR == 3 &&
13  FLATBUFFERS_VERSION_REVISION == 25,
14  "Non-compatible flatbuffers version included");
15 
18 
19 namespace tt {
20 namespace target {
21 namespace ttnn {
22 
23 struct Conv2dConfig;
24 struct Conv2dConfigBuilder;
25 
26 struct PrepareConv2dWeightsOp;
27 struct PrepareConv2dWeightsOpBuilder;
28 
29 struct Conv2dOp;
30 struct Conv2dOpBuilder;
31 
32 struct ConvTranspose2dOp;
33 struct ConvTranspose2dOpBuilder;
34 
35 struct Conv2dConfig FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
36  typedef Conv2dConfigBuilder Builder;
37  struct Traits;
38  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
39  return "tt.target.ttnn.Conv2dConfig";
40  }
41  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
42  VT_DTYPE = 4,
43  VT_WEIGHTS_DTYPE = 6,
44  VT_ACTIVATION = 8,
45  VT_DEALLOCATE_ACTIVATION = 10,
46  VT_REALLOCATE_HALO_OUTPUT = 12,
47  VT_ACT_BLOCK_H_OVERRIDE = 14,
48  VT_ACT_BLOCK_W_DIV = 16,
49  VT_RESHARD_IF_NOT_OPTIMAL = 18,
50  VT_OVERRIDE_SHARDING_CONFIG = 20,
51  VT_SHARD_LAYOUT = 22,
52  VT_CORE_GRID = 24,
53  VT_TRANSPOSE_SHARDS = 26,
54  VT_OUTPUT_LAYOUT = 28,
55  VT_PREPROCESS_WEIGHTS_ON_DEVICE = 30,
56  VT_ALWAYS_PREPROCESS_WEIGHTS = 32,
57  VT_ENABLE_ACT_DOUBLE_BUFFER = 34,
58  VT_ENABLE_WEIGHTS_DOUBLE_BUFFER = 36,
59  VT_ENABLE_SPLIT_READER = 38,
60  VT_ENABLE_SUBBLOCK_PADDING = 40
61  };
62  ::flatbuffers::Optional<tt::target::DataType> dtype() const {
63  return GetOptional<uint16_t, tt::target::DataType>(VT_DTYPE);
64  }
65  ::flatbuffers::Optional<tt::target::DataType> weights_dtype() const {
66  return GetOptional<uint16_t, tt::target::DataType>(VT_WEIGHTS_DTYPE);
67  }
68  const ::flatbuffers::String *activation() const {
69  return GetPointer<const ::flatbuffers::String *>(VT_ACTIVATION);
70  }
71  ::flatbuffers::Optional<bool> deallocate_activation() const {
72  return GetOptional<uint8_t, bool>(VT_DEALLOCATE_ACTIVATION);
73  }
74  ::flatbuffers::Optional<bool> reallocate_halo_output() const {
75  return GetOptional<uint8_t, bool>(VT_REALLOCATE_HALO_OUTPUT);
76  }
77  ::flatbuffers::Optional<uint32_t> act_block_h_override() const {
78  return GetOptional<uint32_t, uint32_t>(VT_ACT_BLOCK_H_OVERRIDE);
79  }
80  ::flatbuffers::Optional<uint32_t> act_block_w_div() const {
81  return GetOptional<uint32_t, uint32_t>(VT_ACT_BLOCK_W_DIV);
82  }
83  ::flatbuffers::Optional<bool> reshard_if_not_optimal() const {
84  return GetOptional<uint8_t, bool>(VT_RESHARD_IF_NOT_OPTIMAL);
85  }
86  ::flatbuffers::Optional<bool> override_sharding_config() const {
87  return GetOptional<uint8_t, bool>(VT_OVERRIDE_SHARDING_CONFIG);
88  }
89  ::flatbuffers::Optional<tt::target::ttnn::TensorMemoryLayout> shard_layout() const {
90  return GetOptional<uint16_t, tt::target::ttnn::TensorMemoryLayout>(VT_SHARD_LAYOUT);
91  }
92  const tt::target::ttnn::CoreRangeSet *core_grid() const {
93  return GetPointer<const tt::target::ttnn::CoreRangeSet *>(VT_CORE_GRID);
94  }
95  ::flatbuffers::Optional<bool> transpose_shards() const {
96  return GetOptional<uint8_t, bool>(VT_TRANSPOSE_SHARDS);
97  }
98  ::flatbuffers::Optional<tt::target::TensorLayout> output_layout() const {
99  return GetOptional<uint16_t, tt::target::TensorLayout>(VT_OUTPUT_LAYOUT);
100  }
101  ::flatbuffers::Optional<bool> preprocess_weights_on_device() const {
102  return GetOptional<uint8_t, bool>(VT_PREPROCESS_WEIGHTS_ON_DEVICE);
103  }
104  ::flatbuffers::Optional<bool> always_preprocess_weights() const {
105  return GetOptional<uint8_t, bool>(VT_ALWAYS_PREPROCESS_WEIGHTS);
106  }
107  ::flatbuffers::Optional<bool> enable_act_double_buffer() const {
108  return GetOptional<uint8_t, bool>(VT_ENABLE_ACT_DOUBLE_BUFFER);
109  }
110  ::flatbuffers::Optional<bool> enable_weights_double_buffer() const {
111  return GetOptional<uint8_t, bool>(VT_ENABLE_WEIGHTS_DOUBLE_BUFFER);
112  }
113  ::flatbuffers::Optional<bool> enable_split_reader() const {
114  return GetOptional<uint8_t, bool>(VT_ENABLE_SPLIT_READER);
115  }
116  ::flatbuffers::Optional<bool> enable_subblock_padding() const {
117  return GetOptional<uint8_t, bool>(VT_ENABLE_SUBBLOCK_PADDING);
118  }
119  bool Verify(::flatbuffers::Verifier &verifier) const {
120  return VerifyTableStart(verifier) &&
121  VerifyField<uint16_t>(verifier, VT_DTYPE, 2) &&
122  VerifyField<uint16_t>(verifier, VT_WEIGHTS_DTYPE, 2) &&
123  VerifyOffset(verifier, VT_ACTIVATION) &&
124  verifier.VerifyString(activation()) &&
125  VerifyField<uint8_t>(verifier, VT_DEALLOCATE_ACTIVATION, 1) &&
126  VerifyField<uint8_t>(verifier, VT_REALLOCATE_HALO_OUTPUT, 1) &&
127  VerifyField<uint32_t>(verifier, VT_ACT_BLOCK_H_OVERRIDE, 4) &&
128  VerifyField<uint32_t>(verifier, VT_ACT_BLOCK_W_DIV, 4) &&
129  VerifyField<uint8_t>(verifier, VT_RESHARD_IF_NOT_OPTIMAL, 1) &&
130  VerifyField<uint8_t>(verifier, VT_OVERRIDE_SHARDING_CONFIG, 1) &&
131  VerifyField<uint16_t>(verifier, VT_SHARD_LAYOUT, 2) &&
132  VerifyOffset(verifier, VT_CORE_GRID) &&
133  verifier.VerifyTable(core_grid()) &&
134  VerifyField<uint8_t>(verifier, VT_TRANSPOSE_SHARDS, 1) &&
135  VerifyField<uint16_t>(verifier, VT_OUTPUT_LAYOUT, 2) &&
136  VerifyField<uint8_t>(verifier, VT_PREPROCESS_WEIGHTS_ON_DEVICE, 1) &&
137  VerifyField<uint8_t>(verifier, VT_ALWAYS_PREPROCESS_WEIGHTS, 1) &&
138  VerifyField<uint8_t>(verifier, VT_ENABLE_ACT_DOUBLE_BUFFER, 1) &&
139  VerifyField<uint8_t>(verifier, VT_ENABLE_WEIGHTS_DOUBLE_BUFFER, 1) &&
140  VerifyField<uint8_t>(verifier, VT_ENABLE_SPLIT_READER, 1) &&
141  VerifyField<uint8_t>(verifier, VT_ENABLE_SUBBLOCK_PADDING, 1) &&
142  verifier.EndTable();
143  }
144 };
145 
146 struct Conv2dConfigBuilder {
147  typedef Conv2dConfig Table;
148  ::flatbuffers::FlatBufferBuilder &fbb_;
149  ::flatbuffers::uoffset_t start_;
150  void add_dtype(tt::target::DataType dtype) {
151  fbb_.AddElement<uint16_t>(Conv2dConfig::VT_DTYPE, static_cast<uint16_t>(dtype));
152  }
153  void add_weights_dtype(tt::target::DataType weights_dtype) {
154  fbb_.AddElement<uint16_t>(Conv2dConfig::VT_WEIGHTS_DTYPE, static_cast<uint16_t>(weights_dtype));
155  }
156  void add_activation(::flatbuffers::Offset<::flatbuffers::String> activation) {
157  fbb_.AddOffset(Conv2dConfig::VT_ACTIVATION, activation);
158  }
159  void add_deallocate_activation(bool deallocate_activation) {
160  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_DEALLOCATE_ACTIVATION, static_cast<uint8_t>(deallocate_activation));
161  }
162  void add_reallocate_halo_output(bool reallocate_halo_output) {
163  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_REALLOCATE_HALO_OUTPUT, static_cast<uint8_t>(reallocate_halo_output));
164  }
165  void add_act_block_h_override(uint32_t act_block_h_override) {
166  fbb_.AddElement<uint32_t>(Conv2dConfig::VT_ACT_BLOCK_H_OVERRIDE, act_block_h_override);
167  }
168  void add_act_block_w_div(uint32_t act_block_w_div) {
169  fbb_.AddElement<uint32_t>(Conv2dConfig::VT_ACT_BLOCK_W_DIV, act_block_w_div);
170  }
171  void add_reshard_if_not_optimal(bool reshard_if_not_optimal) {
172  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_RESHARD_IF_NOT_OPTIMAL, static_cast<uint8_t>(reshard_if_not_optimal));
173  }
174  void add_override_sharding_config(bool override_sharding_config) {
175  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_OVERRIDE_SHARDING_CONFIG, static_cast<uint8_t>(override_sharding_config));
176  }
177  void add_shard_layout(tt::target::ttnn::TensorMemoryLayout shard_layout) {
178  fbb_.AddElement<uint16_t>(Conv2dConfig::VT_SHARD_LAYOUT, static_cast<uint16_t>(shard_layout));
179  }
180  void add_core_grid(::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> core_grid) {
181  fbb_.AddOffset(Conv2dConfig::VT_CORE_GRID, core_grid);
182  }
183  void add_transpose_shards(bool transpose_shards) {
184  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_TRANSPOSE_SHARDS, static_cast<uint8_t>(transpose_shards));
185  }
186  void add_output_layout(tt::target::TensorLayout output_layout) {
187  fbb_.AddElement<uint16_t>(Conv2dConfig::VT_OUTPUT_LAYOUT, static_cast<uint16_t>(output_layout));
188  }
189  void add_preprocess_weights_on_device(bool preprocess_weights_on_device) {
190  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_PREPROCESS_WEIGHTS_ON_DEVICE, static_cast<uint8_t>(preprocess_weights_on_device));
191  }
192  void add_always_preprocess_weights(bool always_preprocess_weights) {
193  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_ALWAYS_PREPROCESS_WEIGHTS, static_cast<uint8_t>(always_preprocess_weights));
194  }
195  void add_enable_act_double_buffer(bool enable_act_double_buffer) {
196  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_ENABLE_ACT_DOUBLE_BUFFER, static_cast<uint8_t>(enable_act_double_buffer));
197  }
198  void add_enable_weights_double_buffer(bool enable_weights_double_buffer) {
199  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_ENABLE_WEIGHTS_DOUBLE_BUFFER, static_cast<uint8_t>(enable_weights_double_buffer));
200  }
201  void add_enable_split_reader(bool enable_split_reader) {
202  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_ENABLE_SPLIT_READER, static_cast<uint8_t>(enable_split_reader));
203  }
204  void add_enable_subblock_padding(bool enable_subblock_padding) {
205  fbb_.AddElement<uint8_t>(Conv2dConfig::VT_ENABLE_SUBBLOCK_PADDING, static_cast<uint8_t>(enable_subblock_padding));
206  }
207  explicit Conv2dConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
208  : fbb_(_fbb) {
209  start_ = fbb_.StartTable();
210  }
211  ::flatbuffers::Offset<Conv2dConfig> Finish() {
212  const auto end = fbb_.EndTable(start_);
213  auto o = ::flatbuffers::Offset<Conv2dConfig>(end);
214  return o;
215  }
216 };
217 
218 inline ::flatbuffers::Offset<Conv2dConfig> CreateConv2dConfig(
219  ::flatbuffers::FlatBufferBuilder &_fbb,
220  ::flatbuffers::Optional<tt::target::DataType> dtype = ::flatbuffers::nullopt,
221  ::flatbuffers::Optional<tt::target::DataType> weights_dtype = ::flatbuffers::nullopt,
222  ::flatbuffers::Offset<::flatbuffers::String> activation = 0,
223  ::flatbuffers::Optional<bool> deallocate_activation = ::flatbuffers::nullopt,
224  ::flatbuffers::Optional<bool> reallocate_halo_output = ::flatbuffers::nullopt,
225  ::flatbuffers::Optional<uint32_t> act_block_h_override = ::flatbuffers::nullopt,
226  ::flatbuffers::Optional<uint32_t> act_block_w_div = ::flatbuffers::nullopt,
227  ::flatbuffers::Optional<bool> reshard_if_not_optimal = ::flatbuffers::nullopt,
228  ::flatbuffers::Optional<bool> override_sharding_config = ::flatbuffers::nullopt,
229  ::flatbuffers::Optional<tt::target::ttnn::TensorMemoryLayout> shard_layout = ::flatbuffers::nullopt,
230  ::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> core_grid = 0,
231  ::flatbuffers::Optional<bool> transpose_shards = ::flatbuffers::nullopt,
232  ::flatbuffers::Optional<tt::target::TensorLayout> output_layout = ::flatbuffers::nullopt,
233  ::flatbuffers::Optional<bool> preprocess_weights_on_device = ::flatbuffers::nullopt,
234  ::flatbuffers::Optional<bool> always_preprocess_weights = ::flatbuffers::nullopt,
235  ::flatbuffers::Optional<bool> enable_act_double_buffer = ::flatbuffers::nullopt,
236  ::flatbuffers::Optional<bool> enable_weights_double_buffer = ::flatbuffers::nullopt,
237  ::flatbuffers::Optional<bool> enable_split_reader = ::flatbuffers::nullopt,
238  ::flatbuffers::Optional<bool> enable_subblock_padding = ::flatbuffers::nullopt) {
239  Conv2dConfigBuilder builder_(_fbb);
240  builder_.add_core_grid(core_grid);
241  if(act_block_w_div) { builder_.add_act_block_w_div(*act_block_w_div); }
242  if(act_block_h_override) { builder_.add_act_block_h_override(*act_block_h_override); }
243  builder_.add_activation(activation);
244  if(output_layout) { builder_.add_output_layout(*output_layout); }
245  if(shard_layout) { builder_.add_shard_layout(*shard_layout); }
246  if(weights_dtype) { builder_.add_weights_dtype(*weights_dtype); }
247  if(dtype) { builder_.add_dtype(*dtype); }
248  if(enable_subblock_padding) { builder_.add_enable_subblock_padding(*enable_subblock_padding); }
249  if(enable_split_reader) { builder_.add_enable_split_reader(*enable_split_reader); }
250  if(enable_weights_double_buffer) { builder_.add_enable_weights_double_buffer(*enable_weights_double_buffer); }
251  if(enable_act_double_buffer) { builder_.add_enable_act_double_buffer(*enable_act_double_buffer); }
252  if(always_preprocess_weights) { builder_.add_always_preprocess_weights(*always_preprocess_weights); }
253  if(preprocess_weights_on_device) { builder_.add_preprocess_weights_on_device(*preprocess_weights_on_device); }
254  if(transpose_shards) { builder_.add_transpose_shards(*transpose_shards); }
255  if(override_sharding_config) { builder_.add_override_sharding_config(*override_sharding_config); }
256  if(reshard_if_not_optimal) { builder_.add_reshard_if_not_optimal(*reshard_if_not_optimal); }
257  if(reallocate_halo_output) { builder_.add_reallocate_halo_output(*reallocate_halo_output); }
258  if(deallocate_activation) { builder_.add_deallocate_activation(*deallocate_activation); }
259  return builder_.Finish();
260 }
261 
262 struct Conv2dConfig::Traits {
263  using type = Conv2dConfig;
264  static auto constexpr Create = CreateConv2dConfig;
265 };
266 
267 inline ::flatbuffers::Offset<Conv2dConfig> CreateConv2dConfigDirect(
268  ::flatbuffers::FlatBufferBuilder &_fbb,
269  ::flatbuffers::Optional<tt::target::DataType> dtype = ::flatbuffers::nullopt,
270  ::flatbuffers::Optional<tt::target::DataType> weights_dtype = ::flatbuffers::nullopt,
271  const char *activation = nullptr,
272  ::flatbuffers::Optional<bool> deallocate_activation = ::flatbuffers::nullopt,
273  ::flatbuffers::Optional<bool> reallocate_halo_output = ::flatbuffers::nullopt,
274  ::flatbuffers::Optional<uint32_t> act_block_h_override = ::flatbuffers::nullopt,
275  ::flatbuffers::Optional<uint32_t> act_block_w_div = ::flatbuffers::nullopt,
276  ::flatbuffers::Optional<bool> reshard_if_not_optimal = ::flatbuffers::nullopt,
277  ::flatbuffers::Optional<bool> override_sharding_config = ::flatbuffers::nullopt,
278  ::flatbuffers::Optional<tt::target::ttnn::TensorMemoryLayout> shard_layout = ::flatbuffers::nullopt,
279  ::flatbuffers::Offset<tt::target::ttnn::CoreRangeSet> core_grid = 0,
280  ::flatbuffers::Optional<bool> transpose_shards = ::flatbuffers::nullopt,
281  ::flatbuffers::Optional<tt::target::TensorLayout> output_layout = ::flatbuffers::nullopt,
282  ::flatbuffers::Optional<bool> preprocess_weights_on_device = ::flatbuffers::nullopt,
283  ::flatbuffers::Optional<bool> always_preprocess_weights = ::flatbuffers::nullopt,
284  ::flatbuffers::Optional<bool> enable_act_double_buffer = ::flatbuffers::nullopt,
285  ::flatbuffers::Optional<bool> enable_weights_double_buffer = ::flatbuffers::nullopt,
286  ::flatbuffers::Optional<bool> enable_split_reader = ::flatbuffers::nullopt,
287  ::flatbuffers::Optional<bool> enable_subblock_padding = ::flatbuffers::nullopt) {
288  auto activation__ = activation ? _fbb.CreateString(activation) : 0;
289  return tt::target::ttnn::CreateConv2dConfig(
290  _fbb,
291  dtype,
292  weights_dtype,
293  activation__,
294  deallocate_activation,
295  reallocate_halo_output,
296  act_block_h_override,
297  act_block_w_div,
298  reshard_if_not_optimal,
299  override_sharding_config,
300  shard_layout,
301  core_grid,
302  transpose_shards,
303  output_layout,
304  preprocess_weights_on_device,
305  always_preprocess_weights,
306  enable_act_double_buffer,
307  enable_weights_double_buffer,
308  enable_split_reader,
309  enable_subblock_padding);
310 }
311 
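Usage note (not part of the generated file): the sketch below shows one plausible way a serializer could populate a Conv2dConfig through CreateConv2dConfigDirect. The function name buildExampleConv2dConfig and every field value are illustrative assumptions; parameters left at ::flatbuffers::nullopt are simply omitted from the buffer, since every scalar field of Conv2dConfig is optional.

// Illustrative sketch only; values are assumptions, not project defaults.
::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig>
buildExampleConv2dConfig(::flatbuffers::FlatBufferBuilder &fbb) {
  return tt::target::ttnn::CreateConv2dConfigDirect(
      fbb,
      /*dtype=*/::flatbuffers::nullopt,
      /*weights_dtype=*/::flatbuffers::nullopt,
      /*activation=*/"relu",            // serialized as a String field
      /*deallocate_activation=*/true,   // Optional<bool> set explicitly
      /*reallocate_halo_output=*/::flatbuffers::nullopt,
      /*act_block_h_override=*/32u);    // remaining parameters keep their defaults
}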
312 struct PrepareConv2dWeightsOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
313  typedef PrepareConv2dWeightsOpBuilder Builder;
314  struct Traits;
315  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
316  return "tt.target.ttnn.PrepareConv2dWeightsOp";
317  }
318  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
319  VT_WEIGHT_TENSOR = 4,
320  VT_OUT = 6,
321  VT_INPUT_MEMORY_CONFIG = 8,
322  VT_INPUT_TENSOR_LAYOUT = 10,
323  VT_WEIGHTS_FORMAT = 12,
324  VT_IN_CHANNELS = 14,
325  VT_OUT_CHANNELS = 16,
326  VT_BATCH_SIZE = 18,
327  VT_INPUT_HEIGHT = 20,
328  VT_INPUT_WIDTH = 22,
329  VT_KERNEL_SIZE = 24,
330  VT_STRIDE = 26,
331  VT_PADDING = 28,
332  VT_DILATION = 30,
333  VT_HAS_BIAS = 32,
334  VT_GROUPS = 34,
335  VT_DEVICE = 36,
336  VT_CONV2D_CONFIG = 38
337  };
338  const tt::target::ttnn::TensorRef *weight_tensor() const {
339  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_WEIGHT_TENSOR);
340  }
341  const tt::target::ttnn::TensorRef *out() const {
342  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
343  }
344  const tt::target::ttnn::MemoryConfig *input_memory_config() const {
345  return GetPointer<const tt::target::ttnn::MemoryConfig *>(VT_INPUT_MEMORY_CONFIG);
346  }
347  tt::target::TensorLayout input_tensor_layout() const {
348  return static_cast<tt::target::TensorLayout>(GetField<uint16_t>(VT_INPUT_TENSOR_LAYOUT, 0));
349  }
350  const ::flatbuffers::String *weights_format() const {
351  return GetPointer<const ::flatbuffers::String *>(VT_WEIGHTS_FORMAT);
352  }
353  uint32_t in_channels() const {
354  return GetField<uint32_t>(VT_IN_CHANNELS, 0);
355  }
356  uint32_t out_channels() const {
357  return GetField<uint32_t>(VT_OUT_CHANNELS, 0);
358  }
359  uint32_t batch_size() const {
360  return GetField<uint32_t>(VT_BATCH_SIZE, 0);
361  }
362  uint32_t input_height() const {
363  return GetField<uint32_t>(VT_INPUT_HEIGHT, 0);
364  }
365  uint32_t input_width() const {
366  return GetField<uint32_t>(VT_INPUT_WIDTH, 0);
367  }
368  const ::flatbuffers::Vector<int32_t> *kernel_size() const {
369  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_KERNEL_SIZE);
370  }
371  const ::flatbuffers::Vector<int32_t> *stride() const {
372  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_STRIDE);
373  }
374  const ::flatbuffers::Vector<int32_t> *padding() const {
375  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_PADDING);
376  }
377  const ::flatbuffers::Vector<int32_t> *dilation() const {
378  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_DILATION);
379  }
380  bool has_bias() const {
381  return GetField<uint8_t>(VT_HAS_BIAS, 0) != 0;
382  }
383  uint32_t groups() const {
384  return GetField<uint32_t>(VT_GROUPS, 0);
385  }
386  const tt::target::DeviceRef *device() const {
387  return GetPointer<const tt::target::DeviceRef *>(VT_DEVICE);
388  }
389  const tt::target::ttnn::Conv2dConfig *conv2d_config() const {
390  return GetPointer<const tt::target::ttnn::Conv2dConfig *>(VT_CONV2D_CONFIG);
391  }
392  bool Verify(::flatbuffers::Verifier &verifier) const {
393  return VerifyTableStart(verifier) &&
394  VerifyOffset(verifier, VT_WEIGHT_TENSOR) &&
395  verifier.VerifyTable(weight_tensor()) &&
396  VerifyOffset(verifier, VT_OUT) &&
397  verifier.VerifyTable(out()) &&
398  VerifyOffset(verifier, VT_INPUT_MEMORY_CONFIG) &&
399  verifier.VerifyTable(input_memory_config()) &&
400  VerifyField<uint16_t>(verifier, VT_INPUT_TENSOR_LAYOUT, 2) &&
401  VerifyOffset(verifier, VT_WEIGHTS_FORMAT) &&
402  verifier.VerifyString(weights_format()) &&
403  VerifyField<uint32_t>(verifier, VT_IN_CHANNELS, 4) &&
404  VerifyField<uint32_t>(verifier, VT_OUT_CHANNELS, 4) &&
405  VerifyField<uint32_t>(verifier, VT_BATCH_SIZE, 4) &&
406  VerifyField<uint32_t>(verifier, VT_INPUT_HEIGHT, 4) &&
407  VerifyField<uint32_t>(verifier, VT_INPUT_WIDTH, 4) &&
408  VerifyOffset(verifier, VT_KERNEL_SIZE) &&
409  verifier.VerifyVector(kernel_size()) &&
410  VerifyOffset(verifier, VT_STRIDE) &&
411  verifier.VerifyVector(stride()) &&
412  VerifyOffset(verifier, VT_PADDING) &&
413  verifier.VerifyVector(padding()) &&
414  VerifyOffset(verifier, VT_DILATION) &&
415  verifier.VerifyVector(dilation()) &&
416  VerifyField<uint8_t>(verifier, VT_HAS_BIAS, 1) &&
417  VerifyField<uint32_t>(verifier, VT_GROUPS, 4) &&
418  VerifyOffset(verifier, VT_DEVICE) &&
419  verifier.VerifyTable(device()) &&
420  VerifyOffset(verifier, VT_CONV2D_CONFIG) &&
421  verifier.VerifyTable(conv2d_config()) &&
422  verifier.EndTable();
423  }
424 };
425 
426 struct PrepareConv2dWeightsOpBuilder {
427  typedef PrepareConv2dWeightsOp Table;
428  ::flatbuffers::FlatBufferBuilder &fbb_;
429  ::flatbuffers::uoffset_t start_;
430  void add_weight_tensor(::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight_tensor) {
431  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_WEIGHT_TENSOR, weight_tensor);
432  }
433  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
434  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_OUT, out);
435  }
436  void add_input_memory_config(::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> input_memory_config) {
437  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_INPUT_MEMORY_CONFIG, input_memory_config);
438  }
439  void add_input_tensor_layout(tt::target::TensorLayout input_tensor_layout) {
440  fbb_.AddElement<uint16_t>(PrepareConv2dWeightsOp::VT_INPUT_TENSOR_LAYOUT, static_cast<uint16_t>(input_tensor_layout), 0);
441  }
442  void add_weights_format(::flatbuffers::Offset<::flatbuffers::String> weights_format) {
443  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_WEIGHTS_FORMAT, weights_format);
444  }
445  void add_in_channels(uint32_t in_channels) {
446  fbb_.AddElement<uint32_t>(PrepareConv2dWeightsOp::VT_IN_CHANNELS, in_channels, 0);
447  }
448  void add_out_channels(uint32_t out_channels) {
449  fbb_.AddElement<uint32_t>(PrepareConv2dWeightsOp::VT_OUT_CHANNELS, out_channels, 0);
450  }
451  void add_batch_size(uint32_t batch_size) {
452  fbb_.AddElement<uint32_t>(PrepareConv2dWeightsOp::VT_BATCH_SIZE, batch_size, 0);
453  }
454  void add_input_height(uint32_t input_height) {
455  fbb_.AddElement<uint32_t>(PrepareConv2dWeightsOp::VT_INPUT_HEIGHT, input_height, 0);
456  }
457  void add_input_width(uint32_t input_width) {
458  fbb_.AddElement<uint32_t>(PrepareConv2dWeightsOp::VT_INPUT_WIDTH, input_width, 0);
459  }
460  void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size) {
461  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_KERNEL_SIZE, kernel_size);
462  }
463  void add_stride(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride) {
464  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_STRIDE, stride);
465  }
466  void add_padding(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding) {
467  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_PADDING, padding);
468  }
469  void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation) {
470  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_DILATION, dilation);
471  }
472  void add_has_bias(bool has_bias) {
473  fbb_.AddElement<uint8_t>(PrepareConv2dWeightsOp::VT_HAS_BIAS, static_cast<uint8_t>(has_bias), 0);
474  }
475  void add_groups(uint32_t groups) {
476  fbb_.AddElement<uint32_t>(PrepareConv2dWeightsOp::VT_GROUPS, groups, 0);
477  }
478  void add_device(::flatbuffers::Offset<tt::target::DeviceRef> device) {
479  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_DEVICE, device);
480  }
481  void add_conv2d_config(::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> conv2d_config) {
482  fbb_.AddOffset(PrepareConv2dWeightsOp::VT_CONV2D_CONFIG, conv2d_config);
483  }
484  explicit PrepareConv2dWeightsOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
485  : fbb_(_fbb) {
486  start_ = fbb_.StartTable();
487  }
488  ::flatbuffers::Offset<PrepareConv2dWeightsOp> Finish() {
489  const auto end = fbb_.EndTable(start_);
490  auto o = ::flatbuffers::Offset<PrepareConv2dWeightsOp>(end);
491  return o;
492  }
493 };
494 
495 inline ::flatbuffers::Offset<PrepareConv2dWeightsOp> CreatePrepareConv2dWeightsOp(
496  ::flatbuffers::FlatBufferBuilder &_fbb,
497  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight_tensor = 0,
498  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
499  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> input_memory_config = 0,
500  tt::target::TensorLayout input_tensor_layout = tt::target::TensorLayout::RowMajor,
501  ::flatbuffers::Offset<::flatbuffers::String> weights_format = 0,
502  uint32_t in_channels = 0,
503  uint32_t out_channels = 0,
504  uint32_t batch_size = 0,
505  uint32_t input_height = 0,
506  uint32_t input_width = 0,
507  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size = 0,
508  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride = 0,
509  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding = 0,
510  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation = 0,
511  bool has_bias = false,
512  uint32_t groups = 0,
513  ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
514  ::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> conv2d_config = 0) {
515  PrepareConv2dWeightsOpBuilder builder_(_fbb);
516  builder_.add_conv2d_config(conv2d_config);
517  builder_.add_device(device);
518  builder_.add_groups(groups);
519  builder_.add_dilation(dilation);
520  builder_.add_padding(padding);
521  builder_.add_stride(stride);
522  builder_.add_kernel_size(kernel_size);
523  builder_.add_input_width(input_width);
524  builder_.add_input_height(input_height);
525  builder_.add_batch_size(batch_size);
526  builder_.add_out_channels(out_channels);
527  builder_.add_in_channels(in_channels);
528  builder_.add_weights_format(weights_format);
529  builder_.add_input_memory_config(input_memory_config);
530  builder_.add_out(out);
531  builder_.add_weight_tensor(weight_tensor);
532  builder_.add_input_tensor_layout(input_tensor_layout);
533  builder_.add_has_bias(has_bias);
534  return builder_.Finish();
535 }
536 
537 struct PrepareConv2dWeightsOp::Traits {
538  using type = PrepareConv2dWeightsOp;
539  static auto constexpr Create = CreatePrepareConv2dWeightsOp;
540 };
541 
542 inline ::flatbuffers::Offset<PrepareConv2dWeightsOp> CreatePrepareConv2dWeightsOpDirect(
543  ::flatbuffers::FlatBufferBuilder &_fbb,
544  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight_tensor = 0,
545  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
546  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> input_memory_config = 0,
547  tt::target::TensorLayout input_tensor_layout = tt::target::TensorLayout::RowMajor,
548  const char *weights_format = nullptr,
549  uint32_t in_channels = 0,
550  uint32_t out_channels = 0,
551  uint32_t batch_size = 0,
552  uint32_t input_height = 0,
553  uint32_t input_width = 0,
554  const std::vector<int32_t> *kernel_size = nullptr,
555  const std::vector<int32_t> *stride = nullptr,
556  const std::vector<int32_t> *padding = nullptr,
557  const std::vector<int32_t> *dilation = nullptr,
558  bool has_bias = false,
559  uint32_t groups = 0,
560  ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
561  ::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> conv2d_config = 0) {
562  auto weights_format__ = weights_format ? _fbb.CreateString(weights_format) : 0;
563  auto kernel_size__ = kernel_size ? _fbb.CreateVector<int32_t>(*kernel_size) : 0;
564  auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
565  auto padding__ = padding ? _fbb.CreateVector<int32_t>(*padding) : 0;
566  auto dilation__ = dilation ? _fbb.CreateVector<int32_t>(*dilation) : 0;
567  return tt::target::ttnn::CreatePrepareConv2dWeightsOp(
568  _fbb,
569  weight_tensor,
570  out,
571  input_memory_config,
572  input_tensor_layout,
573  weights_format__,
574  in_channels,
575  out_channels,
576  batch_size,
577  input_height,
578  input_width,
579  kernel_size__,
580  stride__,
581  padding__,
582  dilation__,
583  has_bias,
584  groups,
585  device,
586  conv2d_config);
587 }
588 
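Usage note (illustrative, not generated): CreatePrepareConv2dWeightsOpDirect expects the TensorRef, MemoryConfig, and DeviceRef offsets to have been serialized earlier by their own generated helpers. In the sketch below those offsets are passed in as parameters; the weights format string "OIHW" and all dimensions are placeholder assumptions.

// Sketch assuming the referenced tables were built elsewhere in the same builder.
::flatbuffers::Offset<tt::target::ttnn::PrepareConv2dWeightsOp>
buildExamplePrepareWeights(
    ::flatbuffers::FlatBufferBuilder &fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weightTensor,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out,
    ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> inputMemCfg,
    ::flatbuffers::Offset<tt::target::DeviceRef> device) {
  const std::vector<int32_t> kernel = {3, 3}, stride = {1, 1},
                             padding = {1, 1}, dilation = {1, 1};
  return tt::target::ttnn::CreatePrepareConv2dWeightsOpDirect(
      fbb, weightTensor, out, inputMemCfg,
      /*input_tensor_layout=*/tt::target::TensorLayout::RowMajor,
      /*weights_format=*/"OIHW",
      /*in_channels=*/64, /*out_channels=*/64, /*batch_size=*/1,
      /*input_height=*/224, /*input_width=*/224,
      &kernel, &stride, &padding, &dilation,
      /*has_bias=*/true, /*groups=*/1, device, /*conv2d_config=*/0);
}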
589 struct Conv2dOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
590  typedef Conv2dOpBuilder Builder;
591  struct Traits;
592  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
593  return "tt.target.ttnn.Conv2dOp";
594  }
595  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
596  VT_INPUT = 4,
597  VT_WEIGHT = 6,
598  VT_BIAS = 8,
599  VT_OUT = 10,
600  VT_DEVICE = 12,
601  VT_IN_CHANNELS = 14,
602  VT_OUT_CHANNELS = 16,
603  VT_BATCH_SIZE = 18,
604  VT_INPUT_HEIGHT = 20,
605  VT_INPUT_WIDTH = 22,
606  VT_KERNEL_SIZE = 24,
607  VT_STRIDE = 26,
608  VT_PADDING = 28,
609  VT_DILATION = 30,
610  VT_GROUPS = 32,
611  VT_CONV2D_CONFIG = 34
612  };
613  const tt::target::ttnn::TensorRef *input() const {
614  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_INPUT);
615  }
616  const tt::target::ttnn::TensorRef *weight() const {
617  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_WEIGHT);
618  }
619  const tt::target::ttnn::TensorRef *bias() const {
620  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_BIAS);
621  }
622  const tt::target::ttnn::TensorRef *out() const {
623  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
624  }
625  const tt::target::DeviceRef *device() const {
626  return GetPointer<const tt::target::DeviceRef *>(VT_DEVICE);
627  }
628  uint32_t in_channels() const {
629  return GetField<uint32_t>(VT_IN_CHANNELS, 0);
630  }
631  uint32_t out_channels() const {
632  return GetField<uint32_t>(VT_OUT_CHANNELS, 0);
633  }
634  uint32_t batch_size() const {
635  return GetField<uint32_t>(VT_BATCH_SIZE, 0);
636  }
637  uint32_t input_height() const {
638  return GetField<uint32_t>(VT_INPUT_HEIGHT, 0);
639  }
640  uint32_t input_width() const {
641  return GetField<uint32_t>(VT_INPUT_WIDTH, 0);
642  }
643  const ::flatbuffers::Vector<int32_t> *kernel_size() const {
644  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_KERNEL_SIZE);
645  }
646  const ::flatbuffers::Vector<int32_t> *stride() const {
647  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_STRIDE);
648  }
649  const ::flatbuffers::Vector<int32_t> *padding() const {
650  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_PADDING);
651  }
652  const ::flatbuffers::Vector<int32_t> *dilation() const {
653  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_DILATION);
654  }
655  uint32_t groups() const {
656  return GetField<uint32_t>(VT_GROUPS, 0);
657  }
658  const tt::target::ttnn::Conv2dConfig *conv2d_config() const {
659  return GetPointer<const tt::target::ttnn::Conv2dConfig *>(VT_CONV2D_CONFIG);
660  }
661  bool Verify(::flatbuffers::Verifier &verifier) const {
662  return VerifyTableStart(verifier) &&
663  VerifyOffset(verifier, VT_INPUT) &&
664  verifier.VerifyTable(input()) &&
665  VerifyOffset(verifier, VT_WEIGHT) &&
666  verifier.VerifyTable(weight()) &&
667  VerifyOffset(verifier, VT_BIAS) &&
668  verifier.VerifyTable(bias()) &&
669  VerifyOffset(verifier, VT_OUT) &&
670  verifier.VerifyTable(out()) &&
671  VerifyOffset(verifier, VT_DEVICE) &&
672  verifier.VerifyTable(device()) &&
673  VerifyField<uint32_t>(verifier, VT_IN_CHANNELS, 4) &&
674  VerifyField<uint32_t>(verifier, VT_OUT_CHANNELS, 4) &&
675  VerifyField<uint32_t>(verifier, VT_BATCH_SIZE, 4) &&
676  VerifyField<uint32_t>(verifier, VT_INPUT_HEIGHT, 4) &&
677  VerifyField<uint32_t>(verifier, VT_INPUT_WIDTH, 4) &&
678  VerifyOffset(verifier, VT_KERNEL_SIZE) &&
679  verifier.VerifyVector(kernel_size()) &&
680  VerifyOffset(verifier, VT_STRIDE) &&
681  verifier.VerifyVector(stride()) &&
682  VerifyOffset(verifier, VT_PADDING) &&
683  verifier.VerifyVector(padding()) &&
684  VerifyOffset(verifier, VT_DILATION) &&
685  verifier.VerifyVector(dilation()) &&
686  VerifyField<uint32_t>(verifier, VT_GROUPS, 4) &&
687  VerifyOffset(verifier, VT_CONV2D_CONFIG) &&
688  verifier.VerifyTable(conv2d_config()) &&
689  verifier.EndTable();
690  }
691 };
692 
693 struct Conv2dOpBuilder {
694  typedef Conv2dOp Table;
695  ::flatbuffers::FlatBufferBuilder &fbb_;
696  ::flatbuffers::uoffset_t start_;
697  void add_input(::flatbuffers::Offset<tt::target::ttnn::TensorRef> input) {
698  fbb_.AddOffset(Conv2dOp::VT_INPUT, input);
699  }
700  void add_weight(::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight) {
701  fbb_.AddOffset(Conv2dOp::VT_WEIGHT, weight);
702  }
703  void add_bias(::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias) {
704  fbb_.AddOffset(Conv2dOp::VT_BIAS, bias);
705  }
706  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
707  fbb_.AddOffset(Conv2dOp::VT_OUT, out);
708  }
709  void add_device(::flatbuffers::Offset<tt::target::DeviceRef> device) {
710  fbb_.AddOffset(Conv2dOp::VT_DEVICE, device);
711  }
712  void add_in_channels(uint32_t in_channels) {
713  fbb_.AddElement<uint32_t>(Conv2dOp::VT_IN_CHANNELS, in_channels, 0);
714  }
715  void add_out_channels(uint32_t out_channels) {
716  fbb_.AddElement<uint32_t>(Conv2dOp::VT_OUT_CHANNELS, out_channels, 0);
717  }
718  void add_batch_size(uint32_t batch_size) {
719  fbb_.AddElement<uint32_t>(Conv2dOp::VT_BATCH_SIZE, batch_size, 0);
720  }
721  void add_input_height(uint32_t input_height) {
722  fbb_.AddElement<uint32_t>(Conv2dOp::VT_INPUT_HEIGHT, input_height, 0);
723  }
724  void add_input_width(uint32_t input_width) {
725  fbb_.AddElement<uint32_t>(Conv2dOp::VT_INPUT_WIDTH, input_width, 0);
726  }
727  void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size) {
728  fbb_.AddOffset(Conv2dOp::VT_KERNEL_SIZE, kernel_size);
729  }
730  void add_stride(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride) {
731  fbb_.AddOffset(Conv2dOp::VT_STRIDE, stride);
732  }
733  void add_padding(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding) {
734  fbb_.AddOffset(Conv2dOp::VT_PADDING, padding);
735  }
736  void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation) {
737  fbb_.AddOffset(Conv2dOp::VT_DILATION, dilation);
738  }
739  void add_groups(uint32_t groups) {
740  fbb_.AddElement<uint32_t>(Conv2dOp::VT_GROUPS, groups, 0);
741  }
742  void add_conv2d_config(::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> conv2d_config) {
743  fbb_.AddOffset(Conv2dOp::VT_CONV2D_CONFIG, conv2d_config);
744  }
745  explicit Conv2dOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
746  : fbb_(_fbb) {
747  start_ = fbb_.StartTable();
748  }
749  ::flatbuffers::Offset<Conv2dOp> Finish() {
750  const auto end = fbb_.EndTable(start_);
751  auto o = ::flatbuffers::Offset<Conv2dOp>(end);
752  return o;
753  }
754 };
755 
756 inline ::flatbuffers::Offset<Conv2dOp> CreateConv2dOp(
757  ::flatbuffers::FlatBufferBuilder &_fbb,
758  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> input = 0,
759  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight = 0,
760  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
761  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
762  ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
763  uint32_t in_channels = 0,
764  uint32_t out_channels = 0,
765  uint32_t batch_size = 0,
766  uint32_t input_height = 0,
767  uint32_t input_width = 0,
768  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size = 0,
769  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride = 0,
770  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding = 0,
771  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation = 0,
772  uint32_t groups = 0,
773  ::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> conv2d_config = 0) {
774  Conv2dOpBuilder builder_(_fbb);
775  builder_.add_conv2d_config(conv2d_config);
776  builder_.add_groups(groups);
777  builder_.add_dilation(dilation);
778  builder_.add_padding(padding);
779  builder_.add_stride(stride);
780  builder_.add_kernel_size(kernel_size);
781  builder_.add_input_width(input_width);
782  builder_.add_input_height(input_height);
783  builder_.add_batch_size(batch_size);
784  builder_.add_out_channels(out_channels);
785  builder_.add_in_channels(in_channels);
786  builder_.add_device(device);
787  builder_.add_out(out);
788  builder_.add_bias(bias);
789  builder_.add_weight(weight);
790  builder_.add_input(input);
791  return builder_.Finish();
792 }
793 
794 struct Conv2dOp::Traits {
795  using type = Conv2dOp;
796  static auto constexpr Create = CreateConv2dOp;
797 };
798 
799 inline ::flatbuffers::Offset<Conv2dOp> CreateConv2dOpDirect(
800  ::flatbuffers::FlatBufferBuilder &_fbb,
801  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> input = 0,
802  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight = 0,
803  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
804  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
805  ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
806  uint32_t in_channels = 0,
807  uint32_t out_channels = 0,
808  uint32_t batch_size = 0,
809  uint32_t input_height = 0,
810  uint32_t input_width = 0,
811  const std::vector<int32_t> *kernel_size = nullptr,
812  const std::vector<int32_t> *stride = nullptr,
813  const std::vector<int32_t> *padding = nullptr,
814  const std::vector<int32_t> *dilation = nullptr,
815  uint32_t groups = 0,
816  ::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> conv2d_config = 0) {
817  auto kernel_size__ = kernel_size ? _fbb.CreateVector<int32_t>(*kernel_size) : 0;
818  auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
819  auto padding__ = padding ? _fbb.CreateVector<int32_t>(*padding) : 0;
820  auto dilation__ = dilation ? _fbb.CreateVector<int32_t>(*dilation) : 0;
821  return tt::target::ttnn::CreateConv2dOp(
822  _fbb,
823  input,
824  weight,
825  bias,
826  out,
827  device,
828  in_channels,
829  out_channels,
830  batch_size,
831  input_height,
832  input_width,
833  kernel_size__,
834  stride__,
835  padding__,
836  dilation__,
837  groups,
838  conv2d_config);
839 }
840 
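Usage note (illustrative, not generated): a minimal sketch of emitting a Conv2dOp table via CreateConv2dOpDirect. The input/weight/output TensorRef and DeviceRef offsets are assumed to exist already; the bias and conv2d_config fields are left unset (offset 0), and the shapes are made-up values.

// Sketch under the assumption that the referenced tables were built earlier.
::flatbuffers::Offset<tt::target::ttnn::Conv2dOp> buildExampleConv2dOp(
    ::flatbuffers::FlatBufferBuilder &fbb,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> input,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight,
    ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out,
    ::flatbuffers::Offset<tt::target::DeviceRef> device) {
  const std::vector<int32_t> kernel = {3, 3}, stride = {1, 1},
                             padding = {1, 1}, dilation = {1, 1};
  return tt::target::ttnn::CreateConv2dOpDirect(
      fbb, input, weight, /*bias=*/0, out, device,
      /*in_channels=*/64, /*out_channels=*/64, /*batch_size=*/1,
      /*input_height=*/224, /*input_width=*/224,
      &kernel, &stride, &padding, &dilation,
      /*groups=*/1, /*conv2d_config=*/0);
}

Note that CreateConv2dOp adds fields to the builder in roughly descending size order (offsets and 32-bit scalars first), which is the standard FlatBuffers codegen layout strategy; callers only see the declared parameter order.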
841 struct ConvTranspose2dOp FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
842  typedef ConvTranspose2dOpBuilder Builder;
843  struct Traits;
844  static FLATBUFFERS_CONSTEXPR_CPP11 const char *GetFullyQualifiedName() {
845  return "tt.target.ttnn.ConvTranspose2dOp";
846  }
847  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
848  VT_INPUT = 4,
849  VT_WEIGHT = 6,
850  VT_BIAS = 8,
851  VT_OUT = 10,
852  VT_DEVICE = 12,
853  VT_IN_CHANNELS = 14,
854  VT_OUT_CHANNELS = 16,
855  VT_BATCH_SIZE = 18,
856  VT_INPUT_HEIGHT = 20,
857  VT_INPUT_WIDTH = 22,
858  VT_KERNEL_SIZE = 24,
859  VT_STRIDE = 26,
860  VT_PADDING = 28,
861  VT_OUTPUT_PADDING = 30,
862  VT_DILATION = 32,
863  VT_GROUPS = 34,
864  VT_MEMORY_CONFIG = 36
865  };
866  const tt::target::ttnn::TensorRef *input() const {
867  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_INPUT);
868  }
869  const tt::target::ttnn::TensorRef *weight() const {
870  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_WEIGHT);
871  }
872  const tt::target::ttnn::TensorRef *bias() const {
873  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_BIAS);
874  }
875  const tt::target::ttnn::TensorRef *out() const {
876  return GetPointer<const tt::target::ttnn::TensorRef *>(VT_OUT);
877  }
878  const tt::target::DeviceRef *device() const {
879  return GetPointer<const tt::target::DeviceRef *>(VT_DEVICE);
880  }
881  uint32_t in_channels() const {
882  return GetField<uint32_t>(VT_IN_CHANNELS, 0);
883  }
884  uint32_t out_channels() const {
885  return GetField<uint32_t>(VT_OUT_CHANNELS, 0);
886  }
887  uint32_t batch_size() const {
888  return GetField<uint32_t>(VT_BATCH_SIZE, 0);
889  }
890  uint32_t input_height() const {
891  return GetField<uint32_t>(VT_INPUT_HEIGHT, 0);
892  }
893  uint32_t input_width() const {
894  return GetField<uint32_t>(VT_INPUT_WIDTH, 0);
895  }
896  const ::flatbuffers::Vector<int32_t> *kernel_size() const {
897  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_KERNEL_SIZE);
898  }
899  const ::flatbuffers::Vector<int32_t> *stride() const {
900  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_STRIDE);
901  }
902  const ::flatbuffers::Vector<int32_t> *padding() const {
903  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_PADDING);
904  }
905  const ::flatbuffers::Vector<int32_t> *output_padding() const {
906  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_OUTPUT_PADDING);
907  }
908  const ::flatbuffers::Vector<int32_t> *dilation() const {
909  return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_DILATION);
910  }
911  uint32_t groups() const {
912  return GetField<uint32_t>(VT_GROUPS, 0);
913  }
914  const tt::target::ttnn::MemoryConfig *memory_config() const {
915  return GetPointer<const tt::target::ttnn::MemoryConfig *>(VT_MEMORY_CONFIG);
916  }
917  bool Verify(::flatbuffers::Verifier &verifier) const {
918  return VerifyTableStart(verifier) &&
919  VerifyOffset(verifier, VT_INPUT) &&
920  verifier.VerifyTable(input()) &&
921  VerifyOffset(verifier, VT_WEIGHT) &&
922  verifier.VerifyTable(weight()) &&
923  VerifyOffset(verifier, VT_BIAS) &&
924  verifier.VerifyTable(bias()) &&
925  VerifyOffset(verifier, VT_OUT) &&
926  verifier.VerifyTable(out()) &&
927  VerifyOffset(verifier, VT_DEVICE) &&
928  verifier.VerifyTable(device()) &&
929  VerifyField<uint32_t>(verifier, VT_IN_CHANNELS, 4) &&
930  VerifyField<uint32_t>(verifier, VT_OUT_CHANNELS, 4) &&
931  VerifyField<uint32_t>(verifier, VT_BATCH_SIZE, 4) &&
932  VerifyField<uint32_t>(verifier, VT_INPUT_HEIGHT, 4) &&
933  VerifyField<uint32_t>(verifier, VT_INPUT_WIDTH, 4) &&
934  VerifyOffset(verifier, VT_KERNEL_SIZE) &&
935  verifier.VerifyVector(kernel_size()) &&
936  VerifyOffset(verifier, VT_STRIDE) &&
937  verifier.VerifyVector(stride()) &&
938  VerifyOffset(verifier, VT_PADDING) &&
939  verifier.VerifyVector(padding()) &&
940  VerifyOffset(verifier, VT_OUTPUT_PADDING) &&
941  verifier.VerifyVector(output_padding()) &&
942  VerifyOffset(verifier, VT_DILATION) &&
943  verifier.VerifyVector(dilation()) &&
944  VerifyField<uint32_t>(verifier, VT_GROUPS, 4) &&
945  VerifyOffset(verifier, VT_MEMORY_CONFIG) &&
946  verifier.VerifyTable(memory_config()) &&
947  verifier.EndTable();
948  }
949 };
950 
951 struct ConvTranspose2dOpBuilder {
952  typedef ConvTranspose2dOp Table;
953  ::flatbuffers::FlatBufferBuilder &fbb_;
954  ::flatbuffers::uoffset_t start_;
955  void add_input(::flatbuffers::Offset<tt::target::ttnn::TensorRef> input) {
956  fbb_.AddOffset(ConvTranspose2dOp::VT_INPUT, input);
957  }
958  void add_weight(::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight) {
959  fbb_.AddOffset(ConvTranspose2dOp::VT_WEIGHT, weight);
960  }
961  void add_bias(::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias) {
962  fbb_.AddOffset(ConvTranspose2dOp::VT_BIAS, bias);
963  }
964  void add_out(::flatbuffers::Offset<tt::target::ttnn::TensorRef> out) {
965  fbb_.AddOffset(ConvTranspose2dOp::VT_OUT, out);
966  }
967  void add_device(::flatbuffers::Offset<tt::target::DeviceRef> device) {
968  fbb_.AddOffset(ConvTranspose2dOp::VT_DEVICE, device);
969  }
970  void add_in_channels(uint32_t in_channels) {
971  fbb_.AddElement<uint32_t>(ConvTranspose2dOp::VT_IN_CHANNELS, in_channels, 0);
972  }
973  void add_out_channels(uint32_t out_channels) {
974  fbb_.AddElement<uint32_t>(ConvTranspose2dOp::VT_OUT_CHANNELS, out_channels, 0);
975  }
976  void add_batch_size(uint32_t batch_size) {
977  fbb_.AddElement<uint32_t>(ConvTranspose2dOp::VT_BATCH_SIZE, batch_size, 0);
978  }
979  void add_input_height(uint32_t input_height) {
980  fbb_.AddElement<uint32_t>(ConvTranspose2dOp::VT_INPUT_HEIGHT, input_height, 0);
981  }
982  void add_input_width(uint32_t input_width) {
983  fbb_.AddElement<uint32_t>(ConvTranspose2dOp::VT_INPUT_WIDTH, input_width, 0);
984  }
985  void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size) {
986  fbb_.AddOffset(ConvTranspose2dOp::VT_KERNEL_SIZE, kernel_size);
987  }
988  void add_stride(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride) {
989  fbb_.AddOffset(ConvTranspose2dOp::VT_STRIDE, stride);
990  }
991  void add_padding(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding) {
992  fbb_.AddOffset(ConvTranspose2dOp::VT_PADDING, padding);
993  }
994  void add_output_padding(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> output_padding) {
995  fbb_.AddOffset(ConvTranspose2dOp::VT_OUTPUT_PADDING, output_padding);
996  }
997  void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation) {
998  fbb_.AddOffset(ConvTranspose2dOp::VT_DILATION, dilation);
999  }
1000  void add_groups(uint32_t groups) {
1001  fbb_.AddElement<uint32_t>(ConvTranspose2dOp::VT_GROUPS, groups, 0);
1002  }
1003  void add_memory_config(::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config) {
1004  fbb_.AddOffset(ConvTranspose2dOp::VT_MEMORY_CONFIG, memory_config);
1005  }
1006  explicit ConvTranspose2dOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
1007  : fbb_(_fbb) {
1008  start_ = fbb_.StartTable();
1009  }
1010  ::flatbuffers::Offset<ConvTranspose2dOp> Finish() {
1011  const auto end = fbb_.EndTable(start_);
1012  auto o = ::flatbuffers::Offset<ConvTranspose2dOp>(end);
1013  return o;
1014  }
1015 };
1016 
1017 inline ::flatbuffers::Offset<ConvTranspose2dOp> CreateConvTranspose2dOp(
1018  ::flatbuffers::FlatBufferBuilder &_fbb,
1019  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> input = 0,
1020  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight = 0,
1021  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
1022  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
1023  ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
1024  uint32_t in_channels = 0,
1025  uint32_t out_channels = 0,
1026  uint32_t batch_size = 0,
1027  uint32_t input_height = 0,
1028  uint32_t input_width = 0,
1029  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel_size = 0,
1030  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride = 0,
1031  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> padding = 0,
1032  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> output_padding = 0,
1033  ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation = 0,
1034  uint32_t groups = 0,
1035  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config = 0) {
1036  ConvTranspose2dOpBuilder builder_(_fbb);
1037  builder_.add_memory_config(memory_config);
1038  builder_.add_groups(groups);
1039  builder_.add_dilation(dilation);
1040  builder_.add_output_padding(output_padding);
1041  builder_.add_padding(padding);
1042  builder_.add_stride(stride);
1043  builder_.add_kernel_size(kernel_size);
1044  builder_.add_input_width(input_width);
1045  builder_.add_input_height(input_height);
1046  builder_.add_batch_size(batch_size);
1047  builder_.add_out_channels(out_channels);
1048  builder_.add_in_channels(in_channels);
1049  builder_.add_device(device);
1050  builder_.add_out(out);
1051  builder_.add_bias(bias);
1052  builder_.add_weight(weight);
1053  builder_.add_input(input);
1054  return builder_.Finish();
1055 }
1056 
1057 struct ConvTranspose2dOp::Traits {
1058  using type = ConvTranspose2dOp;
1059  static auto constexpr Create = CreateConvTranspose2dOp;
1060 };
1061 
1062 inline ::flatbuffers::Offset<ConvTranspose2dOp> CreateConvTranspose2dOpDirect(
1063  ::flatbuffers::FlatBufferBuilder &_fbb,
1064  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> input = 0,
1065  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> weight = 0,
1066  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> bias = 0,
1067  ::flatbuffers::Offset<tt::target::ttnn::TensorRef> out = 0,
1068  ::flatbuffers::Offset<tt::target::DeviceRef> device = 0,
1069  uint32_t in_channels = 0,
1070  uint32_t out_channels = 0,
1071  uint32_t batch_size = 0,
1072  uint32_t input_height = 0,
1073  uint32_t input_width = 0,
1074  const std::vector<int32_t> *kernel_size = nullptr,
1075  const std::vector<int32_t> *stride = nullptr,
1076  const std::vector<int32_t> *padding = nullptr,
1077  const std::vector<int32_t> *output_padding = nullptr,
1078  const std::vector<int32_t> *dilation = nullptr,
1079  uint32_t groups = 0,
1080  ::flatbuffers::Offset<tt::target::ttnn::MemoryConfig> memory_config = 0) {
1081  auto kernel_size__ = kernel_size ? _fbb.CreateVector<int32_t>(*kernel_size) : 0;
1082  auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
1083  auto padding__ = padding ? _fbb.CreateVector<int32_t>(*padding) : 0;
1084  auto output_padding__ = output_padding ? _fbb.CreateVector<int32_t>(*output_padding) : 0;
1085  auto dilation__ = dilation ? _fbb.CreateVector<int32_t>(*dilation) : 0;
1086  return tt::target::ttnn::CreateConvTranspose2dOp(
1087  _fbb,
1088  input,
1089  weight,
1090  bias,
1091  out,
1092  device,
1093  in_channels,
1094  out_channels,
1095  batch_size,
1096  input_height,
1097  input_width,
1098  kernel_size__,
1099  stride__,
1100  padding__,
1101  output_padding__,
1102  dilation__,
1103  groups,
1104  memory_config);
1105 }
1106 
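Read-side note (illustrative, not part of the generated file): once the containing buffer has been verified, the accessors above read directly out of the buffer. Table, string, and vector fields may return null when the field was never written. The helper below is a hypothetical example; it assumes the ConvTranspose2dOp pointer came from another generated accessor on an already verified buffer.

// Sketch: summarizing a ConvTranspose2dOp from a verified buffer.
#include <cstdio>

void printConvTranspose2dSummary(const tt::target::ttnn::ConvTranspose2dOp *op) {
  if (op == nullptr) return;
  const auto *kernel = op->kernel_size();  // vector fields may be null if unset
  std::printf("in_channels=%u out_channels=%u kernel=%dx%d has_output_padding=%d\n",
              op->in_channels(), op->out_channels(),
              kernel && kernel->size() == 2 ? static_cast<int>(kernel->Get(0)) : 0,
              kernel && kernel->size() == 2 ? static_cast<int>(kernel->Get(1)) : 0,
              op->output_padding() != nullptr ? 1 : 0);
}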
1107 } // namespace ttnn
1108 } // namespace target
1109 } // namespace tt
1110 
1111 #endif // FLATBUFFERS_GENERATED_CONV_TT_TARGET_TTNN_H_
VT_PREPROCESS_WEIGHTS_ON_DEVICE
Definition: conv_generated.h:55
VT_INPUT
Definition: conv_generated.h:596
VT_DTYPE
Definition: conv_generated.h:42
VT_REALLOCATE_HALO_OUTPUT
Definition: conv_generated.h:46
VT_DILATION
Definition: conv_generated.h:332
VT_WEIGHT_TENSOR
Definition: conv_generated.h:319
VT_ACT_BLOCK_W_DIV
Definition: conv_generated.h:48
VT_PADDING
Definition: conv_generated.h:331
VT_WEIGHT
Definition: conv_generated.h:597
VT_ENABLE_WEIGHTS_DOUBLE_BUFFER
Definition: conv_generated.h:58
VT_OVERRIDE_SHARDING_CONFIG
Definition: conv_generated.h:50
VT_STRIDE
Definition: conv_generated.h:330
VT_KERNEL_SIZE
Definition: conv_generated.h:329
VT_BATCH_SIZE
Definition: conv_generated.h:326
VT_CORE_GRID
Definition: conv_generated.h:52
VT_GROUPS
Definition: conv_generated.h:334
VT_WEIGHTS_FORMAT
Definition: conv_generated.h:323
VT_ENABLE_SPLIT_READER
Definition: conv_generated.h:59
VT_DEALLOCATE_ACTIVATION
Definition: conv_generated.h:45
VT_TRANSPOSE_SHARDS
Definition: conv_generated.h:53
VT_SHARD_LAYOUT
Definition: conv_generated.h:51
VT_OUTPUT_LAYOUT
Definition: conv_generated.h:54
VT_WEIGHTS_DTYPE
Definition: conv_generated.h:43
VT_INPUT_HEIGHT
Definition: conv_generated.h:327
VT_INPUT_MEMORY_CONFIG
Definition: conv_generated.h:321
VT_ENABLE_ACT_DOUBLE_BUFFER
Definition: conv_generated.h:57
VT_IN_CHANNELS
Definition: conv_generated.h:324
VT_DEVICE
Definition: conv_generated.h:335
VT_INPUT_WIDTH
Definition: conv_generated.h:328
VT_ACT_BLOCK_H_OVERRIDE
Definition: conv_generated.h:47
VT_BIAS
Definition: conv_generated.h:598
VT_OUT
Definition: conv_generated.h:320
VT_RESHARD_IF_NOT_OPTIMAL
Definition: conv_generated.h:49
VT_OUTPUT_PADDING
Definition: conv_generated.h:861
VT_ACTIVATION
Definition: conv_generated.h:44
VT_OUT_CHANNELS
Definition: conv_generated.h:325
VT_ALWAYS_PREPROCESS_WEIGHTS
Definition: conv_generated.h:56
VT_INPUT_TENSOR_LAYOUT
Definition: conv_generated.h:322
VT_HAS_BIAS
Definition: conv_generated.h:333
VT_MEMORY_CONFIG
Definition: data_movement_generated.h:278
inline ::flatbuffers::Offset< Conv2dConfig > CreateConv2dConfig(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Optional< tt::target::DataType > dtype=::flatbuffers::nullopt, ::flatbuffers::Optional< tt::target::DataType > weights_dtype=::flatbuffers::nullopt, ::flatbuffers::Offset<::flatbuffers::String > activation=0, ::flatbuffers::Optional< bool > deallocate_activation=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > reallocate_halo_output=::flatbuffers::nullopt, ::flatbuffers::Optional< uint32_t > act_block_h_override=::flatbuffers::nullopt, ::flatbuffers::Optional< uint32_t > act_block_w_div=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > reshard_if_not_optimal=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > override_sharding_config=::flatbuffers::nullopt, ::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout > shard_layout=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > core_grid=0, ::flatbuffers::Optional< bool > transpose_shards=::flatbuffers::nullopt, ::flatbuffers::Optional< tt::target::TensorLayout > output_layout=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > preprocess_weights_on_device=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > always_preprocess_weights=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > enable_act_double_buffer=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > enable_weights_double_buffer=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > enable_split_reader=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > enable_subblock_padding=::flatbuffers::nullopt)
Definition: conv_generated.h:218
inline ::flatbuffers::Offset< Conv2dOp > CreateConv2dOp(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, uint32_t groups=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0)
Definition: conv_generated.h:756
inline ::flatbuffers::Offset< Conv2dOp > CreateConv2dOpDirect(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *dilation=nullptr, uint32_t groups=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0)
Definition: conv_generated.h:799
inline ::flatbuffers::Offset< ConvTranspose2dOp > CreateConvTranspose2dOp(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> output_padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, uint32_t groups=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0)
Definition: conv_generated.h:1017
inline ::flatbuffers::Offset< PrepareConv2dWeightsOp > CreatePrepareConv2dWeightsOp(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight_tensor=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > input_memory_config=0, tt::target::TensorLayout input_tensor_layout=tt::target::TensorLayout::RowMajor, ::flatbuffers::Offset<::flatbuffers::String > weights_format=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding=0, ::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation=0, bool has_bias=false, uint32_t groups=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0)
Definition: conv_generated.h:495
inline ::flatbuffers::Offset< ConvTranspose2dOp > CreateConvTranspose2dOpDirect(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > input=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *output_padding=nullptr, const std::vector< int32_t > *dilation=nullptr, uint32_t groups=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config=0)
Definition: conv_generated.h:1062
inline ::flatbuffers::Offset< PrepareConv2dWeightsOp > CreatePrepareConv2dWeightsOpDirect(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight_tensor=0, ::flatbuffers::Offset< tt::target::ttnn::TensorRef > out=0, ::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > input_memory_config=0, tt::target::TensorLayout input_tensor_layout=tt::target::TensorLayout::RowMajor, const char *weights_format=nullptr, uint32_t in_channels=0, uint32_t out_channels=0, uint32_t batch_size=0, uint32_t input_height=0, uint32_t input_width=0, const std::vector< int32_t > *kernel_size=nullptr, const std::vector< int32_t > *stride=nullptr, const std::vector< int32_t > *padding=nullptr, const std::vector< int32_t > *dilation=nullptr, bool has_bias=false, uint32_t groups=0, ::flatbuffers::Offset< tt::target::DeviceRef > device=0, ::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config=0)
Definition: conv_generated.h:542
TensorMemoryLayout
Definition: types_generated.h:62
inline ::flatbuffers::Offset< Conv2dConfig > CreateConv2dConfigDirect(::flatbuffers::FlatBufferBuilder &_fbb, ::flatbuffers::Optional< tt::target::DataType > dtype=::flatbuffers::nullopt, ::flatbuffers::Optional< tt::target::DataType > weights_dtype=::flatbuffers::nullopt, const char *activation=nullptr, ::flatbuffers::Optional< bool > deallocate_activation=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > reallocate_halo_output=::flatbuffers::nullopt, ::flatbuffers::Optional< uint32_t > act_block_h_override=::flatbuffers::nullopt, ::flatbuffers::Optional< uint32_t > act_block_w_div=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > reshard_if_not_optimal=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > override_sharding_config=::flatbuffers::nullopt, ::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout > shard_layout=::flatbuffers::nullopt, ::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > core_grid=0, ::flatbuffers::Optional< bool > transpose_shards=::flatbuffers::nullopt, ::flatbuffers::Optional< tt::target::TensorLayout > output_layout=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > preprocess_weights_on_device=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > always_preprocess_weights=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > enable_act_double_buffer=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > enable_weights_double_buffer=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > enable_split_reader=::flatbuffers::nullopt, ::flatbuffers::Optional< bool > enable_subblock_padding=::flatbuffers::nullopt)
Definition: conv_generated.h:267
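Every Conv2dConfig field is optional, so CreateConv2dConfigDirect can be called with only the leading fields of interest; trailing parameters keep their ::flatbuffers::nullopt / null defaults and are simply omitted from the serialized table. A sketch with illustrative values:

auto config = tt::target::ttnn::CreateConv2dConfigDirect(
    fbb,
    /*dtype=*/::flatbuffers::nullopt,
    /*weights_dtype=*/::flatbuffers::nullopt,
    /*activation=*/"relu",                   // illustrative value only
    /*deallocate_activation=*/true,
    /*reallocate_halo_output=*/::flatbuffers::nullopt,
    /*act_block_h_override=*/32u);
// Remaining parameters (act_block_w_div ... enable_subblock_padding) keep
// their ::flatbuffers::nullopt / 0 defaults.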
TensorLayout
Definition: types_generated.h:250
DataType
Definition: types_generated.h:81
Definition: debug_info_generated.h:18
Definition: debug_info_generated.h:36
Definition: conv_generated.h:146
void add_enable_subblock_padding(bool enable_subblock_padding)
Definition: conv_generated.h:204
::flatbuffers::FlatBufferBuilder & fbb_
Definition: conv_generated.h:148
void add_activation(::flatbuffers::Offset<::flatbuffers::String > activation)
Definition: conv_generated.h:156
void add_enable_split_reader(bool enable_split_reader)
Definition: conv_generated.h:201
void add_reallocate_halo_output(bool reallocate_halo_output)
Definition: conv_generated.h:162
::flatbuffers::uoffset_t start_
Definition: conv_generated.h:149
void add_weights_dtype(tt::target::DataType weights_dtype)
Definition: conv_generated.h:153
void add_enable_act_double_buffer(bool enable_act_double_buffer)
Definition: conv_generated.h:195
void add_shard_layout(tt::target::ttnn::TensorMemoryLayout shard_layout)
Definition: conv_generated.h:177
void add_core_grid(::flatbuffers::Offset< tt::target::ttnn::CoreRangeSet > core_grid)
Definition: conv_generated.h:180
void add_transpose_shards(bool transpose_shards)
Definition: conv_generated.h:183
void add_override_sharding_config(bool override_sharding_config)
Definition: conv_generated.h:174
void add_preprocess_weights_on_device(bool preprocess_weights_on_device)
Definition: conv_generated.h:189
void add_enable_weights_double_buffer(bool enable_weights_double_buffer)
Definition: conv_generated.h:198
::flatbuffers::Offset< Conv2dConfig > Finish()
Definition: conv_generated.h:211
void add_reshard_if_not_optimal(bool reshard_if_not_optimal)
Definition: conv_generated.h:171
void add_dtype(tt::target::DataType dtype)
Definition: conv_generated.h:150
void add_act_block_h_override(uint32_t act_block_h_override)
Definition: conv_generated.h:165
void add_deallocate_activation(bool deallocate_activation)
Definition: conv_generated.h:159
Conv2dConfigBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: conv_generated.h:207
void add_always_preprocess_weights(bool always_preprocess_weights)
Definition: conv_generated.h:192
Conv2dConfig Table
Definition: conv_generated.h:147
void add_act_block_w_div(uint32_t act_block_w_div)
Definition: conv_generated.h:168
void add_output_layout(tt::target::TensorLayout output_layout)
Definition: conv_generated.h:186
Definition: conv_generated.h:262
static constexpr auto Create
Definition: conv_generated.h:264
Conv2dConfig type
Definition: conv_generated.h:263
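The add_* members listed above are the builder-class route to the same Conv2dConfig table. Child objects such as the activation string must be serialized before the builder is constructed, since FlatBuffers does not allow nested construction; only fields that are explicitly added appear in the finished table. A sketch, with the "relu" value purely illustrative:

auto activation = fbb.CreateString("relu");  // must precede the builder
tt::target::ttnn::Conv2dConfigBuilder cfg_builder(fbb);
cfg_builder.add_activation(activation);
cfg_builder.add_deallocate_activation(true);
cfg_builder.add_act_block_h_override(32);
cfg_builder.add_enable_act_double_buffer(true);
::flatbuffers::Offset<tt::target::ttnn::Conv2dConfig> cfg = cfg_builder.Finish();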
Definition: conv_generated.h:693
void add_in_channels(uint32_t in_channels)
Definition: conv_generated.h:712
void add_conv2d_config(::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config)
Definition: conv_generated.h:742
void add_input_width(uint32_t input_width)
Definition: conv_generated.h:724
void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation)
Definition: conv_generated.h:736
Conv2dOp Table
Definition: conv_generated.h:694
void add_out_channels(uint32_t out_channels)
Definition: conv_generated.h:715
void add_padding(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding)
Definition: conv_generated.h:733
void add_stride(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride)
Definition: conv_generated.h:730
::flatbuffers::Offset< Conv2dOp > Finish()
Definition: conv_generated.h:749
void add_device(::flatbuffers::Offset< tt::target::DeviceRef > device)
Definition: conv_generated.h:709
Conv2dOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: conv_generated.h:745
void add_groups(uint32_t groups)
Definition: conv_generated.h:739
void add_out(::flatbuffers::Offset< tt::target::ttnn::TensorRef > out)
Definition: conv_generated.h:706
void add_weight(::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight)
Definition: conv_generated.h:700
void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size)
Definition: conv_generated.h:727
void add_input(::flatbuffers::Offset< tt::target::ttnn::TensorRef > input)
Definition: conv_generated.h:697
::flatbuffers::uoffset_t start_
Definition: conv_generated.h:696
void add_batch_size(uint32_t batch_size)
Definition: conv_generated.h:718
void add_input_height(uint32_t input_height)
Definition: conv_generated.h:721
void add_bias(::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias)
Definition: conv_generated.h:703
::flatbuffers::FlatBufferBuilder & fbb_
Definition: conv_generated.h:695
Definition: conv_generated.h:794
static constexpr auto Create
Definition: conv_generated.h:796
Conv2dOp type
Definition: conv_generated.h:795
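Conv2dOpBuilder follows the same discipline: create the int32 vectors (and the TensorRef/DeviceRef children, assumed here to exist as input, weight, bias, out, device) first, then add fields and call Finish(). A sketch with illustrative shapes:

auto kernel_size = fbb.CreateVector(std::vector<int32_t>{3, 3});
auto stride      = fbb.CreateVector(std::vector<int32_t>{1, 1});
auto padding     = fbb.CreateVector(std::vector<int32_t>{1, 1});
auto dilation    = fbb.CreateVector(std::vector<int32_t>{1, 1});
tt::target::ttnn::Conv2dOpBuilder op_builder(fbb);
op_builder.add_input(input);                 // assumed pre-built offsets
op_builder.add_weight(weight);
op_builder.add_bias(bias);                   // bias is optional
op_builder.add_out(out);
op_builder.add_device(device);
op_builder.add_in_channels(64);
op_builder.add_out_channels(32);
op_builder.add_batch_size(1);
op_builder.add_input_height(28);
op_builder.add_input_width(28);
op_builder.add_kernel_size(kernel_size);
op_builder.add_stride(stride);
op_builder.add_padding(padding);
op_builder.add_dilation(dilation);
op_builder.add_groups(1);
auto conv_op = op_builder.Finish();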
Definition: conv_generated.h:951
void add_input(::flatbuffers::Offset< tt::target::ttnn::TensorRef > input)
Definition: conv_generated.h:955
void add_groups(uint32_t groups)
Definition: conv_generated.h:1000
void add_device(::flatbuffers::Offset< tt::target::DeviceRef > device)
Definition: conv_generated.h:967
void add_out(::flatbuffers::Offset< tt::target::ttnn::TensorRef > out)
Definition: conv_generated.h:964
ConvTranspose2dOp Table
Definition: conv_generated.h:952
void add_input_height(uint32_t input_height)
Definition: conv_generated.h:979
void add_weight(::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight)
Definition: conv_generated.h:958
::flatbuffers::Offset< ConvTranspose2dOp > Finish()
Definition: conv_generated.h:1010
ConvTranspose2dOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: conv_generated.h:1006
::flatbuffers::uoffset_t start_
Definition: conv_generated.h:954
void add_in_channels(uint32_t in_channels)
Definition: conv_generated.h:970
void add_memory_config(::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > memory_config)
Definition: conv_generated.h:1003
void add_stride(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride)
Definition: conv_generated.h:988
void add_batch_size(uint32_t batch_size)
Definition: conv_generated.h:976
void add_out_channels(uint32_t out_channels)
Definition: conv_generated.h:973
void add_bias(::flatbuffers::Offset< tt::target::ttnn::TensorRef > bias)
Definition: conv_generated.h:961
void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size)
Definition: conv_generated.h:985
::flatbuffers::FlatBufferBuilder & fbb_
Definition: conv_generated.h:953
void add_padding(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding)
Definition: conv_generated.h:991
void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation)
Definition: conv_generated.h:997
void add_output_padding(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> output_padding)
Definition: conv_generated.h:994
void add_input_width(uint32_t input_width)
Definition: conv_generated.h:982
Definition: conv_generated.h:1057
ConvTranspose2dOp type
Definition: conv_generated.h:1058
static constexpr auto Create
Definition: conv_generated.h:1059
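ConvTranspose2dOpBuilder mirrors Conv2dOpBuilder; output_padding and memory_config are the fields it adds on top. A shortened sketch (the remaining scalar and vector fields are set exactly as in the Conv2dOp example above):

auto output_padding = fbb.CreateVector(std::vector<int32_t>{1, 1});
tt::target::ttnn::ConvTranspose2dOpBuilder t_builder(fbb);
t_builder.add_input(input);                  // assumed pre-built offsets
t_builder.add_weight(weight);
t_builder.add_out(out);
t_builder.add_device(device);
t_builder.add_output_padding(output_padding);
t_builder.add_memory_config(memory_config);
auto transpose_op2 = t_builder.Finish();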
const tt::target::ttnn::TensorRef * bias() const
Definition: conv_generated.h:619
const tt::target::ttnn::TensorRef * weight_tensor() const
Definition: conv_generated.h:338
uint32_t in_channels() const
Definition: conv_generated.h:353
::flatbuffers::Optional< bool > preprocess_weights_on_device() const
Definition: conv_generated.h:101
uint32_t batch_size() const
Definition: conv_generated.h:359
const tt::target::ttnn::MemoryConfig * input_memory_config() const
Definition: conv_generated.h:344
ConvTranspose2dOpBuilder Builder
Definition: conv_generated.h:842
::flatbuffers::Optional< bool > enable_split_reader() const
Definition: conv_generated.h:113
uint32_t input_width() const
Definition: conv_generated.h:365
::flatbuffers::Optional< bool > enable_act_double_buffer() const
Definition: conv_generated.h:107
::flatbuffers::Optional< tt::target::ttnn::TensorMemoryLayout > shard_layout() const
Definition: conv_generated.h:89
const tt::target::ttnn::TensorRef * out() const
Definition: conv_generated.h:341
const tt::target::ttnn::MemoryConfig * memory_config() const
Definition: conv_generated.h:914
const tt::target::ttnn::Conv2dConfig * conv2d_config() const
Definition: conv_generated.h:389
::flatbuffers::Optional< tt::target::TensorLayout > output_layout() const
Definition: conv_generated.h:98
bool has_bias() const
Definition: conv_generated.h:380
const ::flatbuffers::Vector< int32_t > * dilation() const
Definition: conv_generated.h:377
const tt::target::ttnn::TensorRef * input() const
Definition: conv_generated.h:613
const ::flatbuffers::String * activation() const
Definition: conv_generated.h:68
uint32_t input_height() const
Definition: conv_generated.h:362
const ::flatbuffers::Vector< int32_t > * stride() const
Definition: conv_generated.h:371
::flatbuffers::Optional< tt::target::DataType > dtype() const
Definition: conv_generated.h:62
const ::flatbuffers::Vector< int32_t > * padding() const
Definition: conv_generated.h:374
const tt::target::ttnn::TensorRef * weight() const
Definition: conv_generated.h:616
Conv2dConfigBuilder Builder
Definition: conv_generated.h:36
const ::flatbuffers::Vector< int32_t > * output_padding() const
Definition: conv_generated.h:905
::flatbuffers::Optional< bool > override_sharding_config() const
Definition: conv_generated.h:86
Conv2dOpBuilder Builder
Definition: conv_generated.h:590
const ::flatbuffers::Vector< int32_t > * kernel_size() const
Definition: conv_generated.h:368
bool Verify(::flatbuffers::Verifier &verifier) const
Definition: conv_generated.h:119
::flatbuffers::Optional< bool > enable_subblock_padding() const
Definition: conv_generated.h:116
tt::target::TensorLayout input_tensor_layout() const
Definition: conv_generated.h:347
::flatbuffers::Optional< bool > transpose_shards() const
Definition: conv_generated.h:95
uint32_t groups() const
Definition: conv_generated.h:383
const ::flatbuffers::String * weights_format() const
Definition: conv_generated.h:350
::flatbuffers::Optional< bool > reshard_if_not_optimal() const
Definition: conv_generated.h:83
::flatbuffers::Optional< bool > enable_weights_double_buffer() const
Definition: conv_generated.h:110
uint32_t out_channels() const
Definition: conv_generated.h:356
const tt::target::ttnn::CoreRangeSet * core_grid() const
Definition: conv_generated.h:92
::flatbuffers::Optional< uint32_t > act_block_h_override() const
Definition: conv_generated.h:77
::flatbuffers::Optional< tt::target::DataType > weights_dtype() const
Definition: conv_generated.h:65
static FLATBUFFERS_CONSTEXPR_CPP11 const char * GetFullyQualifiedName()
Definition: conv_generated.h:38
::flatbuffers::Optional< bool > deallocate_activation() const
Definition: conv_generated.h:71
::flatbuffers::Optional< bool > reallocate_halo_output() const
Definition: conv_generated.h:74
const tt::target::DeviceRef * device() const
Definition: conv_generated.h:386
PrepareConv2dWeightsOpBuilder Builder
Definition: conv_generated.h:313
::flatbuffers::Optional< bool > always_preprocess_weights() const
Definition: conv_generated.h:104
::flatbuffers::Optional< uint32_t > act_block_w_div() const
Definition: conv_generated.h:80
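On the read side, the accessors listed above are plain getters on the deserialized table. A sketch that verifies a buffer assumed to carry a Conv2dOp at its root (whether Conv2dOp is actually used as a buffer root depends on the enclosing schema) and reads a few fields through the generic FlatBuffers verifier and GetRoot helpers:

bool ReadConv2dOp(const uint8_t *buf, size_t size) {
  ::flatbuffers::Verifier verifier(buf, size);
  if (!verifier.VerifyBuffer<tt::target::ttnn::Conv2dOp>(nullptr))
    return false;                            // structurally invalid buffer
  const auto *op = ::flatbuffers::GetRoot<tt::target::ttnn::Conv2dOp>(buf);
  uint32_t in_channels = op->in_channels();  // scalar accessor
  const auto *kernel = op->kernel_size();    // ::flatbuffers::Vector<int32_t>*
  bool has_2d_kernel = kernel && kernel->size() == 2;
  return in_channels > 0 && has_2d_kernel;
}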
Definition: conv_generated.h:426
void add_input_height(uint32_t input_height)
Definition: conv_generated.h:454
void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> dilation)
Definition: conv_generated.h:469
void add_input_tensor_layout(tt::target::TensorLayout input_tensor_layout)
Definition: conv_generated.h:439
PrepareConv2dWeightsOp Table
Definition: conv_generated.h:427
PrepareConv2dWeightsOpBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
Definition: conv_generated.h:484
::flatbuffers::FlatBufferBuilder & fbb_
Definition: conv_generated.h:428
void add_has_bias(bool has_bias)
Definition: conv_generated.h:472
void add_in_channels(uint32_t in_channels)
Definition: conv_generated.h:445
void add_padding(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> padding)
Definition: conv_generated.h:466
void add_out(::flatbuffers::Offset< tt::target::ttnn::TensorRef > out)
Definition: conv_generated.h:433
void add_device(::flatbuffers::Offset< tt::target::DeviceRef > device)
Definition: conv_generated.h:478
void add_weights_format(::flatbuffers::Offset<::flatbuffers::String > weights_format)
Definition: conv_generated.h:442
void add_conv2d_config(::flatbuffers::Offset< tt::target::ttnn::Conv2dConfig > conv2d_config)
Definition: conv_generated.h:481
void add_out_channels(uint32_t out_channels)
Definition: conv_generated.h:448
void add_input_memory_config(::flatbuffers::Offset< tt::target::ttnn::MemoryConfig > input_memory_config)
Definition: conv_generated.h:436
void add_kernel_size(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> kernel_size)
Definition: conv_generated.h:460
void add_stride(::flatbuffers::Offset<::flatbuffers::Vector< int32_t >> stride)
Definition: conv_generated.h:463
void add_groups(uint32_t groups)
Definition: conv_generated.h:475
void add_batch_size(uint32_t batch_size)
Definition: conv_generated.h:451
void add_weight_tensor(::flatbuffers::Offset< tt::target::ttnn::TensorRef > weight_tensor)
Definition: conv_generated.h:430
::flatbuffers::Offset< PrepareConv2dWeightsOp > Finish()
Definition: conv_generated.h:488
::flatbuffers::uoffset_t start_
Definition: conv_generated.h:429
void add_input_width(uint32_t input_width)
Definition: conv_generated.h:457
Definition: conv_generated.h:537
static constexpr auto Create
Definition: conv_generated.h:539
PrepareConv2dWeightsOp type
Definition: conv_generated.h:538
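PrepareConv2dWeightsOpBuilder is the field-at-a-time counterpart of the Direct helper shown earlier; strings and child tables again have to exist before the builder does. In the sketch below the offsets weight_tensor, out, input_memory_config, device, and conv2d_config are assumed pre-built, and "OIHW" is illustrative only.

auto weights_format = fbb.CreateString("OIHW");   // illustrative value only
tt::target::ttnn::PrepareConv2dWeightsOpBuilder prep_builder(fbb);
prep_builder.add_weight_tensor(weight_tensor);
prep_builder.add_out(out);
prep_builder.add_input_memory_config(input_memory_config);
prep_builder.add_input_tensor_layout(tt::target::TensorLayout::RowMajor);
prep_builder.add_weights_format(weights_format);
prep_builder.add_in_channels(64);
prep_builder.add_out_channels(32);
prep_builder.add_has_bias(true);
prep_builder.add_groups(1);
prep_builder.add_device(device);
prep_builder.add_conv2d_config(conv2d_config);
auto prepare_weights_op = prep_builder.Finish();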