ttnn.max_pool2d

Name | Input Shapes | Input Layouts | Attributes | Output Shapes | Output Layouts | PCC | ATOL
ttnn.max_pool2dtensor<[1,1,12544,128,bf16]>
tensor<[1,1,3136,128,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 12544 + d2, d3), memory_config: (12544, 128, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 3136 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 128 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 112 : si32
input_width: 112 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 3136 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,784,128,bf16]>
tensor<[1,1,196,128,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 784 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 128 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 28 : si32
input_width: 28 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,3136,128,bf16]>
tensor<[1,1,784,128,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 3136 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 784 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 128 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 56 : si32
input_width: 56 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 784 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,4096,128,bf16]>
tensor<[1,1,1024,128,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 4096 + d2, d3), memory_config: (4096, 128, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 1024 + d2, d3), memory_config: (1024, 128, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 128 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 64 : si32
input_width: 64 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,1024,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 1024 + d2, d3), memory_config: (1024, 128, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,784,16,bf16]>
tensor<[1,1,196,16,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 784 + d2, d3), memory_config: (784, 16, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 16, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 16 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 28 : si32
input_width: 28 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,196,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 16, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,5041,192,bf16]>
tensor<[1,1,1225,192,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5041 + d1 * 5041 + d2, d3), memory_config: (5041, 192, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1225 + d1 * 1225 + d2, d3), memory_config: (1225, 192, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 192 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 71 : si32
input_width: 71 : si32
kernel_height: 3 : si32
kernel_width: 3 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,1225,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1225 + d1 * 1225 + d2, d3), memory_config: (1225, 192, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,196,256,bf16]>
tensor<[1,1,49,256,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 49 + d2, d3), memory_config: (49, 256, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 256 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 14 : si32
input_width: 14 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,49,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 49 + d2, d3), memory_config: (49, 256, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,1024,256,bf16]>
tensor<[1,1,256,256,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 1024 + d2, d3), memory_config: (1024, 256, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 256 + d2, d3), memory_config: (256, 256, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 256 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 32 : si32
input_width: 32 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 256 + d2, d3), memory_config: (256, 256, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,3136,256,bf16]>
tensor<[1,1,784,256,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 3136 + d2, d3), memory_config: (3136, 256, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 784 + d2, d3), memory_config: (784, 256, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 256 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 56 : si32
input_width: 56 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,784,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 784 + d2, d3), memory_config: (784, 256, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,784,320,bf16]>
tensor<[1,1,196,320,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 784 + d2, d3), memory_config: (784, 320, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 320, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 320 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 28 : si32
input_width: 28 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,196,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 320, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,12544,32,bf16]>
tensor<[1,1,3136,32,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 12544 + d2, d3), memory_config: (12544, 32, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 3136 + d2, d3), memory_config: (3136, 32, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 32 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 112 : si32
input_width: 112 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,3136,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 3136 + d2, d3), memory_config: (3136, 32, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,65536,32,bf16]>
tensor<[1,1,16384,32,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 65536 + d1 * 65536 + d2, d3), memory_config: (65536, 32, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 16384 + d2, d3), memory_config: (16384, 32, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 32 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 256 : si32
input_width: 256 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,16384,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 16384 + d2, d3), memory_config: (16384, 32, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,1225,384,bf16]>
tensor<[1,1,289,384,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1225 + d1 * 1225 + d2, d3), memory_config: (1225, 384, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 289 + d2, d3), memory_config: (289, 384, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 384 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 35 : si32
input_width: 35 : si32
kernel_height: 3 : si32
kernel_width: 3 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,289,384,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 289 + d2, d3), memory_config: (289, 384, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,196,4,bf16]>
tensor<[1,1,49,4,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 4, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 49 + d2, d3), memory_config: (49, 4, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 4 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 14 : si32
input_width: 14 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,49,4,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 49 + d2, d3), memory_config: (49, 4, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,196,512,bf16]>
tensor<[1,1,49,512,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 49 + d2, d3), memory_config: (49, 512, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 512 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 14 : si32
input_width: 14 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,49,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 49 + d2, d3), memory_config: (49, 512, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,784,512,bf16]>
tensor<[1,1,196,512,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 784 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 512 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 28 : si32
input_width: 28 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,196,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,196,640,bf16]>
tensor<[1,1,49,640,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 640, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 49 + d2, d3), memory_config: (49, 640, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 640 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 14 : si32
input_width: 14 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,49,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 49 + d2, d3), memory_config: (49, 640, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,12544,64,bf16]>
tensor<[1,1,3136,64,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 12544 + d2, d3), memory_config: (12544, 64, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 3136 + d2, d3), memory_config: (3136, 64, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 64 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 112 : si32
input_width: 112 : si32
kernel_height: 3 : si32
kernel_width: 3 : si32
padding_height: 1 : si32
padding_width: 1 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,3136,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 3136 + d2, d3), memory_config: (3136, 64, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,16384,64,bf16]>
tensor<[1,1,4096,64,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 16384 + d2, d3), memory_config: (16384, 64, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 4096 + d2, d3), memory_config: (4096, 64, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 64 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 128 : si32
input_width: 128 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,4096,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 4096 + d2, d3), memory_config: (4096, 64, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,21609,64,bf16]>
tensor<[1,1,5329,64,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21609 + d1 * 21609 + d2, d3), memory_config: (21609, 64, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5329 + d1 * 5329 + d2, d3), memory_config: (5329, 64, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 64 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 147 : si32
input_width: 147 : si32
kernel_height: 3 : si32
kernel_width: 3 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,5329,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5329 + d1 * 5329 + d2, d3), memory_config: (5329, 64, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,50176,64,bf16]>
tensor<[1,1,12544,64,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 50176 + d1 * 50176 + d2, d3), memory_config: (50176, 64, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 12544 + d2, d3), memory_config: (12544, 64, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 64 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 224 : si32
input_width: 224 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,12544,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 12544 + d2, d3), memory_config: (12544, 64, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,576,64,bf16]>
tensor<[1,1,144,64,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 576 + d2, d3), memory_config: (576, 64, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 144 + d2, d3), memory_config: (144, 64, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 64 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 24 : si32
input_width: 24 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,144,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 144 + d2, d3), memory_config: (144, 64, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,230400,64,bf16]>
tensor<[1,1,57600,64,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 230400 + d1 * 230400 + d2, d3), memory_config: (230400, 64, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 57600 + d1 * 57600 + d2, d3), memory_config: (57600, 64, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 64 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 360 : si32
input_width: 640 : si32
kernel_height: 3 : si32
kernel_width: 3 : si32
padding_height: 1 : si32
padding_width: 1 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,57600,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 57600 + d1 * 57600 + d2, d3), memory_config: (57600, 64, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,3136,64,bf16]>
tensor<[1,1,784,64,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 3136 + d2, d3), memory_config: (3136, 64, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 784 + d2, d3), memory_config: (784, 64, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 64 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 56 : si32
input_width: 56 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,784,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 784 + d2, d3), memory_config: (784, 64, 'bf16', 'dram')nannan
ttnn.max_pool2dtensor<[1,1,196,832,bf16]>
tensor<[1,1,49,832,bf16]>
!tt.device<#device>
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 196 + d2, d3), memory_config: (196, 832, 'bf16', 'dram')
mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 49 + d2, d3), memory_config: (49, 832, 'bf16', 'dram')
batch_size: 1 : si32
ceil_mode: False
channels: 832 : si32
dilation_height: 1 : si32
dilation_width: 1 : si32
input_height: 14 : si32
input_width: 14 : si32
kernel_height: 2 : si32
kernel_width: 2 : si32
padding_height: 0 : si32
padding_width: 0 : si32
stride_height: 2 : si32
stride_width: 2 : si32
tensor<[1,1,49,832,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 49 + d2, d3), memory_config: (49, 832, 'bf16', 'dram')nannan