ttnn.empty

Name | Input Shapes | Input Layouts | Attributes | Output Shapes | Output Layouts | PCC | ATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<1x12x12x12>
tensor<[1,12,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (5, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<1x12x12x12>
tensor<[1,12,12,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (5, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<1x12x14x14>
tensor<[1,12,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 168 + d1 * 14 + d2, d3), memory_config: (6, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<1x12x14x14>
tensor<[1,12,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 168 + d1 * 14 + d2, d3), memory_config: (6, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<563x47>>, >
shape: #ttnn.shape<1x12x1500x1500>
tensor<[1,12,1500,1500,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18000 + d1 * 1500 + d2, d3), memory_config: (563, 47, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<563x47>>, >
shape: #ttnn.shape<1x12x1500x1500>
tensor<[1,12,1500,1500,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18000 + d1 * 1500 + d2, d3), memory_config: (563, 47, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<1x12x16x16>
tensor<[1,12,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 192 + d1 * 16 + d2, d3), memory_config: (6, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<1x12x16x16>
tensor<[1,12,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 192 + d1 * 16 + d2, d3), memory_config: (6, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<74x7>>, >
shape: #ttnn.shape<1x12x197x197>
tensor<[1,12,197,197,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2364 + d1 * 197 + d2, d3), memory_config: (74, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<74x7>>, >
shape: #ttnn.shape<1x12x197x197>
tensor<[1,12,197,197,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2364 + d1 * 197 + d2, d3), memory_config: (74, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x13>
tensor<[1,12,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x13>
tensor<[1,12,1,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x14>
tensor<[1,12,1,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x14>
tensor<[1,12,1,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x15>
tensor<[1,12,1,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x15>
tensor<[1,12,1,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x16>
tensor<[1,12,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x16>
tensor<[1,12,1,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x17>
tensor<[1,12,1,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x17>
tensor<[1,12,1,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x18>
tensor<[1,12,1,18,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x18>
tensor<[1,12,1,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x19>
tensor<[1,12,1,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x19>
tensor<[1,12,1,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x20>
tensor<[1,12,1,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x20>
tensor<[1,12,1,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x21>
tensor<[1,12,1,21,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x21>
tensor<[1,12,1,21,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x22>
tensor<[1,12,1,22,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x22>
tensor<[1,12,1,22,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x23>
tensor<[1,12,1,23,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x23>
tensor<[1,12,1,23,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x24>
tensor<[1,12,1,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x24>
tensor<[1,12,1,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x25>
tensor<[1,12,1,25,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x25>
tensor<[1,12,1,25,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x26>
tensor<[1,12,1,26,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x26>
tensor<[1,12,1,26,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x27>
tensor<[1,12,1,27,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x27>
tensor<[1,12,1,27,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x28>
tensor<[1,12,1,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x28>
tensor<[1,12,1,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x29>
tensor<[1,12,1,29,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x29>
tensor<[1,12,1,29,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<1x12x25x25>
tensor<[1,12,25,25,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 300 + d1 * 25 + d2, d3), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<1x12x25x25>
tensor<[1,12,25,25,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 300 + d1 * 25 + d2, d3), memory_config: (10, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x2>>, >
shape: #ttnn.shape<1x12x50x50>
tensor<[1,12,50,50,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 600 + d1 * 50 + d2, d3), memory_config: (19, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x2>>, >
shape: #ttnn.shape<1x12x50x50>
tensor<[1,12,50,50,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 600 + d1 * 50 + d2, d3), memory_config: (19, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x12x7x7>
tensor<[1,12,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 84 + d1 * 7 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x12x7x7>
tensor<[1,12,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 84 + d1 * 7 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x12x9x9>
tensor<[1,12,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 108 + d1 * 9 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x12x9x9>
tensor<[1,12,9,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 108 + d1 * 9 + d2, d3), memory_config: (4, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<685x43>>, >
shape: #ttnn.shape<1x16x1370x1370>
tensor<[1,16,1370,1370,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21920 + d1 * 1370 + d2, d3), memory_config: (685, 43, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<685x43>>, >
shape: #ttnn.shape<1x16x1370x1370>
tensor<[1,16,1370,1370,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21920 + d1 * 1370 + d2, d3), memory_config: (685, 43, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x10>
tensor<[1,16,1,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x10>
tensor<[1,16,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x11>
tensor<[1,16,1,11,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x11>
tensor<[1,16,1,11,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x12>
tensor<[1,16,1,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x12>
tensor<[1,16,1,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x13>
tensor<[1,16,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x13>
tensor<[1,16,1,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x14>
tensor<[1,16,1,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x14>
tensor<[1,16,1,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x15>
tensor<[1,16,1,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x15>
tensor<[1,16,1,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x16>
tensor<[1,16,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x16>
tensor<[1,16,1,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x17>
tensor<[1,16,1,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x17>
tensor<[1,16,1,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x18>
tensor<[1,16,1,18,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x18>
tensor<[1,16,1,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x19>
tensor<[1,16,1,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x19>
tensor<[1,16,1,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x20>
tensor<[1,16,1,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x20>
tensor<[1,16,1,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x21>
tensor<[1,16,1,21,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x21>
tensor<[1,16,1,21,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x22>
tensor<[1,16,1,22,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x22>
tensor<[1,16,1,22,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x23>
tensor<[1,16,1,23,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x23>
tensor<[1,16,1,23,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x24>
tensor<[1,16,1,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x24>
tensor<[1,16,1,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x25>
tensor<[1,16,1,25,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x25>
tensor<[1,16,1,25,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x26>
tensor<[1,16,1,26,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x26>
tensor<[1,16,1,26,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x27>
tensor<[1,16,1,27,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x27>
tensor<[1,16,1,27,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x28>
tensor<[1,16,1,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x28>
tensor<[1,16,1,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x29>
tensor<[1,16,1,29,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x29>
tensor<[1,16,1,29,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x7>
tensor<[1,16,1,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x7>
tensor<[1,16,1,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x8>
tensor<[1,16,1,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x8>
tensor<[1,16,1,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x9>
tensor<[1,16,1,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x9>
tensor<[1,16,1,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x8>>, >
shape: #ttnn.shape<1x16x256x256>
tensor<[1,16,256,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 256 + d2, d3), memory_config: (128, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x8>>, >
shape: #ttnn.shape<1x16x256x256>
tensor<[1,16,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 256 + d2, d3), memory_config: (128, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x16x6x6>
tensor<[1,16,6,6,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 96 + d1 * 6 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x16x6x6>
tensor<[1,16,6,6,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 96 + d1 * 6 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<1x16x9x9>
tensor<[1,16,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 9 + d2, d3), memory_config: (5, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<1x16x9x9>
tensor<[1,16,9,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 9 + d2, d3), memory_config: (5, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x24x32x32>
tensor<[1,24,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 32 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x24x32x32>
tensor<[1,24,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 32 + d2, d3), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x1>>, >
shape: #ttnn.shape<1x28x13x13>
tensor<[1,28,13,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 364 + d1 * 13 + d2, d3), memory_config: (12, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x1>>, >
shape: #ttnn.shape<1x28x13x13>
tensor<[1,28,13,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 364 + d1 * 13 + d2, d3), memory_config: (12, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x32x32x32>
tensor<[1,32,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x32x32x32>
tensor<[1,32,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<136x46>>, >
shape: #ttnn.shape<1x3x1445x1445>
tensor<[1,3,1445,1445,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4335 + d1 * 1445 + d2, d3), memory_config: (136, 46, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<136x46>>, >
shape: #ttnn.shape<1x3x1445x1445>
tensor<[1,3,1445,1445,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4335 + d1 * 1445 + d2, d3), memory_config: (136, 46, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x64x9x9>
tensor<[1,64,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 9 + d2, d3), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x64x9x9>
tensor<[1,64,9,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 9 + d2, d3), memory_config: (18, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x71x7x7>
tensor<[1,71,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 497 + d1 * 7 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x71x7x7>
tensor<[1,71,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 497 + d1 * 7 + d2, d3), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x32>>, >
shape: #ttnn.shape<1x8x1024x1024>
tensor<[1,8,1024,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 1024 + d2, d3), memory_config: (256, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x32>>, >
shape: #ttnn.shape<1x8x1024x1024>
tensor<[1,8,1024,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 1024 + d2, d3), memory_config: (256, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x8x1024x9>
tensor<[1,8,1024,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 1024 + d2, d3), memory_config: (256, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x8x1024x9>
tensor<[1,8,1024,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 1024 + d2, d3), memory_config: (256, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x8>>, >
shape: #ttnn.shape<1x8x256x256>
tensor<[1,8,256,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 256 + d2, d3), memory_config: (64, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x8>>, >
shape: #ttnn.shape<1x8x256x256>
tensor<[1,8,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 256 + d2, d3), memory_config: (64, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x8x256x9>
tensor<[1,8,256,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 256 + d2, d3), memory_config: (64, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x8x256x9>
tensor<[1,8,256,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 256 + d2, d3), memory_config: (64, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x128>>, >
shape: #ttnn.shape<1x8x4096x4096>
tensor<[1,8,4096,4096,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32768 + d1 * 4096 + d2, d3), memory_config: (1024, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x128>>, >
shape: #ttnn.shape<1x8x4096x4096>
tensor<[1,8,4096,4096,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32768 + d1 * 4096 + d2, d3), memory_config: (1024, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x1>>, >
shape: #ttnn.shape<1x8x4096x9>
tensor<[1,8,4096,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32768 + d1 * 4096 + d2, d3), memory_config: (1024, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x1>>, >
shape: #ttnn.shape<1x8x4096x9>
tensor<[1,8,4096,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32768 + d1 * 4096 + d2, d3), memory_config: (1024, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x2>>, >
shape: #ttnn.shape<1x8x64x64>
tensor<[1,8,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 64 + d2, d3), memory_config: (16, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x2>>, >
shape: #ttnn.shape<1x8x64x64>
tensor<[1,8,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 64 + d2, d3), memory_config: (16, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x8x64x9>
tensor<[1,8,64,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 64 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x8x64x9>
tensor<[1,8,64,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 64 + d2, d3), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<2x8x7x7>
tensor<[2,8,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 56 + d1 * 7 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<2x8x7x7>
tensor<[2,8,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 56 + d1 * 7 + d2, d3), memory_config: (4, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<4x16x1x1>
tensor<[4,16,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<4x16x1x1>
tensor<[4,16,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (2, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3267x5>>, >
shape: #ttnn.shape<121x6x144x144>
tensor<[121,6,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 864 + d1 * 144 + d2, d3), memory_config: (3267, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x2>>, >
shape: #ttnn.shape<16x8x49x49>
tensor<[16,8,49,49,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 392 + d1 * 49 + d2, d3), memory_config: (196, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x12x10x10>
tensor<[1,12,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 * 10 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<74x7>>, >
shape: #ttnn.shape<1x12x197x197>
tensor<[1,12,197,197,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2364 + d1 * 197 + d2, d3), memory_config: (74, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x10>
tensor<[1,12,1,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x11>
tensor<[1,12,1,11,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x12>
tensor<[1,12,1,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x13>
tensor<[1,12,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x14>
tensor<[1,12,1,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x15>
tensor<[1,12,1,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x16>
tensor<[1,12,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x17>
tensor<[1,12,1,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x18>
tensor<[1,12,1,18,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x19>
tensor<[1,12,1,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x1>
tensor<[1,12,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x20>
tensor<[1,12,1,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x2>
tensor<[1,12,1,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x3>
tensor<[1,12,1,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x46>
tensor<[1,12,1,46,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x47>
tensor<[1,12,1,47,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x48>
tensor<[1,12,1,48,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x49>
tensor<[1,12,1,49,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x4>
tensor<[1,12,1,4,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x50>
tensor<[1,12,1,50,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x51>
tensor<[1,12,1,51,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x52>
tensor<[1,12,1,52,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x53>
tensor<[1,12,1,53,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x54>
tensor<[1,12,1,54,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x55>
tensor<[1,12,1,55,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x56>
tensor<[1,12,1,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x57>
tensor<[1,12,1,57,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x58>
tensor<[1,12,1,58,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x59>
tensor<[1,12,1,59,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x5>
tensor<[1,12,1,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x60>
tensor<[1,12,1,60,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x61>
tensor<[1,12,1,61,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x62>
tensor<[1,12,1,62,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x63>
tensor<[1,12,1,63,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x64>
tensor<[1,12,1,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x65>
tensor<[1,12,1,65,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x66>
tensor<[1,12,1,66,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x67>
tensor<[1,12,1,67,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x68>
tensor<[1,12,1,68,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x69>
tensor<[1,12,1,69,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x6>
tensor<[1,12,1,6,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x70>
tensor<[1,12,1,70,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x71>
tensor<[1,12,1,71,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x72>
tensor<[1,12,1,72,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x73>
tensor<[1,12,1,73,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x74>
tensor<[1,12,1,74,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x75>
tensor<[1,12,1,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x76>
tensor<[1,12,1,76,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x77>
tensor<[1,12,1,77,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x78>
tensor<[1,12,1,78,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x79>
tensor<[1,12,1,79,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x7>
tensor<[1,12,1,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x80>
tensor<[1,12,1,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x81>
tensor<[1,12,1,81,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x82>
tensor<[1,12,1,82,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x83>
tensor<[1,12,1,83,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x84>
tensor<[1,12,1,84,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x85>
tensor<[1,12,1,85,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x86>
tensor<[1,12,1,86,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x87>
tensor<[1,12,1,87,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x88>
tensor<[1,12,1,88,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x89>
tensor<[1,12,1,89,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x8>
tensor<[1,12,1,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x90>
tensor<[1,12,1,90,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x91>
tensor<[1,12,1,91,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x92>
tensor<[1,12,1,92,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x93>
tensor<[1,12,1,93,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x94>
tensor<[1,12,1,94,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x95>
tensor<[1,12,1,95,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x96>
tensor<[1,12,1,96,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x97>
tensor<[1,12,1,97,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x98>
tensor<[1,12,1,98,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x99>
tensor<[1,12,1,99,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x9>
tensor<[1,12,1,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<76x7>>, >
shape: #ttnn.shape<1x12x201x201>
tensor<[1,12,201,201,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2412 + d1 * 201 + d2, d3), memory_config: (76, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<97x9>>, >
shape: #ttnn.shape<1x12x257x257>
tensor<[1,12,257,257,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3084 + d1 * 257 + d2, d3), memory_config: (97, 9, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<17x2>>, >
shape: #ttnn.shape<1x12x45x45>
tensor<[1,12,45,45,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 540 + d1 * 45 + d2, d3), memory_config: (17, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x12x8x8>
tensor<[1,12,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 96 + d1 * 8 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<1x16x10x10>
tensor<[1,16,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 160 + d1 * 10 + d2, d3), memory_config: (5, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<99x7>>, >
shape: #ttnn.shape<1x16x197x197>
tensor<[1,16,197,197,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3152 + d1 * 197 + d2, d3), memory_config: (99, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x10>
tensor<[1,16,1,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x11>
tensor<[1,16,1,11,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x12>
tensor<[1,16,1,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x13>
tensor<[1,16,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x14>
tensor<[1,16,1,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x15>
tensor<[1,16,1,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x16>
tensor<[1,16,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x17>
tensor<[1,16,1,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x18>
tensor<[1,16,1,18,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x19>
tensor<[1,16,1,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x1>
tensor<[1,16,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x20>
tensor<[1,16,1,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x21>
tensor<[1,16,1,21,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x22>
tensor<[1,16,1,22,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x23>
tensor<[1,16,1,23,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x24>
tensor<[1,16,1,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x2>
tensor<[1,16,1,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x3>
tensor<[1,16,1,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x4>
tensor<[1,16,1,4,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x5>
tensor<[1,16,1,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x6>
tensor<[1,16,1,6,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x7>
tensor<[1,16,1,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x8>
tensor<[1,16,1,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x9>
tensor<[1,16,1,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x16x32x32>
tensor<[1,16,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 32 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x16x5x5>
tensor<[1,16,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 5 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<1x1x16384x256>
tensor<[1,1,16384,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 16384 + d2, d3), memory_config: (512, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x10>>, >
shape: #ttnn.shape<1x1x19200x300>
tensor<[1,1,19200,300,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 19200 + d2, d3), memory_config: (600, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x9>>, >
shape: #ttnn.shape<1x27x257>
tensor<[1,27,257,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 27 + d1, d2), memory_config: (1, 9, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x8>>, >
shape: #ttnn.shape<1x2x4096x256>
tensor<[1,2,4096,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 4096 + d2, d3), memory_config: (256, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x10>>, >
shape: #ttnn.shape<1x2x4800x300>
tensor<[1,2,4800,300,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 4800 + d2, d3), memory_config: (300, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x2>>, >
shape: #ttnn.shape<1x32x49x49>
tensor<[1,32,49,49,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 49 + d2, d3), memory_config: (49, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1571>>, >
shape: #ttnn.shape<1x50257>
tensor<[1,50257,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1571, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<160x8>>, >
shape: #ttnn.shape<1x5x1024x256>
tensor<[1,5,1024,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5120 + d1 * 1024 + d2, d3), memory_config: (160, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<188x10>>, >
shape: #ttnn.shape<1x5x1200x300>
tensor<[1,5,1200,300,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6000 + d1 * 1200 + d2, d3), memory_config: (188, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x6x15x15>
tensor<[1,6,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 90 + d1 * 15 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x10>
tensor<[1,6,1,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x11>
tensor<[1,6,1,11,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x12>
tensor<[1,6,1,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x13>
tensor<[1,6,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x14>
tensor<[1,6,1,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x15>
tensor<[1,6,1,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x16>
tensor<[1,6,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x17>
tensor<[1,6,1,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x18>
tensor<[1,6,1,18,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x19>
tensor<[1,6,1,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x1>
tensor<[1,6,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x20>
tensor<[1,6,1,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x2>
tensor<[1,6,1,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x3>
tensor<[1,6,1,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x4>
tensor<[1,6,1,4,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x5>
tensor<[1,6,1,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x6>
tensor<[1,6,1,6,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x7>
tensor<[1,6,1,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x8>
tensor<[1,6,1,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x9>
tensor<[1,6,1,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x8x10x10>
tensor<[1,8,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 10 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x10>
tensor<[1,8,1,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x11>
tensor<[1,8,1,11,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x12>
tensor<[1,8,1,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x13>
tensor<[1,8,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x14>
tensor<[1,8,1,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x15>
tensor<[1,8,1,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x16>
tensor<[1,8,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x17>
tensor<[1,8,1,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x18>
tensor<[1,8,1,18,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x19>
tensor<[1,8,1,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x1>
tensor<[1,8,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x20>
tensor<[1,8,1,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x2>
tensor<[1,8,1,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x3>
tensor<[1,8,1,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x4>
tensor<[1,8,1,4,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x5>
tensor<[1,8,1,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x6>
tensor<[1,8,1,6,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x7>
tensor<[1,8,1,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x8>
tensor<[1,8,1,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x9>
tensor<[1,8,1,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<1x8x2048x256>
tensor<[1,8,2048,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 2048 + d2, d3), memory_config: (512, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x64>>, >
shape: #ttnn.shape<1x8x256x2048>
tensor<[1,8,256,2048,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 256 + d2, d3), memory_config: (64, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x8>>, >
shape: #ttnn.shape<1x8x256x256>
tensor<[1,8,256,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 256 + d2, d3), memory_config: (64, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<75x10>>, >
shape: #ttnn.shape<1x8x300x300>
tensor<[1,8,300,300,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2400 + d1 * 300 + d2, d3), memory_config: (75, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<2x12x13x13>
tensor<[2,12,13,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 156 + d1 * 13 + d2, d3), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1944x5>>, >
shape: #ttnn.shape<36x12x144x144>
tensor<[36,12,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1728 + d1 * 144 + d2, d3), memory_config: (1944, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3888x5>>, >
shape: #ttnn.shape<36x24x144x144>
tensor<[36,24,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3456 + d1 * 144 + d2, d3), memory_config: (3888, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<13068x5>>, >
shape: #ttnn.shape<484x6x144x144>
tensor<[484,6,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 864 + d1 * 144 + d2, d3), memory_config: (13068, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x2>>, >
shape: #ttnn.shape<4x16x49x49>
tensor<[4,16,49,49,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 49 + d2, d3), memory_config: (98, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<864x5>>, >
shape: #ttnn.shape<4x48x144x144>
tensor<[4,48,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6912 + d1 * 144 + d2, d3), memory_config: (864, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<64x1x13>
tensor<[64,1,13,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x2>>, >
shape: #ttnn.shape<64x4x49x49>
tensor<[64,4,49,49,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 49 + d2, d3), memory_config: (392, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x4>>, >
shape: #ttnn.shape<8x100x100>
tensor<[8,100,100,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 100 + d1, d2), memory_config: (25, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x29>>, >
shape: #ttnn.shape<8x100x920>
tensor<[8,100,920,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 100 + d1, d2), memory_config: (25, 29, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<230x29>>, >
shape: #ttnn.shape<8x920x920>
tensor<[8,920,920,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 920 + d1, d2), memory_config: (230, 29, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<972x5>>, >
shape: #ttnn.shape<9x24x144x144>
tensor<[9,24,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3456 + d1 * 144 + d2, d3), memory_config: (972, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1944x5>>, >
shape: #ttnn.shape<9x48x144x144>
tensor<[9,48,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6912 + d1 * 144 + d2, d3), memory_config: (1944, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<100x1x1>
tensor<[100,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x8>>, >
shape: #ttnn.shape<100x1x256>
tensor<[100,1,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (4, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x8>>, >
shape: #ttnn.shape<100x1x256>
tensor<[100,1,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (4, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x64>>, >
shape: #ttnn.shape<100x2048>
tensor<[100,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x8>>, >
shape: #ttnn.shape<100x256>
tensor<[100,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<100x4>
tensor<[100,4,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x3>>, >
shape: #ttnn.shape<100x92>
tensor<[100,92,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x48>>, >
shape: #ttnn.shape<1024x1536>
tensor<[1024,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x5>>, >
shape: #ttnn.shape<1024x160>
tensor<[1024,160,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x96>>, >
shape: #ttnn.shape<1024x3072>
tensor<[1024,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x160>>, >
shape: #ttnn.shape<1024x5120>
tensor<[1024,5120,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 160, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x192>>, >
shape: #ttnn.shape<1024x6144>
tensor<[1024,6144,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 192, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1024x640>
tensor<[1024,640,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 20, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x24>>, >
shape: #ttnn.shape<1024x768>
tensor<[1024,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x7813>>, >
shape: #ttnn.shape<10x250002>
tensor<[10,250002,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 7813, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<10x3072>
tensor<[10,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<10x768>
tensor<[10,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x40>>, >
shape: #ttnn.shape<1200x1280>
tensor<[1200,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (38, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x10>>, >
shape: #ttnn.shape<1200x320>
tensor<[1200,320,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (38, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6534x5>>, >
shape: #ttnn.shape<121x12x144x144>
tensor<[121,12,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1728 + d1 * 144 + d2, d3), memory_config: (6534, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3267x5>>, >
shape: #ttnn.shape<121x6x144x144>
tensor<[121,6,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 864 + d1 * 144 + d2, d3), memory_config: (3267, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x48>>, >
shape: #ttnn.shape<1296x1536>
tensor<[1296,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (41, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x72>>, >
shape: #ttnn.shape<1296x2304>
tensor<[1296,2304,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (41, 72, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x144>>, >
shape: #ttnn.shape<1296x4608>
tensor<[1296,4608,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (41, 144, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x24>>, >
shape: #ttnn.shape<1296x768>
tensor<[1296,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (41, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<12x1536>
tensor<[12,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x8>>, >
shape: #ttnn.shape<12x256>
tensor<[12,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x2>
tensor<[12,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<12x3072>
tensor<[12,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<12x768>
tensor<[12,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x40>>, >
shape: #ttnn.shape<1370x1280>
tensor<[1370,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (43, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x120>>, >
shape: #ttnn.shape<1370x3840>
tensor<[1370,3840,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (43, 120, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x160>>, >
shape: #ttnn.shape<1370x5120>
tensor<[1370,5120,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (43, 160, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<13x2>
tensor<[13,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x112>>, >
shape: #ttnn.shape<13x3584>
tensor<[13,3584,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 112, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<13x512>
tensor<[13,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x6>>, >
shape: #ttnn.shape<1445x192>
tensor<[1445,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (46, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x24>>, >
shape: #ttnn.shape<1445x768>
tensor<[1445,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (46, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<14x2048>
tensor<[14,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<14x2>
tensor<[14,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<14x3072>
tensor<[14,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<14x512>
tensor<[14,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<14x768>
tensor<[14,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x96>>, >
shape: #ttnn.shape<1500x3072>
tensor<[1500,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (47, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x24>>, >
shape: #ttnn.shape<1500x768>
tensor<[1500,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (47, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<16384x128>
tensor<[16384,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x48>>, >
shape: #ttnn.shape<16384x1536>
tensor<[16384,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x6>>, >
shape: #ttnn.shape<16384x192>
tensor<[16384,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<16384x32>
tensor<[16384,32,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x12>>, >
shape: #ttnn.shape<16384x384>
tensor<[16384,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x24>>, >
shape: #ttnn.shape<16384x768>
tensor<[16384,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<16x3072>
tensor<[16,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<16x768>
tensor<[16,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x2>>, >
shape: #ttnn.shape<16x8x49x49>
tensor<[16,8,49,49,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 392 + d1 * 49 + d2, d3), memory_config: (196, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x36>>, >
shape: #ttnn.shape<17424x1152>
tensor<[17424,1152,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (545, 36, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x6>>, >
shape: #ttnn.shape<17424x192>
tensor<[17424,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (545, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x12>>, >
shape: #ttnn.shape<17424x384>
tensor<[17424,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (545, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x18>>, >
shape: #ttnn.shape<17424x576>
tensor<[17424,576,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (545, 18, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x8>>, >
shape: #ttnn.shape<19200x256>
tensor<[19200,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (600, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x2>>, >
shape: #ttnn.shape<19200x64>
tensor<[19200,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (600, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x48>>, >
shape: #ttnn.shape<196x1536>
tensor<[196,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<196x3072>
tensor<[196,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<196x512>
tensor<[196,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<196x768>
tensor<[196,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<197x1024>
tensor<[197,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<197x3072>
tensor<[197,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x128>>, >
shape: #ttnn.shape<197x4096>
tensor<[197,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<197x768>
tensor<[197,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1>
tensor<[1,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1000>
tensor<[1,1000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<1x100x14x14>
tensor<[1,100,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1400 + d1 * 14 + d2, d3), memory_config: (44, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1024>
tensor<[1,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1024>
tensor<[1,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1024x10x10>
tensor<[1,1024,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 10 + d2, d3), memory_config: (320, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1024x10x10>
tensor<[1,1024,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 10 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x1024x14x14>
tensor<[1,1024,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 14 + d2, d3), memory_config: (448, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x1024x14x14>
tensor<[1,1024,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 14 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x48>>, >
shape: #ttnn.shape<1x1024x1536>
tensor<[1,1024,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x48>>, >
shape: #ttnn.shape<1x1024x1536>
tensor<[1,1024,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x5>>, >
shape: #ttnn.shape<1x1024x160>
tensor<[1,1024,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x5>>, >
shape: #ttnn.shape<1x1024x160>
tensor<[1,1024,160,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x1024x16x16>
tensor<[1,1024,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 16 + d2, d3), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x1024x16x16>
tensor<[1,1024,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 16 + d2, d3), memory_config: (512, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<608x1>>, >
shape: #ttnn.shape<1x1024x19x19>
tensor<[1,1024,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19456 + d1 * 19 + d2, d3), memory_config: (608, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x1024x1>
tensor<[1,1024,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x8>>, >
shape: #ttnn.shape<1x1024x256>
tensor<[1,1024,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x1>>, >
shape: #ttnn.shape<1x1024x28x28>
tensor<[1,1024,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 28 + d2, d3), memory_config: (896, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x96>>, >
shape: #ttnn.shape<1x1024x3072>
tensor<[1,1024,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x3>>, >
shape: #ttnn.shape<1x1024x45x80>
tensor<[1,1024,45,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 45 + d2, d3), memory_config: (1440, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x3>>, >
shape: #ttnn.shape<1x1024x45x80>
tensor<[1,1024,45,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 45 + d2, d3), memory_config: (1440, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1x1024x640>
tensor<[1,1024,640,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1x1024x640>
tensor<[1,1024,640,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 20, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x24>>, >
shape: #ttnn.shape<1x1024x768>
tensor<[1,1024,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x24>>, >
shape: #ttnn.shape<1x1024x768>
tensor<[1,1024,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x1024x7x7>
tensor<[1,1024,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 7 + d2, d3), memory_config: (224, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x1024x7x7>
tensor<[1,1024,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 7 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<462x1>>, >
shape: #ttnn.shape<1x1056x14x14>
tensor<[1,1056,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14784 + d1 * 14 + d2, d3), memory_config: (462, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<231x1>>, >
shape: #ttnn.shape<1x1056x7x7>
tensor<[1,1056,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7392 + d1 * 7 + d2, d3), memory_config: (231, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<476x1>>, >
shape: #ttnn.shape<1x1088x14x14>
tensor<[1,1088,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15232 + d1 * 14 + d2, d3), memory_config: (476, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<238x1>>, >
shape: #ttnn.shape<1x1088x7x7>
tensor<[1,1088,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7616 + d1 * 7 + d2, d3), memory_config: (238, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x10x1024>
tensor<[1,10,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x10x1536>
tensor<[1,10,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10x1>
tensor<[1,10,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10x1>
tensor<[1,10,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x10x3072>
tensor<[1,10,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x10x512>
tensor<[1,10,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<490x1>>, >
shape: #ttnn.shape<1x1120x14x14>
tensor<[1,1120,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15680 + d1 * 14 + d2, d3), memory_config: (490, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<245x1>>, >
shape: #ttnn.shape<1x1120x7x7>
tensor<[1,1120,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7840 + d1 * 7 + d2, d3), memory_config: (245, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x1>>, >
shape: #ttnn.shape<1x112x14x14>
tensor<[1,112,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 14 + d2, d3), memory_config: (49, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x1>>, >
shape: #ttnn.shape<1x112x14x14>
tensor<[1,112,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 14 + d2, d3), memory_config: (49, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<53x1>>, >
shape: #ttnn.shape<1x112x15x15>
tensor<[1,112,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1680 + d1 * 15 + d2, d3), memory_config: (53, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<53x1>>, >
shape: #ttnn.shape<1x112x15x15>
tensor<[1,112,15,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1680 + d1 * 15 + d2, d3), memory_config: (53, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x1>>, >
shape: #ttnn.shape<1x112x20x20>
tensor<[1,112,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 20 + d2, d3), memory_config: (70, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x1>>, >
shape: #ttnn.shape<1x112x20x20>
tensor<[1,112,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 20 + d2, d3), memory_config: (70, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x112x24x24>
tensor<[1,112,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 24 + d2, d3), memory_config: (84, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x112x24x24>
tensor<[1,112,24,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 24 + d2, d3), memory_config: (84, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x112x7x7>
tensor<[1,112,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 7 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x1>>, >
shape: #ttnn.shape<1x1152x14x14>
tensor<[1,1152,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 14 + d2, d3), memory_config: (504, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x1152x7x7>
tensor<[1,1152,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 7 + d2, d3), memory_config: (252, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<288x1>>, >
shape: #ttnn.shape<1x1152x8x8>
tensor<[1,1152,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9216 + d1 * 8 + d2, d3), memory_config: (288, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<51x1>>, >
shape: #ttnn.shape<1x116x14x14>
tensor<[1,116,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1624 + d1 * 14 + d2, d3), memory_config: (51, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<518x1>>, >
shape: #ttnn.shape<1x1184x14x14>
tensor<[1,1184,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16576 + d1 * 14 + d2, d3), memory_config: (518, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<259x1>>, >
shape: #ttnn.shape<1x1184x7x7>
tensor<[1,1184,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8288 + d1 * 7 + d2, d3), memory_config: (259, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x11x1536>
tensor<[1,11,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11x1>
tensor<[1,11,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x11x3072>
tensor<[1,11,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x1>>, >
shape: #ttnn.shape<1x1200x1>
tensor<[1,1200,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (38, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x10>>, >
shape: #ttnn.shape<1x1200x320>
tensor<[1,1200,320,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (38, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x10>>, >
shape: #ttnn.shape<1x1200x320>
tensor<[1,1200,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (38, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<53x1>>, >
shape: #ttnn.shape<1x120x14x14>
tensor<[1,120,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1680 + d1 * 14 + d2, d3), memory_config: (53, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x120x17x17>
tensor<[1,120,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2040 + d1 * 17 + d2, d3), memory_config: (64, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x120x17x17>
tensor<[1,120,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2040 + d1 * 17 + d2, d3), memory_config: (64, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x120x28x28>
tensor<[1,120,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 28 + d2, d3), memory_config: (105, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x2>>, >
shape: #ttnn.shape<1x120x40x40>
tensor<[1,120,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 40 + d2, d3), memory_config: (150, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<532x1>>, >
shape: #ttnn.shape<1x1216x14x14>
tensor<[1,1216,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17024 + d1 * 14 + d2, d3), memory_config: (532, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<266x1>>, >
shape: #ttnn.shape<1x1216x7x7>
tensor<[1,1216,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8512 + d1 * 7 + d2, d3), memory_config: (266, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6534x5>>, >
shape: #ttnn.shape<1x121x12x144x144>
tensor<[1,121,12,144,144,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 209088 + d1 * 1728 + d2 * 144 + d3, d4), memory_config: (6534, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3267x5>>, >
shape: #ttnn.shape<1x121x6x144x144>
tensor<[1,121,6,144,144,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 104544 + d1 * 864 + d2 * 144 + d3, d4), memory_config: (3267, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<546x1>>, >
shape: #ttnn.shape<1x1248x14x14>
tensor<[1,1248,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17472 + d1 * 14 + d2, d3), memory_config: (546, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<273x1>>, >
shape: #ttnn.shape<1x1248x7x7>
tensor<[1,1248,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8736 + d1 * 7 + d2, d3), memory_config: (273, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<351x1>>, >
shape: #ttnn.shape<1x1248x9x9>
tensor<[1,1248,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11232 + d1 * 9 + d2, d3), memory_config: (351, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x40>>, >
shape: #ttnn.shape<1x1280>
tensor<[1,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<400x1>>, >
shape: #ttnn.shape<1x1280x10x10>
tensor<[1,1280,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12800 + d1 * 10 + d2, d3), memory_config: (400, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<480x1>>, >
shape: #ttnn.shape<1x1280x12x12>
tensor<[1,1280,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15360 + d1 * 12 + d2, d3), memory_config: (480, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<560x1>>, >
shape: #ttnn.shape<1x1280x14x14>
tensor<[1,1280,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17920 + d1 * 14 + d2, d3), memory_config: (560, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x1280x16x16>
tensor<[1,1280,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 16 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x1280x16x16>
tensor<[1,1280,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 16 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x1>>, >
shape: #ttnn.shape<1x1280x32x32>
tensor<[1,1280,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 32 + d2, d3), memory_config: (1280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x1280x7x7>
tensor<[1,1280,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 7 + d2, d3), memory_config: (280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x1>>, >
shape: #ttnn.shape<1x1280x9x9>
tensor<[1,1280,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 9 + d2, d3), memory_config: (360, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x128>
tensor<[1,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x4>>, >
shape: #ttnn.shape<1x128x112x112>
tensor<[1,128,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 112 + d2, d3), memory_config: (448, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<1x128x128x128>
tensor<[1,128,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (512, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<1x128x128x128>
tensor<[1,128,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (512, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x1>>, >
shape: #ttnn.shape<1x128x14x14>
tensor<[1,128,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 14 + d2, d3), memory_config: (56, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x5>>, >
shape: #ttnn.shape<1x128x150x150>
tensor<[1,128,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 150 + d2, d3), memory_config: (600, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<68x1>>, >
shape: #ttnn.shape<1x128x17x17>
tensor<[1,128,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2176 + d1 * 17 + d2, d3), memory_config: (68, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x10>>, >
shape: #ttnn.shape<1x128x180x320>
tensor<[1,128,180,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 180 + d2, d3), memory_config: (720, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x128x1x1>
tensor<[1,128,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 128 + d1 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x128x28x28>
tensor<[1,128,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 28 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x128x28x28>
tensor<[1,128,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 28 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x128x2x2>
tensor<[1,128,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 2 + d2, d3), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x128x32x32>
tensor<[1,128,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 32 + d2, d3), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x128x32x32>
tensor<[1,128,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 32 + d2, d3), memory_config: (128, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x1>>, >
shape: #ttnn.shape<1x128x3x3>
tensor<[1,128,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 384 + d1 * 3 + d2, d3), memory_config: (12, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x2>>, >
shape: #ttnn.shape<1x128x56x56>
tensor<[1,128,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 56 + d2, d3), memory_config: (224, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x2>>, >
shape: #ttnn.shape<1x128x56x56>
tensor<[1,128,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 56 + d2, d3), memory_config: (224, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x1>>, >
shape: #ttnn.shape<1x128x5x5>
tensor<[1,128,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 5 + d2, d3), memory_config: (20, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x2>>, >
shape: #ttnn.shape<1x128x64x64>
tensor<[1,128,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 64 + d2, d3), memory_config: (256, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x3>>, >
shape: #ttnn.shape<1x128x75x75>
tensor<[1,128,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 75 + d2, d3), memory_config: (300, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x3>>, >
shape: #ttnn.shape<1x128x75x75>
tensor<[1,128,75,75,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 75 + d2, d3), memory_config: (300, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x128x7x7>
tensor<[1,128,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 7 + d2, d3), memory_config: (28, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x5>>, >
shape: #ttnn.shape<1x128x90x160>
tensor<[1,128,90,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 90 + d2, d3), memory_config: (360, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x12x10x10>
tensor<[1,12,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 * 10 + d2, d3), memory_config: (4, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x12x10x10>
tensor<[1,12,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 * 10 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x12x10x10>
tensor<[1,12,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 * 10 + d2, d3), memory_config: (4, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x128>
tensor<[1,12,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x128>
tensor<[1,12,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x4>>, >
shape: #ttnn.shape<1x12x12x128>
tensor<[1,12,12,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (5, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<1x12x12x12>
tensor<[1,12,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (5, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<2x12x13x13>
tensor<[2,12,13,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 156 + d1 * 13 + d2, d3), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<1x12x14x14>
tensor<[1,12,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 168 + d1 * 14 + d2, d3), memory_config: (6, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x12x1536>
tensor<[1,12,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<1x12x16x16>
tensor<[1,12,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 192 + d1 * 16 + d2, d3), memory_config: (6, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<74x7>>, >
shape: #ttnn.shape<1x12x197x197>
tensor<[1,12,197,197,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2364 + d1 * 197 + d2, d3), memory_config: (74, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1>
tensor<[1,12,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1>
tensor<[1,12,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1>
tensor<[1,12,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x10>
tensor<[1,12,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x10>
tensor<[1,12,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x11>
tensor<[1,12,1,11,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x128>
tensor<[1,12,1,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x12>
tensor<[1,12,1,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x13>
tensor<[1,12,1,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x13>
tensor<[1,12,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x14>
tensor<[1,12,1,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x14>
tensor<[1,12,1,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x15>
tensor<[1,12,1,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x15>
tensor<[1,12,1,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x16>
tensor<[1,12,1,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x16>
tensor<[1,12,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x17>
tensor<[1,12,1,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x17>
tensor<[1,12,1,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x18>
tensor<[1,12,1,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x18>
tensor<[1,12,1,18,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x19>
tensor<[1,12,1,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x19>
tensor<[1,12,1,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x1>
tensor<[1,12,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x20>
tensor<[1,12,1,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x20>
tensor<[1,12,1,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x21>
tensor<[1,12,1,21,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x22>
tensor<[1,12,1,22,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x23>
tensor<[1,12,1,23,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x24>
tensor<[1,12,1,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x25>
tensor<[1,12,1,25,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x26>
tensor<[1,12,1,26,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x27>
tensor<[1,12,1,27,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x28>
tensor<[1,12,1,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x29>
tensor<[1,12,1,29,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x2>
tensor<[1,12,1,2,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x3>
tensor<[1,12,1,3,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x46>
tensor<[1,12,1,46,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x47>
tensor<[1,12,1,47,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x48>
tensor<[1,12,1,48,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x49>
tensor<[1,12,1,49,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x4>
tensor<[1,12,1,4,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x50>
tensor<[1,12,1,50,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x51>
tensor<[1,12,1,51,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x52>
tensor<[1,12,1,52,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x53>
tensor<[1,12,1,53,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x54>
tensor<[1,12,1,54,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x55>
tensor<[1,12,1,55,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x56>
tensor<[1,12,1,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x57>
tensor<[1,12,1,57,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x58>
tensor<[1,12,1,58,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x59>
tensor<[1,12,1,59,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x5>
tensor<[1,12,1,5,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x60>
tensor<[1,12,1,60,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x61>
tensor<[1,12,1,61,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x62>
tensor<[1,12,1,62,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x63>
tensor<[1,12,1,63,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x64>
tensor<[1,12,1,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x65>
tensor<[1,12,1,65,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x66>
tensor<[1,12,1,66,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x67>
tensor<[1,12,1,67,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x68>
tensor<[1,12,1,68,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x69>
tensor<[1,12,1,69,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x6>
tensor<[1,12,1,6,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x70>
tensor<[1,12,1,70,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x71>
tensor<[1,12,1,71,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x72>
tensor<[1,12,1,72,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x73>
tensor<[1,12,1,73,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x74>
tensor<[1,12,1,74,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x75>
tensor<[1,12,1,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x76>
tensor<[1,12,1,76,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x77>
tensor<[1,12,1,77,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x78>
tensor<[1,12,1,78,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x79>
tensor<[1,12,1,79,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x7>
tensor<[1,12,1,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x80>
tensor<[1,12,1,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x81>
tensor<[1,12,1,81,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x82>
tensor<[1,12,1,82,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x83>
tensor<[1,12,1,83,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x84>
tensor<[1,12,1,84,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x85>
tensor<[1,12,1,85,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x86>
tensor<[1,12,1,86,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x87>
tensor<[1,12,1,87,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x88>
tensor<[1,12,1,88,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x89>
tensor<[1,12,1,89,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x8>
tensor<[1,12,1,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x90>
tensor<[1,12,1,90,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x91>
tensor<[1,12,1,91,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x92>
tensor<[1,12,1,92,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x93>
tensor<[1,12,1,93,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x94>
tensor<[1,12,1,94,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x95>
tensor<[1,12,1,95,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x96>
tensor<[1,12,1,96,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x97>
tensor<[1,12,1,97,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x98>
tensor<[1,12,1,98,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x99>
tensor<[1,12,1,99,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x9>
tensor<[1,12,1,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<76x7>>, >
shape: #ttnn.shape<1x12x201x201>
tensor<[1,12,201,201,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2412 + d1 * 201 + d2, d3), memory_config: (76, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<1x12x25x25>
tensor<[1,12,25,25,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 300 + d1 * 25 + d2, d3), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x12x3072>
tensor<[1,12,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x12x3072>
tensor<[1,12,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x12x3072>
tensor<[1,12,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<17x2>>, >
shape: #ttnn.shape<1x12x45x45>
tensor<[1,12,45,45,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 540 + d1 * 45 + d2, d3), memory_config: (17, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x2>>, >
shape: #ttnn.shape<1x12x56x56>
tensor<[1,12,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 * 56 + d2, d3), memory_config: (21, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x12x768>
tensor<[1,12,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x12x768>
tensor<[1,12,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x12x7x7>
tensor<[1,12,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 84 + d1 * 7 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x12x8x8>
tensor<[1,12,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 96 + d1 * 8 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x12x9x9>
tensor<[1,12,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 108 + d1 * 9 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<574x1>>, >
shape: #ttnn.shape<1x1312x14x14>
tensor<[1,1312,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18368 + d1 * 14 + d2, d3), memory_config: (574, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<287x1>>, >
shape: #ttnn.shape<1x1312x7x7>
tensor<[1,1312,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9184 + d1 * 7 + d2, d3), memory_config: (287, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x1>>, >
shape: #ttnn.shape<1x1344x14x14>
tensor<[1,1344,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 14 + d2, d3), memory_config: (588, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x1>>, >
shape: #ttnn.shape<1x1344x14x14>
tensor<[1,1344,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 14 + d2, d3), memory_config: (588, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1176x1>>, >
shape: #ttnn.shape<1x1344x28x28>
tensor<[1,1344,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 37632 + d1 * 28 + d2, d3), memory_config: (1176, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x1344x7x7>
tensor<[1,1344,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 7 + d2, d3), memory_config: (294, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<118x1>>, >
shape: #ttnn.shape<1x134x28x28>
tensor<[1,134,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3752 + d1 * 28 + d2, d3), memory_config: (118, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<81x1>>, >
shape: #ttnn.shape<1x136x19x19>
tensor<[1,136,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2584 + d1 * 19 + d2, d3), memory_config: (81, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<81x1>>, >
shape: #ttnn.shape<1x136x19x19>
tensor<[1,136,19,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2584 + d1 * 19 + d2, d3), memory_config: (81, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x40>>, >
shape: #ttnn.shape<1x1370x1280>
tensor<[1,1370,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (43, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x40>>, >
shape: #ttnn.shape<1x1370x1280>
tensor<[1,1370,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (43, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x1>>, >
shape: #ttnn.shape<1x1370x1>
tensor<[1,1370,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (43, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<602x1>>, >
shape: #ttnn.shape<1x1376x14x14>
tensor<[1,1376,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19264 + d1 * 14 + d2, d3), memory_config: (602, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<301x1>>, >
shape: #ttnn.shape<1x1376x7x7>
tensor<[1,1376,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9632 + d1 * 7 + d2, d3), memory_config: (301, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<435x1>>, >
shape: #ttnn.shape<1x1392x10x10>
tensor<[1,1392,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13920 + d1 * 10 + d2, d3), memory_config: (435, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<609x1>>, >
shape: #ttnn.shape<1x1392x14x14>
tensor<[1,1392,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19488 + d1 * 14 + d2, d3), memory_config: (609, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<609x1>>, >
shape: #ttnn.shape<1x1392x14x14>
tensor<[1,1392,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19488 + d1 * 14 + d2, d3), memory_config: (609, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1218x1>>, >
shape: #ttnn.shape<1x1392x28x28>
tensor<[1,1392,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 38976 + d1 * 28 + d2, d3), memory_config: (1218, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x13x1536>
tensor<[1,13,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13x1>
tensor<[1,13,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13x1>
tensor<[1,13,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x13x3072>
tensor<[1,13,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x112>>, >
shape: #ttnn.shape<1x13x3584>
tensor<[1,13,3584,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 112, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<616x1>>, >
shape: #ttnn.shape<1x1408x14x14>
tensor<[1,1408,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19712 + d1 * 14 + d2, d3), memory_config: (616, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x1408x7x7>
tensor<[1,1408,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 7 + d2, d3), memory_config: (308, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<630x1>>, >
shape: #ttnn.shape<1x1440x14x14>
tensor<[1,1440,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20160 + d1 * 14 + d2, d3), memory_config: (630, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<315x1>>, >
shape: #ttnn.shape<1x1440x7x7>
tensor<[1,1440,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10080 + d1 * 7 + d2, d3), memory_config: (315, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x6>>, >
shape: #ttnn.shape<1x1445x192>
tensor<[1,1445,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (46, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x6>>, >
shape: #ttnn.shape<1x1445x192>
tensor<[1,1445,192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (46, 6, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x1>>, >
shape: #ttnn.shape<1x1445x1>
tensor<[1,1445,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (46, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x1>>, >
shape: #ttnn.shape<1x144x14x14>
tensor<[1,144,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 14 + d2, d3), memory_config: (63, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<675x5>>, >
shape: #ttnn.shape<1x144x150x150>
tensor<[1,144,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21600 + d1 * 150 + d2, d3), memory_config: (675, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<855x6>>, >
shape: #ttnn.shape<1x144x190x190>
tensor<[1,144,190,190,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27360 + d1 * 190 + d2, d3), memory_config: (855, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x144x28x28>
tensor<[1,144,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 28 + d2, d3), memory_config: (126, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<135x1>>, >
shape: #ttnn.shape<1x144x30x30>
tensor<[1,144,30,30,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4320 + d1 * 30 + d2, d3), memory_config: (135, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<149x2>>, >
shape: #ttnn.shape<1x144x33x33>
tensor<[1,144,33,33,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4752 + d1 * 33 + d2, d3), memory_config: (149, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x2>>, >
shape: #ttnn.shape<1x144x56x56>
tensor<[1,144,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 56 + d2, d3), memory_config: (252, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<270x2>>, >
shape: #ttnn.shape<1x144x60x60>
tensor<[1,144,60,60,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8640 + d1 * 60 + d2, d3), memory_config: (270, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<293x3>>, >
shape: #ttnn.shape<1x144x65x65>
tensor<[1,144,65,65,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9360 + d1 * 65 + d2, d3), memory_config: (293, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<338x3>>, >
shape: #ttnn.shape<1x144x75x75>
tensor<[1,144,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10800 + d1 * 75 + d2, d3), memory_config: (338, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x144x7x7>
tensor<[1,144,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 7 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x144x7x7>
tensor<[1,144,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 7 + d2, d3), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<428x3>>, >
shape: #ttnn.shape<1x144x95x95>
tensor<[1,144,95,95,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13680 + d1 * 95 + d2, d3), memory_config: (428, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<644x1>>, >
shape: #ttnn.shape<1x1472x14x14>
tensor<[1,1472,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20608 + d1 * 14 + d2, d3), memory_config: (644, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<322x1>>, >
shape: #ttnn.shape<1x1472x7x7>
tensor<[1,1472,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10304 + d1 * 7 + d2, d3), memory_config: (322, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x14x128>
tensor<[1,14,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x14x128>
tensor<[1,14,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x14x14x1024>
tensor<[1,14,14,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x14x14x1>
tensor<[1,14,14,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x64>>, >
shape: #ttnn.shape<1x14x14x2048>
tensor<[1,14,14,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<1x14x14x512>
tensor<[1,14,14,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<1x14x14x512>
tensor<[1,14,14,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x14x1536>
tensor<[1,14,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14x1>
tensor<[1,14,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14x1>
tensor<[1,14,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x14x3072>
tensor<[1,14,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x14x3072>
tensor<[1,14,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x14x3072>
tensor<[1,14,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x2>>, >
shape: #ttnn.shape<1x14x56x56>
tensor<[1,14,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 56 + d2, d3), memory_config: (25, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x14x768>
tensor<[1,14,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x14x768>
tensor<[1,14,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x1>>, >
shape: #ttnn.shape<1x1500x1>
tensor<[1,1500,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (47, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x24>>, >
shape: #ttnn.shape<1x1500x768>
tensor<[1,1500,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (47, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x24>>, >
shape: #ttnn.shape<1x1500x768>
tensor<[1,1500,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (47, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x24>>, >
shape: #ttnn.shape<1x1500x768>
tensor<[1,1500,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (47, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<658x1>>, >
shape: #ttnn.shape<1x1504x14x14>
tensor<[1,1504,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21056 + d1 * 14 + d2, d3), memory_config: (658, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<329x1>>, >
shape: #ttnn.shape<1x1504x7x7>
tensor<[1,1504,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10528 + d1 * 7 + d2, d3), memory_config: (329, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1536>
tensor<[1,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<480x1>>, >
shape: #ttnn.shape<1x1536x10x10>
tensor<[1,1536,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15360 + d1 * 10 + d2, d3), memory_config: (480, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<672x1>>, >
shape: #ttnn.shape<1x1536x14x14>
tensor<[1,1536,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21504 + d1 * 14 + d2, d3), memory_config: (672, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x1536x7x7>
tensor<[1,1536,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 7 + d2, d3), memory_config: (336, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<686x1>>, >
shape: #ttnn.shape<1x1568x14x14>
tensor<[1,1568,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21952 + d1 * 14 + d2, d3), memory_config: (686, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<343x1>>, >
shape: #ttnn.shape<1x1568x7x7>
tensor<[1,1568,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10976 + d1 * 7 + d2, d3), memory_config: (343, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x15x1024>
tensor<[1,15,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x15x1024>
tensor<[1,15,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x15x1536>
tensor<[1,15,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15x1>
tensor<[1,15,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15x1>
tensor<[1,15,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x15x3072>
tensor<[1,15,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x15x512>
tensor<[1,15,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<700x1>>, >
shape: #ttnn.shape<1x1600x14x14>
tensor<[1,1600,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22400 + d1 * 14 + d2, d3), memory_config: (700, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<350x1>>, >
shape: #ttnn.shape<1x1600x7x7>
tensor<[1,1600,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11200 + d1 * 7 + d2, d3), memory_config: (350, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x1>>, >
shape: #ttnn.shape<1x160x14x14>
tensor<[1,160,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 14 + d2, d3), memory_config: (70, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x1>>, >
shape: #ttnn.shape<1x160x24x24>
tensor<[1,160,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 24 + d2, d3), memory_config: (120, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x1>>, >
shape: #ttnn.shape<1x160x24x24>
tensor<[1,160,24,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 24 + d2, d3), memory_config: (120, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<140x1>>, >
shape: #ttnn.shape<1x160x28x28>
tensor<[1,160,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4480 + d1 * 28 + d2, d3), memory_config: (140, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x2>>, >
shape: #ttnn.shape<1x160x56x56>
tensor<[1,160,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 56 + d2, d3), memory_config: (280, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x160x7x7>
tensor<[1,160,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 7 + d2, d3), memory_config: (35, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x160x7x7>
tensor<[1,160,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 7 + d2, d3), memory_config: (35, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<612x1>>, >
shape: #ttnn.shape<1x1632x12x12>
tensor<[1,1632,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19584 + d1 * 12 + d2, d3), memory_config: (612, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<714x1>>, >
shape: #ttnn.shape<1x1632x14x14>
tensor<[1,1632,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22848 + d1 * 14 + d2, d3), memory_config: (714, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<357x1>>, >
shape: #ttnn.shape<1x1632x7x7>
tensor<[1,1632,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11424 + d1 * 7 + d2, d3), memory_config: (357, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x6>>, >
shape: #ttnn.shape<1x16384x192>
tensor<[1,16384,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x6>>, >
shape: #ttnn.shape<1x16384x192>
tensor<[1,16384,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x1>
tensor<[1,16384,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<1x16384x256>
tensor<[1,16384,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x32>
tensor<[1,16384,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x32>
tensor<[1,16384,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x12>>, >
shape: #ttnn.shape<1x16384x384>
tensor<[1,16384,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x12>>, >
shape: #ttnn.shape<1x16384x384>
tensor<[1,16384,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x24>>, >
shape: #ttnn.shape<1x16384x768>
tensor<[1,16384,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<728x1>>, >
shape: #ttnn.shape<1x1664x14x14>
tensor<[1,1664,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23296 + d1 * 14 + d2, d3), memory_config: (728, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x1664x7x7>
tensor<[1,1664,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 7 + d2, d3), memory_config: (364, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x168x28x28>
tensor<[1,168,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 28 + d2, d3), memory_config: (147, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<742x1>>, >
shape: #ttnn.shape<1x1696x14x14>
tensor<[1,1696,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23744 + d1 * 14 + d2, d3), memory_config: (742, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<371x1>>, >
shape: #ttnn.shape<1x1696x7x7>
tensor<[1,1696,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11872 + d1 * 7 + d2, d3), memory_config: (371, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<1x16x10x10>
tensor<[1,16,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 160 + d1 * 10 + d2, d3), memory_config: (5, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<1x16x10x10>
tensor<[1,16,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 160 + d1 * 10 + d2, d3), memory_config: (5, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x4>>, >
shape: #ttnn.shape<1x16x112x112>
tensor<[1,16,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 112 + d2, d3), memory_config: (56, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x4>>, >
shape: #ttnn.shape<1x16x112x112>
tensor<[1,16,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 112 + d2, d3), memory_config: (56, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x4>>, >
shape: #ttnn.shape<1x16x120x120>
tensor<[1,16,120,120,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 120 + d2, d3), memory_config: (60, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<65x5>>, >
shape: #ttnn.shape<1x16x130x130>
tensor<[1,16,130,130,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2080 + d1 * 130 + d2, d3), memory_config: (65, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x16x14x14>
tensor<[1,16,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 14 + d2, d3), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x5>>, >
shape: #ttnn.shape<1x16x160x160>
tensor<[1,16,160,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 160 + d2, d3), memory_config: (80, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x5>>, >
shape: #ttnn.shape<1x16x160x160>
tensor<[1,16,160,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 160 + d2, d3), memory_config: (80, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<99x7>>, >
shape: #ttnn.shape<1x16x197x197>
tensor<[1,16,197,197,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3152 + d1 * 197 + d2, d3), memory_config: (99, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1>
tensor<[1,16,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x10>
tensor<[1,16,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x10>
tensor<[1,16,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x11>
tensor<[1,16,1,11,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x11>
tensor<[1,16,1,11,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x12>
tensor<[1,16,1,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x12>
tensor<[1,16,1,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x13>
tensor<[1,16,1,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x13>
tensor<[1,16,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x14>
tensor<[1,16,1,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x14>
tensor<[1,16,1,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x15>
tensor<[1,16,1,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x15>
tensor<[1,16,1,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x16>
tensor<[1,16,1,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x16>
tensor<[1,16,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x17>
tensor<[1,16,1,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x17>
tensor<[1,16,1,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x18>
tensor<[1,16,1,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x18>
tensor<[1,16,1,18,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x19>
tensor<[1,16,1,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x19>
tensor<[1,16,1,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x1>
tensor<[1,16,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x20>
tensor<[1,16,1,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x20>
tensor<[1,16,1,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x21>
tensor<[1,16,1,21,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x22>
tensor<[1,16,1,22,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x23>
tensor<[1,16,1,23,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x24>
tensor<[1,16,1,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x25>
tensor<[1,16,1,25,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x26>
tensor<[1,16,1,26,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x27>
tensor<[1,16,1,27,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x28>
tensor<[1,16,1,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x29>
tensor<[1,16,1,29,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x2>
tensor<[1,16,1,2,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x3>
tensor<[1,16,1,3,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x4>
tensor<[1,16,1,4,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x5>
tensor<[1,16,1,5,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x6>
tensor<[1,16,1,6,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x6>
tensor<[1,16,1,6,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x7>
tensor<[1,16,1,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x7>
tensor<[1,16,1,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x8>
tensor<[1,16,1,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x8>
tensor<[1,16,1,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x9>
tensor<[1,16,1,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x9>
tensor<[1,16,1,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x7>>, >
shape: #ttnn.shape<1x16x224x224>
tensor<[1,16,224,224,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 224 + d2, d3), memory_config: (112, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x8>>, >
shape: #ttnn.shape<1x16x256x256>
tensor<[1,16,256,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 256 + d2, d3), memory_config: (128, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<14x1>>, >
shape: #ttnn.shape<1x16x28x28>
tensor<[1,16,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 448 + d1 * 28 + d2, d3), memory_config: (14, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x16x32x32>
tensor<[1,16,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 32 + d2, d3), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x2>>, >
shape: #ttnn.shape<1x16x56x56>
tensor<[1,16,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 56 + d2, d3), memory_config: (28, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x16x5x5>
tensor<[1,16,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 5 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x16x6x6>
tensor<[1,16,6,6,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 96 + d1 * 6 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x16x768>
tensor<[1,16,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x16x768>
tensor<[1,16,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x2>>, >
shape: #ttnn.shape<1x16x8x49x49>
tensor<[1,16,8,49,49,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 6272 + d1 * 392 + d2 * 49 + d3, d4), memory_config: (196, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<1x16x9x9>
tensor<[1,16,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 9 + d2, d3), memory_config: (5, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<756x1>>, >
shape: #ttnn.shape<1x1728x14x14>
tensor<[1,1728,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24192 + d1 * 14 + d2, d3), memory_config: (756, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<378x1>>, >
shape: #ttnn.shape<1x1728x7x7>
tensor<[1,1728,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12096 + d1 * 7 + d2, d3), memory_config: (378, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<770x1>>, >
shape: #ttnn.shape<1x1760x14x14>
tensor<[1,1760,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24640 + d1 * 14 + d2, d3), memory_config: (770, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<385x1>>, >
shape: #ttnn.shape<1x1760x7x7>
tensor<[1,1760,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12320 + d1 * 7 + d2, d3), memory_config: (385, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<784x1>>, >
shape: #ttnn.shape<1x1792x14x14>
tensor<[1,1792,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25088 + d1 * 14 + d2, d3), memory_config: (784, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x1792x7x7>
tensor<[1,1792,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 7 + d2, d3), memory_config: (392, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<399x1>>, >
shape: #ttnn.shape<1x1824x7x7>
tensor<[1,1824,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12768 + d1 * 7 + d2, d3), memory_config: (399, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<81x1>>, >
shape: #ttnn.shape<1x184x14x14>
tensor<[1,184,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2576 + d1 * 14 + d2, d3), memory_config: (81, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<115x1>>, >
shape: #ttnn.shape<1x184x20x20>
tensor<[1,184,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3680 + d1 * 20 + d2, d3), memory_config: (115, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<1x184x7x7>
tensor<[1,184,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1288 + d1 * 7 + d2, d3), memory_config: (41, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x1>>, >
shape: #ttnn.shape<1x1856x7x7>
tensor<[1,1856,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 7 + d2, d3), memory_config: (406, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<413x1>>, >
shape: #ttnn.shape<1x1888x7x7>
tensor<[1,1888,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13216 + d1 * 7 + d2, d3), memory_config: (413, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x18x14x14>
tensor<[1,18,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 252 + d1 * 14 + d2, d3), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x18x28x28>
tensor<[1,18,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 504 + d1 * 28 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x2>>, >
shape: #ttnn.shape<1x18x56x56>
tensor<[1,18,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 56 + d2, d3), memory_config: (32, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x2>>, >
shape: #ttnn.shape<1x18x56x56>
tensor<[1,18,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 56 + d2, d3), memory_config: (32, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x18x7x7>
tensor<[1,18,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 126 + d1 * 7 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x1>>, >
shape: #ttnn.shape<1x19200x1>
tensor<[1,19200,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x2>>, >
shape: #ttnn.shape<1x19200x64>
tensor<[1,19200,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x2>>, >
shape: #ttnn.shape<1x19200x64>
tensor<[1,19200,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x1920x16x16>
tensor<[1,1920,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 16 + d2, d3), memory_config: (960, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1920x1>>, >
shape: #ttnn.shape<1x1920x32x32>
tensor<[1,1920,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 61440 + d1 * 32 + d2, d3), memory_config: (1920, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x1920x7x7>
tensor<[1,1920,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 7 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x192x14x14>
tensor<[1,192,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 14 + d2, d3), memory_config: (84, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x192x17x17>
tensor<[1,192,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3264 + d1 * 17 + d2, d3), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x192x28x28>
tensor<[1,192,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 28 + d2, d3), memory_config: (168, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x2>>, >
shape: #ttnn.shape<1x192x35x35>
tensor<[1,192,35,35,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 35 + d2, d3), memory_config: (210, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<228x2>>, >
shape: #ttnn.shape<1x192x38x38>
tensor<[1,192,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7296 + d1 * 38 + d2, d3), memory_config: (228, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<288x2>>, >
shape: #ttnn.shape<1x192x48x48>
tensor<[1,192,48,48,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9216 + d1 * 48 + d2, d3), memory_config: (288, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x2>>, >
shape: #ttnn.shape<1x192x56x56>
tensor<[1,192,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 56 + d2, d3), memory_config: (336, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<450x3>>, >
shape: #ttnn.shape<1x192x75x75>
tensor<[1,192,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 75 + d2, d3), memory_config: (450, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x1>>, >
shape: #ttnn.shape<1x192x7x7>
tensor<[1,192,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 7 + d2, d3), memory_config: (42, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x1>>, >
shape: #ttnn.shape<1x192x7x7>
tensor<[1,192,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 7 + d2, d3), memory_config: (42, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x192x8x8>
tensor<[1,192,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 8 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x192x8x8>
tensor<[1,192,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 8 + d2, d3), memory_config: (48, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<570x3>>, >
shape: #ttnn.shape<1x192x95x95>
tensor<[1,192,95,95,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18240 + d1 * 95 + d2, d3), memory_config: (570, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x193x768>
tensor<[1,193,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 193 + d1, d2), memory_config: (7, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<86x1>>, >
shape: #ttnn.shape<1x196x14x14>
tensor<[1,196,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2744 + d1 * 14 + d2, d3), memory_config: (86, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x196x1>
tensor<[1,196,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x196x768>
tensor<[1,196,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (7, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x196x768>
tensor<[1,196,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x197x1024>
tensor<[1,197,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x197x1024>
tensor<[1,197,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x197x1>
tensor<[1,197,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x197x768>
tensor<[1,197,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x197x768>
tensor<[1,197,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x12x12>
tensor<[1,1,12,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 * 12 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x13x13>
tensor<[1,1,13,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13 + d1 * 13 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1x1536>
tensor<[1,1,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x16x32>
tensor<[1,1,16,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 * 16 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1>
tensor<[1,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1>
tensor<[1,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x10>
tensor<[1,1,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x11>
tensor<[1,1,1,11,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x12>
tensor<[1,1,1,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x13>
tensor<[1,1,1,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x14>
tensor<[1,1,1,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x15>
tensor<[1,1,1,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x16>
tensor<[1,1,1,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x17>
tensor<[1,1,1,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x18>
tensor<[1,1,1,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x19>
tensor<[1,1,1,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x20>
tensor<[1,1,1,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x21>
tensor<[1,1,1,21,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x22>
tensor<[1,1,1,22,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x23>
tensor<[1,1,1,23,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x24>
tensor<[1,1,1,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x25>
tensor<[1,1,1,25,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x26>
tensor<[1,1,1,26,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x27>
tensor<[1,1,1,27,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x28>
tensor<[1,1,1,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x29>
tensor<[1,1,1,29,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x46>
tensor<[1,1,1,46,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x47>
tensor<[1,1,1,47,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x48>
tensor<[1,1,1,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x49>
tensor<[1,1,1,49,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x50>
tensor<[1,1,1,50,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x51>
tensor<[1,1,1,51,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x52>
tensor<[1,1,1,52,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x53>
tensor<[1,1,1,53,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x54>
tensor<[1,1,1,54,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x55>
tensor<[1,1,1,55,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x56>
tensor<[1,1,1,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x57>
tensor<[1,1,1,57,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x58>
tensor<[1,1,1,58,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x59>
tensor<[1,1,1,59,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x60>
tensor<[1,1,1,60,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x61>
tensor<[1,1,1,61,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x62>
tensor<[1,1,1,62,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x63>
tensor<[1,1,1,63,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x64>
tensor<[1,1,1,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x65>
tensor<[1,1,1,65,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x66>
tensor<[1,1,1,66,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x67>
tensor<[1,1,1,67,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x68>
tensor<[1,1,1,68,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x69>
tensor<[1,1,1,69,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x6>
tensor<[1,1,1,6,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x70>
tensor<[1,1,1,70,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x71>
tensor<[1,1,1,71,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x72>
tensor<[1,1,1,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x73>
tensor<[1,1,1,73,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x74>
tensor<[1,1,1,74,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x75>
tensor<[1,1,1,75,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x76>
tensor<[1,1,1,76,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x77>
tensor<[1,1,1,77,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x78>
tensor<[1,1,1,78,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x79>
tensor<[1,1,1,79,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x7>
tensor<[1,1,1,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x80>
tensor<[1,1,1,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x81>
tensor<[1,1,1,81,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x82>
tensor<[1,1,1,82,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x83>
tensor<[1,1,1,83,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x84>
tensor<[1,1,1,84,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x85>
tensor<[1,1,1,85,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x86>
tensor<[1,1,1,86,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x87>
tensor<[1,1,1,87,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x88>
tensor<[1,1,1,88,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x89>
tensor<[1,1,1,89,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x8>
tensor<[1,1,1,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x90>
tensor<[1,1,1,90,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x91>
tensor<[1,1,1,91,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x92>
tensor<[1,1,1,92,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x93>
tensor<[1,1,1,93,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x94>
tensor<[1,1,1,94,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x95>
tensor<[1,1,1,95,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x96>
tensor<[1,1,1,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x1x97>
tensor<[1,1,1,97,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x1x98>
tensor<[1,1,1,98,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x1x99>
tensor<[1,1,1,99,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x9>
tensor<[1,1,1,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x7>>, >
shape: #ttnn.shape<1x1x224x224>
tensor<[1,1,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 224 + d2, d3), memory_config: (7, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x7>>, >
shape: #ttnn.shape<1x1x224x224>
tensor<[1,1,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 224 + d2, d3), memory_config: (7, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x7>>, >
shape: #ttnn.shape<1x1x224x224>
tensor<[1,1,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 224 + d2, d3), memory_config: (7, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x1x3072>
tensor<[1,1,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x1x3072>
tensor<[1,1,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x32x32>
tensor<[1,1,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 * 32 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x1x4096>
tensor<[1,1,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x1x4096>
tensor<[1,1,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x1x45x45>
tensor<[1,1,45,45,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 45 + d1 * 45 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x512>
tensor<[1,1,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x5x5>
tensor<[1,1,5,5,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5 + d1 * 5 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x7x64>
tensor<[1,1,7,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x7x7>
tensor<[1,1,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<88x1>>, >
shape: #ttnn.shape<1x200x14x14>
tensor<[1,200,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2800 + d1 * 14 + d2, d3), memory_config: (88, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<125x1>>, >
shape: #ttnn.shape<1x200x20x20>
tensor<[1,200,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4000 + d1 * 20 + d2, d3), memory_config: (125, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<1x200x7x7>
tensor<[1,200,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1400 + d1 * 7 + d2, d3), memory_config: (44, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x201x1>
tensor<[1,201,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x201x768>
tensor<[1,201,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (7, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x201x768>
tensor<[1,201,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x2048x10x10>
tensor<[1,2048,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 10 + d2, d3), memory_config: (640, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x1>>, >
shape: #ttnn.shape<1x2048x14x14>
tensor<[1,2048,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 14 + d2, d3), memory_config: (896, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x2048x1>
tensor<[1,2048,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1472x2>>, >
shape: #ttnn.shape<1x2048x23x40>
tensor<[1,2048,23,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 47104 + d1 * 23 + d2, d3), memory_config: (1472, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1472x2>>, >
shape: #ttnn.shape<1x2048x23x40>
tensor<[1,2048,23,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 47104 + d1 * 23 + d2, d3), memory_config: (1472, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<1x2048x768>
tensor<[1,2048,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<1x2048x768>
tensor<[1,2048,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<1x2048x768>
tensor<[1,2048,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x2048x7x7>
tensor<[1,2048,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 7 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x2048x7x7>
tensor<[1,2048,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 7 + d2, d3), memory_config: (448, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<91x1>>, >
shape: #ttnn.shape<1x208x14x14>
tensor<[1,208,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2912 + d1 * 14 + d2, d3), memory_config: (91, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<59x1>>, >
shape: #ttnn.shape<1x208x9x9>
tensor<[1,208,9,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1872 + d1 * 9 + d2, d3), memory_config: (59, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<59x1>>, >
shape: #ttnn.shape<1x208x9x9>
tensor<[1,208,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1872 + d1 * 9 + d2, d3), memory_config: (59, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x20x28x28>
tensor<[1,20,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 560 + d1 * 28 + d2, d3), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x683>>, >
shape: #ttnn.shape<1x21843>
tensor<[1,21843,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 683, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x1>>, >
shape: #ttnn.shape<1x224x14x14>
tensor<[1,224,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 14 + d2, d3), memory_config: (98, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<119x1>>, >
shape: #ttnn.shape<1x224x17x17>
tensor<[1,224,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3808 + d1 * 17 + d2, d3), memory_config: (119, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x224x28x28>
tensor<[1,224,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 28 + d2, d3), memory_config: (196, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<245x2>>, >
shape: #ttnn.shape<1x224x35x35>
tensor<[1,224,35,35,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7840 + d1 * 35 + d2, d3), memory_config: (245, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x2>>, >
shape: #ttnn.shape<1x224x56x56>
tensor<[1,224,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 56 + d2, d3), memory_config: (392, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x1>>, >
shape: #ttnn.shape<1x224x7x7>
tensor<[1,224,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 7 + d2, d3), memory_config: (49, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<73x1>>, >
shape: #ttnn.shape<1x232x10x10>
tensor<[1,232,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2320 + d1 * 10 + d2, d3), memory_config: (73, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<73x1>>, >
shape: #ttnn.shape<1x232x10x10>
tensor<[1,232,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2320 + d1 * 10 + d2, d3), memory_config: (73, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<812x4>>, >
shape: #ttnn.shape<1x232x112x112>
tensor<[1,232,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25984 + d1 * 112 + d2, d3), memory_config: (812, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x2>>, >
shape: #ttnn.shape<1x232x56x56>
tensor<[1,232,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 56 + d2, d3), memory_config: (406, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x2>>, >
shape: #ttnn.shape<1x232x56x56>
tensor<[1,232,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 56 + d2, d3), memory_config: (406, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x240x14x14>
tensor<[1,240,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 14 + d2, d3), memory_config: (105, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<113x1>>, >
shape: #ttnn.shape<1x240x15x15>
tensor<[1,240,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 15 + d2, d3), memory_config: (113, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x240x20x20>
tensor<[1,240,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 20 + d2, d3), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x240x28x28>
tensor<[1,240,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 28 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<225x1>>, >
shape: #ttnn.shape<1x240x30x30>
tensor<[1,240,30,30,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7200 + d1 * 30 + d2, d3), memory_config: (225, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x2>>, >
shape: #ttnn.shape<1x240x40x40>
tensor<[1,240,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 40 + d2, d3), memory_config: (300, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x4>>, >
shape: #ttnn.shape<1x24x112x112>
tensor<[1,24,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 112 + d2, d3), memory_config: (84, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x1>>, >
shape: #ttnn.shape<1x24x14x14>
tensor<[1,24,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 336 + d1 * 14 + d2, d3), memory_config: (11, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<113x5>>, >
shape: #ttnn.shape<1x24x150x150>
tensor<[1,24,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 150 + d2, d3), memory_config: (113, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<143x6>>, >
shape: #ttnn.shape<1x24x190x190>
tensor<[1,24,190,190,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4560 + d1 * 190 + d2, d3), memory_config: (143, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x24x28x28>
tensor<[1,24,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 * 28 + d2, d3), memory_config: (21, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x4>>, >
shape: #ttnn.shape<1x24x32x128>
tensor<[1,24,32,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 32 + d2, d3), memory_config: (24, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x24x32x32>
tensor<[1,24,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 32 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x2>>, >
shape: #ttnn.shape<1x24x56x56>
tensor<[1,24,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 56 + d2, d3), memory_config: (42, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x2>>, >
shape: #ttnn.shape<1x24x56x56>
tensor<[1,24,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 56 + d2, d3), memory_config: (42, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<45x2>>, >
shape: #ttnn.shape<1x24x60x60>
tensor<[1,24,60,60,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1440 + d1 * 60 + d2, d3), memory_config: (45, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<45x2>>, >
shape: #ttnn.shape<1x24x60x60>
tensor<[1,24,60,60,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1440 + d1 * 60 + d2, d3), memory_config: (45, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x3>>, >
shape: #ttnn.shape<1x24x65x65>
tensor<[1,24,65,65,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1560 + d1 * 65 + d2, d3), memory_config: (49, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x3>>, >
shape: #ttnn.shape<1x24x65x65>
tensor<[1,24,65,65,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1560 + d1 * 65 + d2, d3), memory_config: (49, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x3>>, >
shape: #ttnn.shape<1x24x80x80>
tensor<[1,24,80,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 80 + d2, d3), memory_config: (60, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x3>>, >
shape: #ttnn.shape<1x24x80x80>
tensor<[1,24,80,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 80 + d2, d3), memory_config: (60, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1103x1>>, >
shape: #ttnn.shape<1x2520x14x14>
tensor<[1,2520,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 35280 + d1 * 14 + d2, d3), memory_config: (1103, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<552x1>>, >
shape: #ttnn.shape<1x2520x7x7>
tensor<[1,2520,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17640 + d1 * 7 + d2, d3), memory_config: (552, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<552x1>>, >
shape: #ttnn.shape<1x2520x7x7>
tensor<[1,2520,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17640 + d1 * 7 + d2, d3), memory_config: (552, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x1>>, >
shape: #ttnn.shape<1x2560x16x16>
tensor<[1,2560,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 16 + d2, d3), memory_config: (1280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x2560x8x8>
tensor<[1,2560,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 8 + d2, d3), memory_config: (640, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x8>>, >
shape: #ttnn.shape<1x256>
tensor<[1,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<1x256x1024>
tensor<[1,256,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<1x256x1024>
tensor<[1,256,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x256x10x10>
tensor<[1,256,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 10 + d2, d3), memory_config: (80, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<1x256x1280>
tensor<[1,256,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<1x256x1280>
tensor<[1,256,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x4>>, >
shape: #ttnn.shape<1x256x128x128>
tensor<[1,256,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32768 + d1 * 128 + d2, d3), memory_config: (1024, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x256x14x14>
tensor<[1,256,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 14 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x256x14x14>
tensor<[1,256,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 14 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x48>>, >
shape: #ttnn.shape<1x256x1536>
tensor<[1,256,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x48>>, >
shape: #ttnn.shape<1x256x1536>
tensor<[1,256,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x5>>, >
shape: #ttnn.shape<1x256x160>
tensor<[1,256,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x256x16x16>
tensor<[1,256,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 16 + d2, d3), memory_config: (128, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x256x16x16>
tensor<[1,256,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 16 + d2, d3), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<136x1>>, >
shape: #ttnn.shape<1x256x17x17>
tensor<[1,256,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4352 + d1 * 17 + d2, d3), memory_config: (136, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x10>>, >
shape: #ttnn.shape<1x256x180x320>
tensor<[1,256,180,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 180 + d2, d3), memory_config: (1440, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x10>>, >
shape: #ttnn.shape<1x256x180x320>
tensor<[1,256,180,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 180 + d2, d3), memory_config: (1440, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x256x256>
tensor<[1,256,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x256x256>
tensor<[1,256,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x256x28x28>
tensor<[1,256,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 28 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x256x28x28>
tensor<[1,256,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 28 + d2, d3), memory_config: (224, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x256x2x2>
tensor<[1,256,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 2 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x96>>, >
shape: #ttnn.shape<1x256x3072>
tensor<[1,256,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x32>
tensor<[1,256,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x256x32x32>
tensor<[1,256,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 32 + d2, d3), memory_config: (256, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<304x2>>, >
shape: #ttnn.shape<1x256x38x38>
tensor<[1,256,38,38,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9728 + d1 * 38 + d2, d3), memory_config: (304, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<304x2>>, >
shape: #ttnn.shape<1x256x38x38>
tensor<[1,256,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9728 + d1 * 38 + d2, d3), memory_config: (304, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x256x3x3>
tensor<[1,256,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 3 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x3>>, >
shape: #ttnn.shape<1x256x45x80>
tensor<[1,256,45,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 45 + d2, d3), memory_config: (360, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<1x256x512>
tensor<[1,256,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<1x256x512>
tensor<[1,256,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x2>>, >
shape: #ttnn.shape<1x256x56x56>
tensor<[1,256,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 56 + d2, d3), memory_config: (448, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x2>>, >
shape: #ttnn.shape<1x256x56x56>
tensor<[1,256,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 56 + d2, d3), memory_config: (448, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x256x5x5>
tensor<[1,256,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 5 + d2, d3), memory_config: (40, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x256x64>
tensor<[1,256,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x2>>, >
shape: #ttnn.shape<1x256x64x64>
tensor<[1,256,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 64 + d2, d3), memory_config: (512, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x2>>, >
shape: #ttnn.shape<1x256x64x64>
tensor<[1,256,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 64 + d2, d3), memory_config: (512, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x3>>, >
shape: #ttnn.shape<1x256x75x75>
tensor<[1,256,75,75,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 75 + d2, d3), memory_config: (600, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x3>>, >
shape: #ttnn.shape<1x256x75x75>
tensor<[1,256,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 75 + d2, d3), memory_config: (600, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x1>>, >
shape: #ttnn.shape<1x256x7x7>
tensor<[1,256,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 7 + d2, d3), memory_config: (56, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x256x8x8>
tensor<[1,256,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 8 + d2, d3), memory_config: (64, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x5>>, >
shape: #ttnn.shape<1x256x90x160>
tensor<[1,256,90,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 90 + d2, d3), memory_config: (720, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x1>>, >
shape: #ttnn.shape<1x257x1>
tensor<[1,257,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (9, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x24>>, >
shape: #ttnn.shape<1x257x768>
tensor<[1,257,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (9, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x24>>, >
shape: #ttnn.shape<1x257x768>
tensor<[1,257,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (9, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25x1>
tensor<[1,25,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x25x768>
tensor<[1,25,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x25x768>
tensor<[1,25,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x272x12x12>
tensor<[1,272,12,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3264 + d1 * 12 + d2, d3), memory_config: (102, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x272x12x12>
tensor<[1,272,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3264 + d1 * 12 + d2, d3), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x1>>, >
shape: #ttnn.shape<1x272x7x7>
tensor<[1,272,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1904 + d1 * 7 + d2, d3), memory_config: (60, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27x1>
tensor<[1,27,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 27 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x27x768>
tensor<[1,27,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 27 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x288x14x14>
tensor<[1,288,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 14 + d2, d3), memory_config: (126, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<153x1>>, >
shape: #ttnn.shape<1x288x17x17>
tensor<[1,288,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4896 + d1 * 17 + d2, d3), memory_config: (153, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<171x1>>, >
shape: #ttnn.shape<1x288x19x19>
tensor<[1,288,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5472 + d1 * 19 + d2, d3), memory_config: (171, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x288x28x28>
tensor<[1,288,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 28 + d2, d3), memory_config: (252, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<297x2>>, >
shape: #ttnn.shape<1x288x33x33>
tensor<[1,288,33,33,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9504 + d1 * 33 + d2, d3), memory_config: (297, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<342x2>>, >
shape: #ttnn.shape<1x288x38x38>
tensor<[1,288,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10944 + d1 * 38 + d2, d3), memory_config: (342, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x4>>, >
shape: #ttnn.shape<1x28x13x128>
tensor<[1,28,13,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 364 + d1 * 13 + d2, d3), memory_config: (12, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x1>>, >
shape: #ttnn.shape<1x28x13x13>
tensor<[1,28,13,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 364 + d1 * 13 + d2, d3), memory_config: (12, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x32>>, >
shape: #ttnn.shape<1x28x28x1024>
tensor<[1,28,28,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x28x28x1>
tensor<[1,28,28,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<1x28x28x256>
tensor<[1,28,28,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<1x28x28x256>
tensor<[1,28,28,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x28x28x28>
tensor<[1,28,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x16>>, >
shape: #ttnn.shape<1x28x28x512>
tensor<[1,28,28,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x2>
tensor<[1,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x2x12x128>
tensor<[1,2,12,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 * 12 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x2x1x128>
tensor<[1,2,1,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x4>>, >
shape: #ttnn.shape<1x300x128>
tensor<[1,300,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<1x300x1>
tensor<[1,300,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x10>>, >
shape: #ttnn.shape<1x300x320>
tensor<[1,300,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x16>>, >
shape: #ttnn.shape<1x300x512>
tensor<[1,300,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x16>>, >
shape: #ttnn.shape<1x300x512>
tensor<[1,300,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<1x300x64>
tensor<[1,300,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x3072>
tensor<[1,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x10>
tensor<[1,3072,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x11>
tensor<[1,3072,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x12>
tensor<[1,3072,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x13>
tensor<[1,3072,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x14>
tensor<[1,3072,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x15>
tensor<[1,3072,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x16>
tensor<[1,3072,16,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x6>
tensor<[1,3072,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x7>
tensor<[1,3072,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x8>
tensor<[1,3072,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x9>
tensor<[1,3072,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x98>>, >
shape: #ttnn.shape<1x3129>
tensor<[1,3129,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 98, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<140x1>>, >
shape: #ttnn.shape<1x320x14x14>
tensor<[1,320,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4480 + d1 * 14 + d2, d3), memory_config: (140, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<170x1>>, >
shape: #ttnn.shape<1x320x17x17>
tensor<[1,320,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5440 + d1 * 17 + d2, d3), memory_config: (170, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x320x28x28>
tensor<[1,320,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 28 + d2, d3), memory_config: (280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x320x32x32>
tensor<[1,320,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 32 + d2, d3), memory_config: (320, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<1x320x64x64>
tensor<[1,320,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 64 + d2, d3), memory_config: (640, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<1x320x64x64>
tensor<[1,320,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 64 + d2, d3), memory_config: (640, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x1>>, >
shape: #ttnn.shape<1x320x7x7>
tensor<[1,320,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 7 + d2, d3), memory_config: (70, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x320x8x8>
tensor<[1,320,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 8 + d2, d3), memory_config: (80, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x4>>, >
shape: #ttnn.shape<1x32x112x112>
tensor<[1,32,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 112 + d2, d3), memory_config: (112, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x4>>, >
shape: #ttnn.shape<1x32x120x120>
tensor<[1,32,120,120,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 120 + d2, d3), memory_config: (120, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x5>>, >
shape: #ttnn.shape<1x32x120x160>
tensor<[1,32,120,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 120 + d2, d3), memory_config: (120, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x4>>, >
shape: #ttnn.shape<1x32x128x128>
tensor<[1,32,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 128 + d2, d3), memory_config: (128, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x4>>, >
shape: #ttnn.shape<1x32x128x128>
tensor<[1,32,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 128 + d2, d3), memory_config: (128, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<130x5>>, >
shape: #ttnn.shape<1x32x130x130>
tensor<[1,32,130,130,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4160 + d1 * 130 + d2, d3), memory_config: (130, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x5>>, >
shape: #ttnn.shape<1x32x147x147>
tensor<[1,32,147,147,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 147 + d2, d3), memory_config: (147, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<149x5>>, >
shape: #ttnn.shape<1x32x149x149>
tensor<[1,32,149,149,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4768 + d1 * 149 + d2, d3), memory_config: (149, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<14x1>>, >
shape: #ttnn.shape<1x32x14x14>
tensor<[1,32,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 448 + d1 * 14 + d2, d3), memory_config: (14, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x5>>, >
shape: #ttnn.shape<1x32x150x150>
tensor<[1,32,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 150 + d2, d3), memory_config: (150, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x32x1536>
tensor<[1,32,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x32x1536>
tensor<[1,32,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<190x6>>, >
shape: #ttnn.shape<1x32x190x190>
tensor<[1,32,190,190,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6080 + d1 * 190 + d2, d3), memory_config: (190, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1>
tensor<[1,32,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1>
tensor<[1,32,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x8>>, >
shape: #ttnn.shape<1x32x256x256>
tensor<[1,32,256,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 256 + d2, d3), memory_config: (256, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x32x28x28>
tensor<[1,32,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 28 + d2, d3), memory_config: (28, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x32x28x28>
tensor<[1,32,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 28 + d2, d3), memory_config: (28, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x32x3072>
tensor<[1,32,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x2>>, >
shape: #ttnn.shape<1x32x30x40>
tensor<[1,32,30,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 30 + d2, d3), memory_config: (30, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x4>>, >
shape: #ttnn.shape<1x32x32x128>
tensor<[1,32,32,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (32, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x32x32x32>
tensor<[1,32,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x32x4096>
tensor<[1,32,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x2>>, >
shape: #ttnn.shape<1x32x49x49>
tensor<[1,32,49,49,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 49 + d2, d3), memory_config: (49, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x16>>, >
shape: #ttnn.shape<1x32x512x512>
tensor<[1,32,512,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 512 + d2, d3), memory_config: (512, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x2>>, >
shape: #ttnn.shape<1x32x56x56>
tensor<[1,32,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 56 + d2, d3), memory_config: (56, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x3>>, >
shape: #ttnn.shape<1x32x60x80>
tensor<[1,32,60,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 60 + d2, d3), memory_config: (60, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<1x32x6144>
tensor<[1,32,6144,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<1x32x6144>
tensor<[1,32,6144,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<75x3>>, >
shape: #ttnn.shape<1x32x75x75>
tensor<[1,32,75,75,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2400 + d1 * 75 + d2, d3), memory_config: (75, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<75x3>>, >
shape: #ttnn.shape<1x32x75x75>
tensor<[1,32,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2400 + d1 * 75 + d2, d3), memory_config: (75, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x32x7x7>
tensor<[1,32,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 7 + d2, d3), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<95x3>>, >
shape: #ttnn.shape<1x32x95x95>
tensor<[1,32,95,95,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3040 + d1 * 95 + d2, d3), memory_config: (95, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<95x3>>, >
shape: #ttnn.shape<1x32x95x95>
tensor<[1,32,95,95,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3040 + d1 * 95 + d2, d3), memory_config: (95, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x334x14x14>
tensor<[1,334,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4676 + d1 * 14 + d2, d3), memory_config: (147, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1176x4>>, >
shape: #ttnn.shape<1x336x112x112>
tensor<[1,336,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 37632 + d1 * 112 + d2, d3), memory_config: (1176, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x336x14x14>
tensor<[1,336,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 14 + d2, d3), memory_config: (147, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x336x24x24>
tensor<[1,336,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 24 + d2, d3), memory_config: (252, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x2>>, >
shape: #ttnn.shape<1x336x48x48>
tensor<[1,336,48,48,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 48 + d2, d3), memory_config: (504, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x2>>, >
shape: #ttnn.shape<1x336x56x56>
tensor<[1,336,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 56 + d2, d3), memory_config: (588, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x2>>, >
shape: #ttnn.shape<1x336x56x56>
tensor<[1,336,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 56 + d2, d3), memory_config: (588, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x34x28x28>
tensor<[1,34,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 952 + d1 * 28 + d2, d3), memory_config: (30, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<154x1>>, >
shape: #ttnn.shape<1x352x14x14>
tensor<[1,352,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4928 + d1 * 14 + d2, d3), memory_config: (154, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x352x28x28>
tensor<[1,352,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 28 + d2, d3), memory_config: (308, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<99x1>>, >
shape: #ttnn.shape<1x352x9x9>
tensor<[1,352,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3168 + d1 * 9 + d2, d3), memory_config: (99, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1944x5>>, >
shape: #ttnn.shape<1x36x12x144x144>
tensor<[1,36,12,144,144,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 62208 + d1 * 1728 + d2 * 144 + d3, d4), memory_config: (1944, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x36x14x14>
tensor<[1,36,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 504 + d1 * 14 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3888x5>>, >
shape: #ttnn.shape<1x36x24x144x144>
tensor<[1,36,24,144,144,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 124416 + d1 * 3456 + d2 * 144 + d3, d4), memory_config: (3888, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x36x28x28>
tensor<[1,36,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 28 + d2, d3), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x36x28x28>
tensor<[1,36,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 28 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x2>>, >
shape: #ttnn.shape<1x36x56x56>
tensor<[1,36,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 56 + d2, d3), memory_config: (63, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x36x7x7>
tensor<[1,36,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 252 + d1 * 7 + d2, d3), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1624x1>>, >
shape: #ttnn.shape<1x3712x14x14>
tensor<[1,3712,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 51968 + d1 * 14 + d2, d3), memory_config: (1624, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<812x1>>, >
shape: #ttnn.shape<1x3712x7x7>
tensor<[1,3712,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25984 + d1 * 7 + d2, d3), memory_config: (812, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<812x1>>, >
shape: #ttnn.shape<1x3712x7x7>
tensor<[1,3712,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25984 + d1 * 7 + d2, d3), memory_config: (812, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x1>>, >
shape: #ttnn.shape<1x384x10x10>
tensor<[1,384,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 10 + d2, d3), memory_config: (120, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x384x14x14>
tensor<[1,384,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 14 + d2, d3), memory_config: (168, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<204x1>>, >
shape: #ttnn.shape<1x384x17x17>
tensor<[1,384,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6528 + d1 * 17 + d2, d3), memory_config: (204, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x384x28x28>
tensor<[1,384,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 28 + d2, d3), memory_config: (336, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x384x7x7>
tensor<[1,384,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 7 + d2, d3), memory_config: (84, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x384x8x8>
tensor<[1,384,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 * 8 + d2, d3), memory_config: (96, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x3>
tensor<[1,3,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x3x16x16x2>
tensor<[1,3,16,16,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 768 + d1 * 256 + d2 * 16 + d3, d4), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3x32x32x2>
tensor<[1,3,32,32,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3072 + d1 * 1024 + d2 * 32 + d3, d4), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<384x1>>, >
shape: #ttnn.shape<1x3x64x64x2>
tensor<[1,3,64,64,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12288 + d1 * 4096 + d2 * 64 + d3, d4), memory_config: (384, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x4096>
tensor<[1,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x48>>, >
shape: #ttnn.shape<1x4096x1536>
tensor<[1,4096,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x4096x1>
tensor<[1,4096,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x8>>, >
shape: #ttnn.shape<1x4096x256>
tensor<[1,4096,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x10>>, >
shape: #ttnn.shape<1x4096x320>
tensor<[1,4096,320,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x10>>, >
shape: #ttnn.shape<1x4096x320>
tensor<[1,4096,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x12>>, >
shape: #ttnn.shape<1x4096x384>
tensor<[1,4096,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x12>>, >
shape: #ttnn.shape<1x4096x384>
tensor<[1,4096,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x4096x64>
tensor<[1,4096,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x4096x64>
tensor<[1,4096,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x24>>, >
shape: #ttnn.shape<1x4096x768>
tensor<[1,4096,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x24>>, >
shape: #ttnn.shape<1x4096x768>
tensor<[1,4096,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x40x14x14>
tensor<[1,40,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 560 + d1 * 14 + d2, d3), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x40x28x28>
tensor<[1,40,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 28 + d2, d3), memory_config: (35, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x40x28x28>
tensor<[1,40,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 28 + d2, d3), memory_config: (35, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x1>>, >
shape: #ttnn.shape<1x40x30x30>
tensor<[1,40,30,30,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 30 + d2, d3), memory_config: (38, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x1>>, >
shape: #ttnn.shape<1x40x30x30>
tensor<[1,40,30,30,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 30 + d2, d3), memory_config: (38, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<50x2>>, >
shape: #ttnn.shape<1x40x40x40>
tensor<[1,40,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 40 + d2, d3), memory_config: (50, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<50x2>>, >
shape: #ttnn.shape<1x40x40x40>
tensor<[1,40,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 40 + d2, d3), memory_config: (50, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x2>>, >
shape: #ttnn.shape<1x40x56x56>
tensor<[1,40,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 56 + d2, d3), memory_config: (70, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<182x1>>, >
shape: #ttnn.shape<1x416x14x14>
tensor<[1,416,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5824 + d1 * 14 + d2, d3), memory_config: (182, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x416x28x28>
tensor<[1,416,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 28 + d2, d3), memory_config: (364, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x448x12x12>
tensor<[1,448,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 12 + d2, d3), memory_config: (168, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x448x14x14>
tensor<[1,448,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 14 + d2, d3), memory_config: (196, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x448x28x28>
tensor<[1,448,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 28 + d2, d3), memory_config: (392, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x448x8x8>
tensor<[1,448,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 8 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x45x1>
tensor<[1,45,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<1x45x3072>
tensor<[1,45,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<1x45x3072>
tensor<[1,45,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x45x768>
tensor<[1,45,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x45x768>
tensor<[1,45,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x462x7x7>
tensor<[1,462,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3234 + d1 * 7 + d2, d3), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<1x46x28x28>
tensor<[1,46,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1288 + d1 * 28 + d2, d3), memory_config: (41, 1, 'tile<32x32, f32>', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x4>>, >
shape: #ttnn.shape<1x4800x128>
tensor<[1,4800,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (150, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x4>>, >
shape: #ttnn.shape<1x4800x128>
tensor<[1,4800,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (150, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x4800x1>
tensor<[1,4800,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x480x10x10>
tensor<[1,480,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 10 + d2, d3), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x480x14x14>
tensor<[1,480,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 14 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<225x1>>, >
shape: #ttnn.shape<1x480x15x15>
tensor<[1,480,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7200 + d1 * 15 + d2, d3), memory_config: (225, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x1>>, >
shape: #ttnn.shape<1x480x20x20>
tensor<[1,480,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 20 + d2, d3), memory_config: (300, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x480x28x28>
tensor<[1,480,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 28 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x480x7x7>
tensor<[1,480,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 7 + d2, d3), memory_config: (105, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<13068x5>>, >
shape: #ttnn.shape<1x484x6x144x144>
tensor<[1,484,6,144,144,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 418176 + d1 * 864 + d2 * 144 + d3, d4), memory_config: (13068, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x48x14x14>
tensor<[1,48,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 * 14 + d2, d3), memory_config: (21, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<50x2>>, >
shape: #ttnn.shape<1x48x33x33>
tensor<[1,48,33,33,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1584 + d1 * 33 + d2, d3), memory_config: (50, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<50x2>>, >
shape: #ttnn.shape<1x48x33x33>
tensor<[1,48,33,33,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1584 + d1 * 33 + d2, d3), memory_config: (50, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<57x2>>, >
shape: #ttnn.shape<1x48x38x38>
tensor<[1,48,38,38,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1824 + d1 * 38 + d2, d3), memory_config: (57, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<57x2>>, >
shape: #ttnn.shape<1x48x38x38>
tensor<[1,48,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1824 + d1 * 38 + d2, d3), memory_config: (57, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x2>>, >
shape: #ttnn.shape<1x48x56x56>
tensor<[1,48,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 56 + d2, d3), memory_config: (84, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x1>>, >
shape: #ttnn.shape<1x48x7x7>
tensor<[1,48,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 336 + d1 * 7 + d2, d3), memory_config: (11, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x4x13x128>
tensor<[1,4,13,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 52 + d1 * 13 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x2>>, >
shape: #ttnn.shape<1x4x16x49x49>
tensor<[1,4,16,49,49,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3136 + d1 * 784 + d2 * 49 + d3, d4), memory_config: (98, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<864x5>>, >
shape: #ttnn.shape<1x4x48x144x144>
tensor<[1,4,48,144,144,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 27648 + d1 * 6912 + d2 * 144 + d3, d4), memory_config: (864, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x50x1>
tensor<[1,50,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x50x768>
tensor<[1,50,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x50x768>
tensor<[1,50,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1600>>, >
shape: #ttnn.shape<1x51200>
tensor<[1,51200,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1600, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x512x16x16>
tensor<[1,512,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 16 + d2, d3), memory_config: (256, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<368x2>>, >
shape: #ttnn.shape<1x512x23x40>
tensor<[1,512,23,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11776 + d1 * 23 + d2, d3), memory_config: (368, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x512x28x28>
tensor<[1,512,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 28 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x512x28x28>
tensor<[1,512,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 28 + d2, d3), memory_config: (448, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x512x32x32>
tensor<[1,512,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 32 + d2, d3), memory_config: (512, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x512x32x32>
tensor<[1,512,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 32 + d2, d3), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x3>>, >
shape: #ttnn.shape<1x512x45x80>
tensor<[1,512,45,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 45 + d2, d3), memory_config: (720, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x2>>, >
shape: #ttnn.shape<1x512x56x56>
tensor<[1,512,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 56 + d2, d3), memory_config: (896, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x512x5x5>
tensor<[1,512,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 5 + d2, d3), memory_config: (80, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x512x7x7>
tensor<[1,512,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 7 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x512x7x7>
tensor<[1,512,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 7 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x512x8x8>
tensor<[1,512,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 8 + d2, d3), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x5>>, >
shape: #ttnn.shape<1x512x90x160>
tensor<[1,512,90,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 90 + d2, d3), memory_config: (1440, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x5>>, >
shape: #ttnn.shape<1x512x90x160>
tensor<[1,512,90,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 90 + d2, d3), memory_config: (1440, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<281x1>>, >
shape: #ttnn.shape<1x528x17x17>
tensor<[1,528,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8976 + d1 * 17 + d2, d3), memory_config: (281, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<238x1>>, >
shape: #ttnn.shape<1x544x14x14>
tensor<[1,544,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7616 + d1 * 14 + d2, d3), memory_config: (238, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x56x14x14>
tensor<[1,56,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 14 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x2>>, >
shape: #ttnn.shape<1x56x48x48>
tensor<[1,56,48,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 48 + d2, d3), memory_config: (84, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x2>>, >
shape: #ttnn.shape<1x56x48x48>
tensor<[1,56,48,48,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 48 + d2, d3), memory_config: (84, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<1x56x56x128>
tensor<[1,56,56,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<1x56x56x128>
tensor<[1,56,56,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x1>>, >
shape: #ttnn.shape<1x56x56x1>
tensor<[1,56,56,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x16>>, >
shape: #ttnn.shape<1x56x56x512>
tensor<[1,56,56,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x576x14x14>
tensor<[1,576,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 14 + d2, d3), memory_config: (252, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<342x1>>, >
shape: #ttnn.shape<1x576x19x19>
tensor<[1,576,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10944 + d1 * 19 + d2, d3), memory_config: (342, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x576x7x7>
tensor<[1,576,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 7 + d2, d3), memory_config: (126, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<51x1>>, >
shape: #ttnn.shape<1x58x28x28>
tensor<[1,58,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1624 + d1 * 28 + d2, d3), memory_config: (51, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x5x1024>
tensor<[1,5,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x5x1024>
tensor<[1,5,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x5x16x32>
tensor<[1,5,16,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 16 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x5x1>
tensor<[1,5,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x5x4096>
tensor<[1,5,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x5x4096>
tensor<[1,5,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<266x1>>, >
shape: #ttnn.shape<1x608x14x14>
tensor<[1,608,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8512 + d1 * 14 + d2, d3), memory_config: (266, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<53x1>>, >
shape: #ttnn.shape<1x60x28x28>
tensor<[1,60,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1680 + d1 * 28 + d2, d3), memory_config: (53, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x640x14x14>
tensor<[1,640,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 14 + d2, d3), memory_config: (280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x640x16x16>
tensor<[1,640,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 16 + d2, d3), memory_config: (320, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x2>>, >
shape: #ttnn.shape<1x640x64x64>
tensor<[1,640,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 64 + d2, d3), memory_config: (1280, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x64>
tensor<[1,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x4>>, >
shape: #ttnn.shape<1x64x112x112>
tensor<[1,64,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 112 + d2, d3), memory_config: (224, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<240x5>>, >
shape: #ttnn.shape<1x64x120x160>
tensor<[1,64,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7680 + d1 * 120 + d2, d3), memory_config: (240, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<240x5>>, >
shape: #ttnn.shape<1x64x120x160>
tensor<[1,64,120,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7680 + d1 * 120 + d2, d3), memory_config: (240, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x40>>, >
shape: #ttnn.shape<1x64x1280>
tensor<[1,64,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x40>>, >
shape: #ttnn.shape<1x64x1280>
tensor<[1,64,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x4>>, >
shape: #ttnn.shape<1x64x128x128>
tensor<[1,64,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 128 + d2, d3), memory_config: (256, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x5>>, >
shape: #ttnn.shape<1x64x147x147>
tensor<[1,64,147,147,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 147 + d2, d3), memory_config: (294, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x64x14x14>
tensor<[1,64,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 14 + d2, d3), memory_config: (28, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x64x14x14>
tensor<[1,64,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 14 + d2, d3), memory_config: (28, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x5>>, >
shape: #ttnn.shape<1x64x150x150>
tensor<[1,64,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 150 + d2, d3), memory_config: (300, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x5>>, >
shape: #ttnn.shape<1x64x160x160>
tensor<[1,64,160,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 160 + d2, d3), memory_config: (320, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x10>>, >
shape: #ttnn.shape<1x64x180x320>
tensor<[1,64,180,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 180 + d2, d3), memory_config: (360, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x64x1>
tensor<[1,64,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x64x1x1>
tensor<[1,64,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 + d2, d3), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x7>>, >
shape: #ttnn.shape<1x64x224x224>
tensor<[1,64,224,224,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 224 + d2, d3), memory_config: (448, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<1x64x256x256>
tensor<[1,64,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 256 + d2, d3), memory_config: (512, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<1x64x256x256>
tensor<[1,64,256,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 256 + d2, d3), memory_config: (512, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x1>>, >
shape: #ttnn.shape<1x64x28x28>
tensor<[1,64,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 28 + d2, d3), memory_config: (56, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x64x2x2>
tensor<[1,64,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 128 + d1 * 2 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x2>>, >
shape: #ttnn.shape<1x64x30x40>
tensor<[1,64,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 30 + d2, d3), memory_config: (60, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x2>>, >
shape: #ttnn.shape<1x64x30x40>
tensor<[1,64,30,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 30 + d2, d3), memory_config: (60, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x2>>, >
shape: #ttnn.shape<1x64x35x35>
tensor<[1,64,35,35,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 35 + d2, d3), memory_config: (70, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x20>>, >
shape: #ttnn.shape<1x64x360x640>
tensor<[1,64,360,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 360 + d2, d3), memory_config: (720, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x2>>, >
shape: #ttnn.shape<1x64x4x49x49>
tensor<[1,64,4,49,49,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12544 + d1 * 196 + d2 * 49 + d3, d4), memory_config: (392, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x2>>, >
shape: #ttnn.shape<1x64x56x56>
tensor<[1,64,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 56 + d2, d3), memory_config: (112, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x2>>, >
shape: #ttnn.shape<1x64x56x56>
tensor<[1,64,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 56 + d2, d3), memory_config: (112, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x3>>, >
shape: #ttnn.shape<1x64x60x80>
tensor<[1,64,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 60 + d2, d3), memory_config: (120, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x3>>, >
shape: #ttnn.shape<1x64x60x80>
tensor<[1,64,60,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 60 + d2, d3), memory_config: (120, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x64x64x64>
tensor<[1,64,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (128, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x64x64x64>
tensor<[1,64,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<146x3>>, >
shape: #ttnn.shape<1x64x73x73>
tensor<[1,64,73,73,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4672 + d1 * 73 + d2, d3), memory_config: (146, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<160x3>>, >
shape: #ttnn.shape<1x64x80x80>
tensor<[1,64,80,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5120 + d1 * 80 + d2, d3), memory_config: (160, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x64x9x9>
tensor<[1,64,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 9 + d2, d3), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x6>>, >
shape: #ttnn.shape<1x65536x192>
tensor<[1,65536,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 65536 + d1, d2), memory_config: (2048, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x6>>, >
shape: #ttnn.shape<1x65536x192>
tensor<[1,65536,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 65536 + d1, d2), memory_config: (2048, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x1>>, >
shape: #ttnn.shape<1x65536x1>
tensor<[1,65536,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 65536 + d1, d2), memory_config: (2048, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x672x10x10>
tensor<[1,672,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 10 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x672x14x14>
tensor<[1,672,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 14 + d2, d3), memory_config: (294, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<315x1>>, >
shape: #ttnn.shape<1x672x15x15>
tensor<[1,672,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10080 + d1 * 15 + d2, d3), memory_config: (315, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x672x20x20>
tensor<[1,672,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 20 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x1>>, >
shape: #ttnn.shape<1x672x24x24>
tensor<[1,672,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 24 + d2, d3), memory_config: (504, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x1>>, >
shape: #ttnn.shape<1x672x28x28>
tensor<[1,672,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 28 + d2, d3), memory_config: (588, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x1>>, >
shape: #ttnn.shape<1x672x28x28>
tensor<[1,672,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 28 + d2, d3), memory_config: (588, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1176x2>>, >
shape: #ttnn.shape<1x672x56x56>
tensor<[1,672,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 37632 + d1 * 56 + d2, d3), memory_config: (1176, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x672x7x7>
tensor<[1,672,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 7 + d2, d3), memory_config: (147, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x672x8x8>
tensor<[1,672,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 8 + d2, d3), memory_config: (168, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x68x14x14>
tensor<[1,68,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 952 + d1 * 14 + d2, d3), memory_config: (30, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<119x2>>, >
shape: #ttnn.shape<1x68x56x56>
tensor<[1,68,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3808 + d1 * 56 + d2, d3), memory_config: (119, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<609x1>>, >
shape: #ttnn.shape<1x696x28x28>
tensor<[1,696,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19488 + d1 * 28 + d2, d3), memory_config: (609, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<609x1>>, >
shape: #ttnn.shape<1x696x28x28>
tensor<[1,696,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19488 + d1 * 28 + d2, d3), memory_config: (609, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1218x2>>, >
shape: #ttnn.shape<1x696x56x56>
tensor<[1,696,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 38976 + d1 * 56 + d2, d3), memory_config: (1218, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6>
tensor<[1,6,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x6x1024>
tensor<[1,6,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x6x1024>
tensor<[1,6,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x6x1536>
tensor<[1,6,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x6x15x15>
tensor<[1,6,15,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 90 + d1 * 15 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x6x15x15>
tensor<[1,6,15,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 90 + d1 * 15 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1>
tensor<[1,6,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x10>
tensor<[1,6,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x11>
tensor<[1,6,1,11,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x12>
tensor<[1,6,1,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x13>
tensor<[1,6,1,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x14>
tensor<[1,6,1,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x15>
tensor<[1,6,1,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x15>
tensor<[1,6,1,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x16>
tensor<[1,6,1,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x17>
tensor<[1,6,1,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x18>
tensor<[1,6,1,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x19>
tensor<[1,6,1,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x1>
tensor<[1,6,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x20>
tensor<[1,6,1,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x2>
tensor<[1,6,1,2,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x3>
tensor<[1,6,1,3,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x4>
tensor<[1,6,1,4,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x5>
tensor<[1,6,1,5,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x6>
tensor<[1,6,1,6,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x7>
tensor<[1,6,1,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x8>
tensor<[1,6,1,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x9>
tensor<[1,6,1,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x6x3072>
tensor<[1,6,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x704x14x14>
tensor<[1,704,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 14 + d2, d3), memory_config: (308, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x2>>, >
shape: #ttnn.shape<1x71x7x64>
tensor<[1,71,7,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 497 + d1 * 7 + d2, d3), memory_config: (16, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x71x7x7>
tensor<[1,71,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 497 + d1 * 7 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<383x1>>, >
shape: #ttnn.shape<1x720x17x17>
tensor<[1,720,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12240 + d1 * 17 + d2, d3), memory_config: (383, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<203x1>>, >
shape: #ttnn.shape<1x720x9x9>
tensor<[1,720,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6480 + d1 * 9 + d2, d3), memory_config: (203, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<433x1>>, >
shape: #ttnn.shape<1x728x19x19>
tensor<[1,728,19,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13832 + d1 * 19 + d2, d3), memory_config: (433, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<433x1>>, >
shape: #ttnn.shape<1x728x19x19>
tensor<[1,728,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13832 + d1 * 19 + d2, d3), memory_config: (433, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<865x2>>, >
shape: #ttnn.shape<1x728x38x38>
tensor<[1,728,38,38,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27664 + d1 * 38 + d2, d3), memory_config: (865, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<865x2>>, >
shape: #ttnn.shape<1x728x38x38>
tensor<[1,728,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27664 + d1 * 38 + d2, d3), memory_config: (865, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x72x14x14>
tensor<[1,72,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 14 + d2, d3), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x72x14x14>
tensor<[1,72,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 14 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x1>>, >
shape: #ttnn.shape<1x72x28x28>
tensor<[1,72,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 28 + d2, d3), memory_config: (63, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<90x2>>, >
shape: #ttnn.shape<1x72x40x40>
tensor<[1,72,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2880 + d1 * 40 + d2, d3), memory_config: (90, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x2>>, >
shape: #ttnn.shape<1x72x56x56>
tensor<[1,72,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 56 + d2, d3), memory_config: (126, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x72x7x7>
tensor<[1,72,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 504 + d1 * 7 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<180x3>>, >
shape: #ttnn.shape<1x72x80x80>
tensor<[1,72,80,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5760 + d1 * 80 + d2, d3), memory_config: (180, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<322x1>>, >
shape: #ttnn.shape<1x736x14x14>
tensor<[1,736,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10304 + d1 * 14 + d2, d3), memory_config: (322, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x768>
tensor<[1,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x768x14x14>
tensor<[1,768,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 14 + d2, d3), memory_config: (336, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x12>>, >
shape: #ttnn.shape<1x768x384>
tensor<[1,768,384,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 12, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x768x8>
tensor<[1,768,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x25>>, >
shape: #ttnn.shape<1x784>
tensor<[1,784,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 25, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<69x1>>, >
shape: #ttnn.shape<1x78x28x28>
tensor<[1,78,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2184 + d1 * 28 + d2, d3), memory_config: (69, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x7x1536>
tensor<[1,7,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7x1>
tensor<[1,7,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x7x3072>
tensor<[1,7,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x7x3072>
tensor<[1,7,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x7x3072>
tensor<[1,7,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x142>>, >
shape: #ttnn.shape<1x7x4544>
tensor<[1,7,4544,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 142, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x142>>, >
shape: #ttnn.shape<1x7x4544>
tensor<[1,7,4544,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 142, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x7x768>
tensor<[1,7,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x7x768>
tensor<[1,7,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<1x7x7x1024>
tensor<[1,7,7,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<1x7x7x1024>
tensor<[1,7,7,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x7x7x1>
tensor<[1,7,7,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x64>>, >
shape: #ttnn.shape<1x7x7x2048>
tensor<[1,7,7,2048,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x128>>, >
shape: #ttnn.shape<1x7x7x4096>
tensor<[1,7,7,4096,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<350x1>>, >
shape: #ttnn.shape<1x800x14x14>
tensor<[1,800,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11200 + d1 * 14 + d2, d3), memory_config: (350, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x80x10x10>
tensor<[1,80,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 800 + d1 * 10 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x80x10x10>
tensor<[1,80,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 800 + d1 * 10 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x80x14x14>
tensor<[1,80,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 14 + d2, d3), memory_config: (35, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x80x14x14>
tensor<[1,80,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 14 + d2, d3), memory_config: (35, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x1>>, >
shape: #ttnn.shape<1x80x15x15>
tensor<[1,80,15,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 15 + d2, d3), memory_config: (38, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x1>>, >
shape: #ttnn.shape<1x80x15x15>
tensor<[1,80,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 15 + d2, d3), memory_config: (38, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<50x1>>, >
shape: #ttnn.shape<1x80x20x20>
tensor<[1,80,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 20 + d2, d3), memory_config: (50, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<50x1>>, >
shape: #ttnn.shape<1x80x20x20>
tensor<[1,80,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 20 + d2, d3), memory_config: (50, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x80x7x7>
tensor<[1,80,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 560 + d1 * 7 + d2, d3), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<255x1>>, >
shape: #ttnn.shape<1x816x10x10>
tensor<[1,816,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8160 + d1 * 10 + d2, d3), memory_config: (255, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<485x1>>, >
shape: #ttnn.shape<1x816x19x19>
tensor<[1,816,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15504 + d1 * 19 + d2, d3), memory_config: (485, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x832x14x14>
tensor<[1,832,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 14 + d2, d3), memory_config: (364, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<378x1>>, >
shape: #ttnn.shape<1x864x14x14>
tensor<[1,864,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12096 + d1 * 14 + d2, d3), memory_config: (378, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x1>>, >
shape: #ttnn.shape<1x88x17x17>
tensor<[1,88,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1496 + d1 * 17 + d2, d3), memory_config: (47, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x1>>, >
shape: #ttnn.shape<1x88x17x17>
tensor<[1,88,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1496 + d1 * 17 + d2, d3), memory_config: (47, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x896x14x14>
tensor<[1,896,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 14 + d2, d3), memory_config: (392, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x896x7x7>
tensor<[1,896,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 7 + d2, d3), memory_config: (196, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x8x10x10>
tensor<[1,8,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 10 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x8x10x10>
tensor<[1,8,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 10 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x4>>, >
shape: #ttnn.shape<1x8x112x112>
tensor<[1,8,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 112 + d2, d3), memory_config: (28, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x8x1536>
tensor<[1,8,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1>
tensor<[1,8,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1>
tensor<[1,8,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x10>
tensor<[1,8,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x10>
tensor<[1,8,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x11>
tensor<[1,8,1,11,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x12>
tensor<[1,8,1,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x13>
tensor<[1,8,1,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x14>
tensor<[1,8,1,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x15>
tensor<[1,8,1,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x16>
tensor<[1,8,1,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x17>
tensor<[1,8,1,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x18>
tensor<[1,8,1,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x19>
tensor<[1,8,1,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x1>
tensor<[1,8,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x20>
tensor<[1,8,1,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x2>
tensor<[1,8,1,2,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x3>
tensor<[1,8,1,3,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x4>
tensor<[1,8,1,4,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x5>
tensor<[1,8,1,5,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x6>
tensor<[1,8,1,6,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x7>
tensor<[1,8,1,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x8>
tensor<[1,8,1,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x9>
tensor<[1,8,1,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x64>>, >
shape: #ttnn.shape<1x8x256x2048>
tensor<[1,8,256,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 256 + d2, d3), memory_config: (64, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x8x3072>
tensor<[1,8,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x4>>, >
shape: #ttnn.shape<1x8x32x128>
tensor<[1,8,32,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 32 + d2, d3), memory_config: (8, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x8x768>
tensor<[1,8,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x8x768>
tensor<[1,8,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x1>>, >
shape: #ttnn.shape<1x928x14x14>
tensor<[1,928,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 14 + d2, d3), memory_config: (406, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<203x1>>, >
shape: #ttnn.shape<1x928x7x7>
tensor<[1,928,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6496 + d1 * 7 + d2, d3), memory_config: (203, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<1x92x14x14>
tensor<[1,92,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1288 + d1 * 14 + d2, d3), memory_config: (41, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x1>>, >
shape: #ttnn.shape<1x960x12x12>
tensor<[1,960,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 12 + d2, d3), memory_config: (360, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x960x14x14>
tensor<[1,960,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 14 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x1>>, >
shape: #ttnn.shape<1x960x24x24>
tensor<[1,960,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 24 + d2, d3), memory_config: (720, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x960x32x32>
tensor<[1,960,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 32 + d2, d3), memory_config: (960, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<90x1>>, >
shape: #ttnn.shape<1x960x3x3>
tensor<[1,960,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2880 + d1 * 3 + d2, d3), memory_config: (90, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1920x2>>, >
shape: #ttnn.shape<1x960x64x64>
tensor<[1,960,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 61440 + d1 * 64 + d2, d3), memory_config: (1920, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x960x7x7>
tensor<[1,960,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 7 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x4>>, >
shape: #ttnn.shape<1x96x112x112>
tensor<[1,96,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 112 + d2, d3), memory_config: (336, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x4>>, >
shape: #ttnn.shape<1x96x120x120>
tensor<[1,96,120,120,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 120 + d2, d3), memory_config: (360, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<390x5>>, >
shape: #ttnn.shape<1x96x130x130>
tensor<[1,96,130,130,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12480 + d1 * 130 + d2, d3), memory_config: (390, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x1>>, >
shape: #ttnn.shape<1x96x14x14>
tensor<[1,96,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 14 + d2, d3), memory_config: (42, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x1>>, >
shape: #ttnn.shape<1x96x14x14>
tensor<[1,96,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 14 + d2, d3), memory_config: (42, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<57x1>>, >
shape: #ttnn.shape<1x96x19x19>
tensor<[1,96,19,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1824 + d1 * 19 + d2, d3), memory_config: (57, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<57x1>>, >
shape: #ttnn.shape<1x96x19x19>
tensor<[1,96,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1824 + d1 * 19 + d2, d3), memory_config: (57, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x96x28x28>
tensor<[1,96,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 28 + d2, d3), memory_config: (84, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x2>>, >
shape: #ttnn.shape<1x96x35x35>
tensor<[1,96,35,35,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 35 + d2, d3), memory_config: (105, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x2>>, >
shape: #ttnn.shape<1x96x56x56>
tensor<[1,96,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 56 + d2, d3), memory_config: (168, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<180x2>>, >
shape: #ttnn.shape<1x96x60x60>
tensor<[1,96,60,60,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5760 + d1 * 60 + d2, d3), memory_config: (180, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<195x3>>, >
shape: #ttnn.shape<1x96x65x65>
tensor<[1,96,65,65,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6240 + d1 * 65 + d2, d3), memory_config: (195, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<213x3>>, >
shape: #ttnn.shape<1x96x71x71>
tensor<[1,96,71,71,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6816 + d1 * 71 + d2, d3), memory_config: (213, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<219x3>>, >
shape: #ttnn.shape<1x96x73x73>
tensor<[1,96,73,73,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7008 + d1 * 73 + d2, d3), memory_config: (219, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<86x1>>, >
shape: #ttnn.shape<1x98x28x28>
tensor<[1,98,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2744 + d1 * 28 + d2, d3), memory_config: (86, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<434x1>>, >
shape: #ttnn.shape<1x992x14x14>
tensor<[1,992,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13888 + d1 * 14 + d2, d3), memory_config: (434, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<217x1>>, >
shape: #ttnn.shape<1x992x7x7>
tensor<[1,992,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6944 + d1 * 7 + d2, d3), memory_config: (217, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x9x1024>
tensor<[1,9,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x9x1024>
tensor<[1,9,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x9x1536>
tensor<[1,9,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x512>>, >
shape: #ttnn.shape<1x9x16384>
tensor<[1,9,16384,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 512, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x512>>, >
shape: #ttnn.shape<1x9x16384>
tensor<[1,9,16384,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 512, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9x1>
tensor<[1,9,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9x1>
tensor<[1,9,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<1x9x2048>
tensor<[1,9,2048,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<1x9x2048>
tensor<[1,9,2048,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<972x5>>, >
shape: #ttnn.shape<1x9x24x144x144>
tensor<[1,9,24,144,144,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 31104 + d1 * 3456 + d2 * 144 + d3, d4), memory_config: (972, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x9x3072>
tensor<[1,9,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x9x3072>
tensor<[1,9,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x9x3072>
tensor<[1,9,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1944x5>>, >
shape: #ttnn.shape<1x9x48x144x144>
tensor<[1,9,48,144,144,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 62208 + d1 * 6912 + d2 * 144 + d3, d4), memory_config: (1944, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x9x768>
tensor<[1,9,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x9x768>
tensor<[1,9,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<1x9x8192>
tensor<[1,9,8192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 256, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<1x9x8192>
tensor<[1,9,8192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 256, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<201x3072>
tensor<[201,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<201x768>
tensor<[201,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x40>>, >
shape: #ttnn.shape<2048x1280>
tensor<[2048,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (64, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x8>>, >
shape: #ttnn.shape<2048x256>
tensor<[2048,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (64, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x9>>, >
shape: #ttnn.shape<2048x262>
tensor<[2048,262,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (64, 9, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<2048x768>
tensor<[2048,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (64, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x320>>, >
shape: #ttnn.shape<256x10240>
tensor<[256,10240,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 320, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<256x1024>
tensor<[256,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<256x1280>
tensor<[256,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x48>>, >
shape: #ttnn.shape<256x1536>
tensor<[256,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x5>>, >
shape: #ttnn.shape<256x160>
tensor<[256,160,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<256x256>
tensor<[256,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<256x2>
tensor<[256,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<256x32>
tensor<[256,32,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x128>>, >
shape: #ttnn.shape<256x4096>
tensor<[256,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<256x512>
tensor<[256,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x192>>, >
shape: #ttnn.shape<256x6144>
tensor<[256,6144,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 192, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<256x64>
tensor<[256,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x24>>, >
shape: #ttnn.shape<256x768>
tensor<[256,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x72>>, >
shape: #ttnn.shape<257x2304>
tensor<[257,2304,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (9, 72, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x96>>, >
shape: #ttnn.shape<257x3072>
tensor<[257,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (9, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x24>>, >
shape: #ttnn.shape<257x768>
tensor<[257,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (9, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<25x2>
tensor<[25,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<25x3072>
tensor<[25,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<25x768>
tensor<[25,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x954>>, >
shape: #ttnn.shape<27x30522>
tensor<[27,30522,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 954, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<27x38>
tensor<[27,38,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1571>>, >
shape: #ttnn.shape<27x50257>
tensor<[27,50257,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1571, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<2x128x1>
tensor<[2,128,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 128 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<2x12x13x13>
tensor<[2,12,13,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 156 + d1 * 13 + d2, d3), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x13x1>
tensor<[2,13,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<2x13x768>
tensor<[2,13,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x1x7x7>
tensor<[2,1,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x7x1>
tensor<[2,7,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<2x7x512>
tensor<[2,7,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<2x7x512>
tensor<[2,7,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<2x7x512>
tensor<[2,7,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<2x8x7x7>
tensor<[2,8,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 56 + d1 * 7 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x4>>, >
shape: #ttnn.shape<300x128>
tensor<[300,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x64>>, >
shape: #ttnn.shape<300x2048>
tensor<[300,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x10>>, >
shape: #ttnn.shape<300x320>
tensor<[300,320,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x16>>, >
shape: #ttnn.shape<300x512>
tensor<[300,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<300x64>
tensor<[300,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<3136x128>
tensor<[3136,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (98, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x12>>, >
shape: #ttnn.shape<3136x384>
tensor<[3136,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (98, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x102>>, >
shape: #ttnn.shape<3234>
tensor<[3234,f32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 102, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<32x1536>
tensor<[32,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x144>>, >
shape: #ttnn.shape<32x4608>
tensor<[32,4608,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 144, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<32x6144>
tensor<[32,6144,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1944x5>>, >
shape: #ttnn.shape<36x12x144x144>
tensor<[36,12,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1728 + d1 * 144 + d2, d3), memory_config: (1944, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3888x5>>, >
shape: #ttnn.shape<36x24x144x144>
tensor<[36,24,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3456 + d1 * 144 + d2, d3), memory_config: (3888, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x48>>, >
shape: #ttnn.shape<4096x1536>
tensor<[4096,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x80>>, >
shape: #ttnn.shape<4096x2560>
tensor<[4096,2560,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 80, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x8>>, >
shape: #ttnn.shape<4096x256>
tensor<[4096,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x96>>, >
shape: #ttnn.shape<4096x3072>
tensor<[4096,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x10>>, >
shape: #ttnn.shape<4096x320>
tensor<[4096,320,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x12>>, >
shape: #ttnn.shape<4096x384>
tensor<[4096,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<4096x64>
tensor<[4096,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x24>>, >
shape: #ttnn.shape<4096x768>
tensor<[4096,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<45x3072>
tensor<[45,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<45x768>
tensor<[45,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x4>>, >
shape: #ttnn.shape<4800x128>
tensor<[4800,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (150, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x16>>, >
shape: #ttnn.shape<4800x512>
tensor<[4800,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (150, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<13068x5>>, >
shape: #ttnn.shape<484x6x144x144>
tensor<[484,6,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 864 + d1 * 144 + d2, d3), memory_config: (13068, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<49x1024>
tensor<[49,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<49x3072>
tensor<[49,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<4x16x1x13>
tensor<[4,16,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x2>>, >
shape: #ttnn.shape<4x16x49x49>
tensor<[4,16,49,49,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 49 + d2, d3), memory_config: (98, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<4x1x1024>
tensor<[4,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<4x1x1024>
tensor<[4,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<4x1x1>
tensor<[4,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<864x5>>, >
shape: #ttnn.shape<4x48x144x144>
tensor<[4,48,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6912 + d1 * 144 + d2, d3), memory_config: (864, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<50x3072>
tensor<[50,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<50x768>
tensor<[50,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x36>>, >
shape: #ttnn.shape<5184x1152>
tensor<[5184,1152,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (162, 36, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x72>>, >
shape: #ttnn.shape<5184x2304>
tensor<[5184,2304,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (162, 72, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x12>>, >
shape: #ttnn.shape<5184x384>
tensor<[5184,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (162, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x24>>, >
shape: #ttnn.shape<5184x768>
tensor<[5184,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (162, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<52x1024>
tensor<[52,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x48>>, >
shape: #ttnn.shape<576x1536>
tensor<[576,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (18, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x144>>, >
shape: #ttnn.shape<576x4608>
tensor<[576,4608,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (18, 144, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<5x1024>
tensor<[5,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<5x4096>
tensor<[5,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1600>>, >
shape: #ttnn.shape<5x51200>
tensor<[5,51200,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1600, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x320>>, >
shape: #ttnn.shape<64x10240>
tensor<[64,10240,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 320, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x40>>, >
shape: #ttnn.shape<64x1280>
tensor<[64,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x2>>, >
shape: #ttnn.shape<64x4x49x49>
tensor<[64,4,49,49,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 49 + d2, d3), memory_config: (392, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x6>>, >
shape: #ttnn.shape<65536x192>
tensor<[65536,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2048, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x24>>, >
shape: #ttnn.shape<65536x768>
tensor<[65536,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2048, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2178x6>>, >
shape: #ttnn.shape<69696x192>
tensor<[69696,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2178, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2178x18>>, >
shape: #ttnn.shape<69696x576>
tensor<[69696,576,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2178, 18, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6>
tensor<[6,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<6x1024>
tensor<[6,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<6x1024>
tensor<[6,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x8>>, >
shape: #ttnn.shape<6x1x100x256>
tensor<[6,1,100,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 100 + d2, d3), memory_config: (19, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x1>>, >
shape: #ttnn.shape<6x1x100x4>
tensor<[6,1,100,4,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 100 + d2, d3), memory_config: (19, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x3>>, >
shape: #ttnn.shape<6x1x100x92>
tensor<[6,1,100,92,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 100 + d2, d3), memory_config: (19, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<6x4096>
tensor<[6,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x7>>, >
shape: #ttnn.shape<768x196>
tensor<[768,196,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (24, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<784x256>
tensor<[784,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (25, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x24>>, >
shape: #ttnn.shape<784x768>
tensor<[784,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (25, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x72>>, >
shape: #ttnn.shape<7x2304>
tensor<[7,2304,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 72, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<7x3072>
tensor<[7,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<7x768>
tensor<[7,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<8x2048>
tensor<[8,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x1>>, >
shape: #ttnn.shape<920x1x1>
tensor<[920,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (29, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x8>>, >
shape: #ttnn.shape<920x1x256>
tensor<[920,1,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (29, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x8>>, >
shape: #ttnn.shape<920x1x256>
tensor<[920,1,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (29, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x64>>, >
shape: #ttnn.shape<920x2048>
tensor<[920,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (29, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x8>>, >
shape: #ttnn.shape<920x256>
tensor<[920,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (29, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<9x1024>
tensor<[9,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<9x128>
tensor<[9,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x512>>, >
shape: #ttnn.shape<9x16384>
tensor<[9,16384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 512, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<9x2048>
tensor<[9,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<972x5>>, >
shape: #ttnn.shape<9x24x144x144>
tensor<[9,24,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3456 + d1 * 144 + d2, d3), memory_config: (972, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x938>>, >
shape: #ttnn.shape<9x30000>
tensor<[9,30000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 938, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<9x3072>
tensor<[9,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<9x4096>
tensor<[9,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1944x5>>, >
shape: #ttnn.shape<9x48x144x144>
tensor<[9,48,144,144,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6912 + d1 * 144 + d2, d3), memory_config: (1944, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<9x768>
tensor<[9,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<9x8192>
tensor<[9,8192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 256, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1>
tensor<[1,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<2x128x1>
tensor<[2,128,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 128 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<11>
tensor<[11,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12>
tensor<[12,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<13>
tensor<[13,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<14>
tensor<[14,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<15>
tensor<[15,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16>
tensor<[16,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<17>
tensor<[17,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<18>
tensor<[18,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<19>
tensor<[19,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<20>
tensor<[20,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<21>
tensor<[21,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<22>
tensor<[22,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<23>
tensor<[23,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<24>
tensor<[24,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<25>
tensor<[25,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<26>
tensor<[26,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<27>
tensor<[27,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<28>
tensor<[28,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<29>
tensor<[29,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<32>
tensor<[32,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<5>
tensor<[5,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6>
tensor<[6,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<7>
tensor<[7,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8>
tensor<[8,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<9>
tensor<[9,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<32>
tensor<[32,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x512x7x7>
tensor<[1,512,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 7 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x29>>, >
shape: #ttnn.shape<8x100x920>
tensor<[8,100,920,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 100 + d1, d2), memory_config: (25, 29, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<230x29>>, >
shape: #ttnn.shape<8x920x920>
tensor<[8,920,920,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 920 + d1, d2), memory_config: (230, 29, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1>
tensor<[1,bf16]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<120x28x28>
tensor<[120,28,28,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 28 + d1, d2), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1280x16x32>
tensor<[1280,16,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1280x16x16>
tensor<[1280,16,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x1>>, >
shape: #ttnn.shape<1280x32x32>
tensor<[1280,32,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1280x8x16>
tensor<[1280,8,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<128x32x64>
tensor<[128,32,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (128, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x2>>, >
shape: #ttnn.shape<128x49x49>
tensor<[128,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (196, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<128x49x32>
tensor<[128,49,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (196, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x2>>, >
shape: #ttnn.shape<128x64x64>
tensor<[128,64,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (256, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<12x10x64>
tensor<[12,10,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (4, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<12x10x10>
tensor<[12,10,10,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<12x12x12>
tensor<[12,12,12,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (5, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x4>>, >
shape: #ttnn.shape<12x12x128>
tensor<[12,12,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (5, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<12x12x64>
tensor<[12,12,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (5, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<12x12x12>
tensor<[12,12,12,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (5, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x2>>, >
shape: #ttnn.shape<12x14x64>
tensor<[12,14,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (6, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<12x14x14>
tensor<[12,14,14,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (6, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<563x2>>, >
shape: #ttnn.shape<12x1500x64>
tensor<[12,1500,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (563, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<563x47>>, >
shape: #ttnn.shape<12x1500x1500>
tensor<[12,1500,1500,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (563, 47, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x2>>, >
shape: #ttnn.shape<12x16x64>
tensor<[12,16,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (6, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<12x16x16>
tensor<[12,16,16,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (6, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<74x2>>, >
shape: #ttnn.shape<12x197x64>
tensor<[12,197,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (74, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<74x7>>, >
shape: #ttnn.shape<12x197x197>
tensor<[12,197,197,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (74, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x13>
tensor<[12,1,13,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x14>
tensor<[12,1,14,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x15>
tensor<[12,1,15,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x16>
tensor<[12,1,16,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x17>
tensor<[12,1,17,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x18>
tensor<[12,1,18,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x19>
tensor<[12,1,19,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x20>
tensor<[12,1,20,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x21>
tensor<[12,1,21,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x22>
tensor<[12,1,22,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x23>
tensor<[12,1,23,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x24>
tensor<[12,1,24,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x25>
tensor<[12,1,25,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x26>
tensor<[12,1,26,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x27>
tensor<[12,1,27,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x28>
tensor<[12,1,28,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x29>
tensor<[12,1,29,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x128>
tensor<[12,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x1>
tensor<[12,1,1,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x10>
tensor<[12,1,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x11>
tensor<[12,1,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x12>
tensor<[12,1,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x13>
tensor<[12,1,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x14>
tensor<[12,1,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x15>
tensor<[12,1,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x16>
tensor<[12,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x17>
tensor<[12,1,17,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x18>
tensor<[12,1,18,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x19>
tensor<[12,1,19,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x2>
tensor<[12,1,2,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x20>
tensor<[12,1,20,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x3>
tensor<[12,1,3,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x4>
tensor<[12,1,4,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x46>
tensor<[12,1,46,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x47>
tensor<[12,1,47,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x48>
tensor<[12,1,48,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x49>
tensor<[12,1,49,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x5>
tensor<[12,1,5,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x50>
tensor<[12,1,50,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x51>
tensor<[12,1,51,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x52>
tensor<[12,1,52,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x53>
tensor<[12,1,53,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x54>
tensor<[12,1,54,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x55>
tensor<[12,1,55,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x56>
tensor<[12,1,56,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x57>
tensor<[12,1,57,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x58>
tensor<[12,1,58,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x59>
tensor<[12,1,59,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x6>
tensor<[12,1,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x60>
tensor<[12,1,60,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x61>
tensor<[12,1,61,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x62>
tensor<[12,1,62,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x63>
tensor<[12,1,63,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x65>
tensor<[12,1,65,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x66>
tensor<[12,1,66,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x67>
tensor<[12,1,67,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x68>
tensor<[12,1,68,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x69>
tensor<[12,1,69,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x7>
tensor<[12,1,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x70>
tensor<[12,1,70,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x71>
tensor<[12,1,71,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x72>
tensor<[12,1,72,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x73>
tensor<[12,1,73,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x74>
tensor<[12,1,74,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x75>
tensor<[12,1,75,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x76>
tensor<[12,1,76,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x77>
tensor<[12,1,77,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x78>
tensor<[12,1,78,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x79>
tensor<[12,1,79,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x8>
tensor<[12,1,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x80>
tensor<[12,1,80,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x81>
tensor<[12,1,81,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x82>
tensor<[12,1,82,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x83>
tensor<[12,1,83,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x84>
tensor<[12,1,84,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x85>
tensor<[12,1,85,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x86>
tensor<[12,1,86,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x87>
tensor<[12,1,87,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x88>
tensor<[12,1,88,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x89>
tensor<[12,1,89,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x1x9>
tensor<[12,1,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x90>
tensor<[12,1,90,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x91>
tensor<[12,1,91,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x92>
tensor<[12,1,92,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x93>
tensor<[12,1,93,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x94>
tensor<[12,1,94,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x95>
tensor<[12,1,95,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<12x1x96>
tensor<[12,1,96,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x97>
tensor<[12,1,97,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x98>
tensor<[12,1,98,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<12x1x99>
tensor<[12,1,99,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<12x1x64>
tensor<[12,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<76x2>>, >
shape: #ttnn.shape<12x201x64>
tensor<[12,201,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (76, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<76x7>>, >
shape: #ttnn.shape<12x201x201>
tensor<[12,201,201,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (76, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<97x2>>, >
shape: #ttnn.shape<12x257x64>
tensor<[12,257,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (97, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<97x9>>, >
shape: #ttnn.shape<12x257x257>
tensor<[12,257,257,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (97, 9, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<12x25x64>
tensor<[12,25,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (10, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<12x25x25>
tensor<[12,25,25,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<17x2>>, >
shape: #ttnn.shape<12x45x64>
tensor<[12,45,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (17, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<17x2>>, >
shape: #ttnn.shape<12x45x45>
tensor<[12,45,45,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (17, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x2>>, >
shape: #ttnn.shape<12x50x64>
tensor<[12,50,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (19, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x2>>, >
shape: #ttnn.shape<12x50x50>
tensor<[12,50,50,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (19, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<12x7x7>
tensor<[12,7,7,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<12x7x64>
tensor<[12,7,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (3, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<12x8x8>
tensor<[12,8,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<12x8x64>
tensor<[12,8,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<12x9x9>
tensor<[12,9,9,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<12x9x64>
tensor<[12,9,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (4, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6534x1>>, >
shape: #ttnn.shape<1452x144x32>
tensor<[1452,144,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (6534, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6534x5>>, >
shape: #ttnn.shape<1452x144x144>
tensor<[1452,144,144,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (6534, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<768x1>>, >
shape: #ttnn.shape<1536x16x32>
tensor<[1536,16,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (768, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1536x1>>, >
shape: #ttnn.shape<1536x32x32>
tensor<[1536,32,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1536, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1536x1>>, >
shape: #ttnn.shape<1536x32x32>
tensor<[1536,32,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1536, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3072x1>>, >
shape: #ttnn.shape<1536x64x32>
tensor<[1536,64,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (3072, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<16x10x64>
tensor<[16,10,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<16x10x10>
tensor<[16,10,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (5, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<685x3>>, >
shape: #ttnn.shape<16x1370x80>
tensor<[16,1370,80,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (685, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<685x43>>, >
shape: #ttnn.shape<16x1370x1370>
tensor<[16,1370,1370,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (685, 43, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<99x2>>, >
shape: #ttnn.shape<16x197x64>
tensor<[16,197,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (99, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<99x7>>, >
shape: #ttnn.shape<16x197x197>
tensor<[16,197,197,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (99, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x1>
tensor<[16,1,1,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x10>
tensor<[16,1,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x11>
tensor<[16,1,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x12>
tensor<[16,1,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x13>
tensor<[16,1,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x14>
tensor<[16,1,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x15>
tensor<[16,1,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x16>
tensor<[16,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x17>
tensor<[16,1,17,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x18>
tensor<[16,1,18,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x19>
tensor<[16,1,19,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x2>
tensor<[16,1,2,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x20>
tensor<[16,1,20,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x21>
tensor<[16,1,21,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x22>
tensor<[16,1,22,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x23>
tensor<[16,1,23,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x24>
tensor<[16,1,24,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x25>
tensor<[16,1,25,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x26>
tensor<[16,1,26,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x27>
tensor<[16,1,27,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x28>
tensor<[16,1,28,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x29>
tensor<[16,1,29,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x3>
tensor<[16,1,3,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x4>
tensor<[16,1,4,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x5>
tensor<[16,1,5,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x6>
tensor<[16,1,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x7>
tensor<[16,1,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x8>
tensor<[16,1,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<16x1x9>
tensor<[16,1,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<16x1x64>
tensor<[16,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<16x256x64>
tensor<[16,256,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x8>>, >
shape: #ttnn.shape<16x256x256>
tensor<[16,256,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (128, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x3>>, >
shape: #ttnn.shape<16x32x96>
tensor<[16,32,96,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (16, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<16x5x64>
tensor<[16,5,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<16x5x5>
tensor<[16,5,5,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<16x6x6>
tensor<[16,6,6,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<16x6x64>
tensor<[16,6,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (3, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<16x7x7>
tensor<[16,7,7,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<16x7x64>
tensor<[16,7,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (4, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<16x9x9>
tensor<[16,9,9,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (5, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x1>>, >
shape: #ttnn.shape<16x9x9>
tensor<[16,9,9,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (5, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x4>>, >
shape: #ttnn.shape<16x9x128>
tensor<[16,9,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (5, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<16x9x64>
tensor<[16,9,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (5, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<81x1>>, >
shape: #ttnn.shape<184x14x14>
tensor<[184,14,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (81, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<184x7x14>
tensor<[184,7,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (41, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<18x14x56>
tensor<[18,14,56,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (8, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x2>>, >
shape: #ttnn.shape<18x28x56>
tensor<[18,28,56,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 28 + d1, d2), memory_config: (16, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x2>>, >
shape: #ttnn.shape<18x56x56>
tensor<[18,56,56,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 56 + d1, d2), memory_config: (32, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x2>>, >
shape: #ttnn.shape<18x56x56>
tensor<[18,56,56,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 56 + d1, d2), memory_config: (32, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x2>>, >
shape: #ttnn.shape<18x56x56>
tensor<[18,56,56,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 56 + d1, d2), memory_config: (32, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<18x7x56>
tensor<[18,7,56,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<768x8>>, >
shape: #ttnn.shape<192x128x256>
tensor<[192,128,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 128 + d1, d2), memory_config: (768, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<864x1>>, >
shape: #ttnn.shape<192x144x32>
tensor<[192,144,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (864, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<864x5>>, >
shape: #ttnn.shape<192x144x144>
tensor<[192,144,144,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (864, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1536x8>>, >
shape: #ttnn.shape<192x256x256>
tensor<[192,256,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (1536, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x10x128>
tensor<[1,10,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x10x1536>
tensor<[1,10,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x11x128>
tensor<[1,11,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x11x1536>
tensor<[1,11,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x128>
tensor<[1,12,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x12x1536>
tensor<[1,12,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x16>
tensor<[1,12,16,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x13x128>
tensor<[1,13,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x13x1536>
tensor<[1,13,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x14x128>
tensor<[1,14,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x14x1536>
tensor<[1,14,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x15x128>
tensor<[1,15,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x15x1536>
tensor<[1,15,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x32>
tensor<[1,16384,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<1x16384x256>
tensor<[1,16384,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x2>>, >
shape: #ttnn.shape<1x19200x64>
tensor<[1,19200,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x10>>, >
shape: #ttnn.shape<1x19200x300>
tensor<[1,19200,300,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4748>>, >
shape: #ttnn.shape<1x1x151936>
tensor<[1,1,151936,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4748, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x27x768>
tensor<[1,27,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 27 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x1>
tensor<[1,3072,1,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x512x12>
tensor<[1,512,12,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 512 + d1, d2), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x64x1>
tensor<[1,64,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x64x12>
tensor<[1,64,12,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x64x13>
tensor<[1,64,13,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x64x32>
tensor<[1,64,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x6x128>
tensor<[1,6,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x6x1536>
tensor<[1,6,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x7x128>
tensor<[1,7,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x7x1536>
tensor<[1,7,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x8x128>
tensor<[1,8,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x8x1536>
tensor<[1,8,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x9x1536>
tensor<[1,9,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<88x1>>, >
shape: #ttnn.shape<200x14x14>
tensor<[200,14,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (88, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<200x7x14>
tensor<[200,7,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (44, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<972x1>>, >
shape: #ttnn.shape<216x144x32>
tensor<[216,144,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (972, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<972x5>>, >
shape: #ttnn.shape<216x144x144>
tensor<[216,144,144,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (972, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<240x14x28>
tensor<[240,14,28,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<240x28x28>
tensor<[240,28,28,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 28 + d1, d2), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<24x13x64>
tensor<[24,13,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (10, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<24x13x13>
tensor<[24,13,13,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<24x32x32>
tensor<[24,32,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x4>>, >
shape: #ttnn.shape<24x32x128>
tensor<[24,32,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (24, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x4>>, >
shape: #ttnn.shape<256x128x128>
tensor<[256,128,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 128 + d1, d2), memory_config: (1024, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x4>>, >
shape: #ttnn.shape<256x128x128>
tensor<[256,128,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 128 + d1, d2), memory_config: (1024, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x4>>, >
shape: #ttnn.shape<256x128x128>
tensor<[256,128,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 128 + d1, d2), memory_config: (1024, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x4>>, >
shape: #ttnn.shape<256x128x128>
tensor<[256,128,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 128 + d1, d2), memory_config: (1024, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x4>>, >
shape: #ttnn.shape<256x16x128>
tensor<[256,16,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (128, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<256x16x32>
tensor<[256,16,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (128, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<256x32x32>
tensor<[256,32,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (256, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x4>>, >
shape: #ttnn.shape<256x32x128>
tensor<[256,32,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (256, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x2>>, >
shape: #ttnn.shape<256x49x49>
tensor<[256,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (392, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<256x49x32>
tensor<[256,49,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (392, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<256x64x128>
tensor<[256,64,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (512, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x1>>, >
shape: #ttnn.shape<28x13x13>
tensor<[28,13,13,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (12, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x4>>, >
shape: #ttnn.shape<28x13x128>
tensor<[28,13,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (12, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<13068x1>>, >
shape: #ttnn.shape<2904x144x32>
tensor<[2904,144,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (13068, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<13068x5>>, >
shape: #ttnn.shape<2904x144x144>
tensor<[2904,144,144,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (13068, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<2x4096x32>
tensor<[2,4096,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (256, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x8>>, >
shape: #ttnn.shape<2x4096x256>
tensor<[2,4096,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (256, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x2>>, >
shape: #ttnn.shape<2x4800x64>
tensor<[2,4800,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (300, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x10>>, >
shape: #ttnn.shape<2x4800x300>
tensor<[2,4800,300,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (300, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<32x32x32>
tensor<[32,32,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x4>>, >
shape: #ttnn.shape<32x32x128>
tensor<[32,32,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (32, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x2>>, >
shape: #ttnn.shape<32x49x49>
tensor<[32,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (49, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x1>>, >
shape: #ttnn.shape<32x49x32>
tensor<[32,49,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (49, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<36x14x28>
tensor<[36,14,28,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<36x28x28>
tensor<[36,28,28,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 28 + d1, d2), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<36x28x28>
tensor<[36,28,28,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 28 + d1, d2), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<36x7x28>
tensor<[36,7,28,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (8, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1536x4>>, >
shape: #ttnn.shape<384x128x128>
tensor<[384,128,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 128 + d1, d2), memory_config: (1536, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3072x1>>, >
shape: #ttnn.shape<384x256x32>
tensor<[384,256,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (3072, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<384x1>>, >
shape: #ttnn.shape<384x32x32>
tensor<[384,32,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (384, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<768x4>>, >
shape: #ttnn.shape<384x64x128>
tensor<[384,64,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (768, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x16>>, >
shape: #ttnn.shape<3x1024x512>
tensor<[3,1024,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (96, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<136x2>>, >
shape: #ttnn.shape<3x1445x64>
tensor<[3,1445,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (136, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<136x46>>, >
shape: #ttnn.shape<3x1445x1445>
tensor<[3,1445,1445,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (136, 46, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x10>>, >
shape: #ttnn.shape<3x320x320>
tensor<[3,320,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 320 + d1, d2), memory_config: (30, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x16>>, >
shape: #ttnn.shape<3x512x512>
tensor<[3,512,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 512 + d1, d2), memory_config: (48, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1944x1>>, >
shape: #ttnn.shape<432x144x32>
tensor<[432,144,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (1944, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1944x5>>, >
shape: #ttnn.shape<432x144x144>
tensor<[432,144,144,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (1944, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<480x14x14>
tensor<[480,14,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<480x7x14>
tensor<[480,7,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<160x1>>, >
shape: #ttnn.shape<5x1024x32>
tensor<[5,1024,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (160, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<160x8>>, >
shape: #ttnn.shape<5x1024x256>
tensor<[5,1024,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (160, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<188x2>>, >
shape: #ttnn.shape<5x1200x64>
tensor<[5,1200,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (188, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<188x10>>, >
shape: #ttnn.shape<5x1200x300>
tensor<[5,1200,300,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (188, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<640x32x64>
tensor<[640,32,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (640, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x2>>, >
shape: #ttnn.shape<640x64x64>
tensor<[640,64,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (1280, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<240x5>>, >
shape: #ttnn.shape<64x120x160>
tensor<[64,120,160,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 120 + d1, d2), memory_config: (240, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x8>>, >
shape: #ttnn.shape<64x160x240>
tensor<[64,160,240,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 160 + d1, d2), memory_config: (320, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<64x1x64>
tensor<[64,1,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (2, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<64x1x64>
tensor<[64,1,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (2, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<64x1x1>
tensor<[64,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<64x1x13>
tensor<[64,1,13,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<64x20x30>
tensor<[64,20,30,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 20 + d1, d2), memory_config: (40, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<480x10>>, >
shape: #ttnn.shape<64x240x320>
tensor<[64,240,320,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 240 + d1, d2), memory_config: (480, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x2>>, >
shape: #ttnn.shape<64x30x40>
tensor<[64,30,40,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 30 + d1, d2), memory_config: (60, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x15>>, >
shape: #ttnn.shape<64x320x480>
tensor<[64,320,480,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 320 + d1, d2), memory_config: (640, 15, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x2>>, >
shape: #ttnn.shape<64x40x60>
tensor<[64,40,60,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 40 + d1, d2), memory_config: (80, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x20>>, >
shape: #ttnn.shape<64x480x640>
tensor<[64,480,640,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 480 + d1, d2), memory_config: (960, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x2>>, >
shape: #ttnn.shape<64x49x49>
tensor<[64,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (98, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x1>>, >
shape: #ttnn.shape<64x49x32>
tensor<[64,49,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (98, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x3>>, >
shape: #ttnn.shape<64x60x80>
tensor<[64,60,80,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 60 + d1, d2), memory_config: (120, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<160x4>>, >
shape: #ttnn.shape<64x80x120>
tensor<[64,80,120,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 80 + d1, d2), memory_config: (160, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<64x9x9>
tensor<[64,9,9,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x2>>, >
shape: #ttnn.shape<64x9x64>
tensor<[64,9,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (18, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<672x14x14>
tensor<[672,14,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (294, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<672x7x14>
tensor<[672,7,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (147, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<6x15x64>
tensor<[6,15,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<6x15x15>
tensor<[6,15,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x1>
tensor<[6,1,1,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x10>
tensor<[6,1,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x11>
tensor<[6,1,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x12>
tensor<[6,1,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x13>
tensor<[6,1,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x14>
tensor<[6,1,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x15>
tensor<[6,1,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x16>
tensor<[6,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x17>
tensor<[6,1,17,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x18>
tensor<[6,1,18,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x19>
tensor<[6,1,19,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x2>
tensor<[6,1,2,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x20>
tensor<[6,1,20,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x3>
tensor<[6,1,3,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x4>
tensor<[6,1,4,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x5>
tensor<[6,1,5,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x6>
tensor<[6,1,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x7>
tensor<[6,1,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x8>
tensor<[6,1,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x1x9>
tensor<[6,1,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<6x1x64>
tensor<[6,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<71x7x7>
tensor<[71,7,7,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x2>>, >
shape: #ttnn.shape<71x7x64>
tensor<[71,7,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (16, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3267x1>>, >
shape: #ttnn.shape<726x144x32>
tensor<[726,144,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (3267, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3267x5>>, >
shape: #ttnn.shape<726x144x144>
tensor<[726,144,144,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (3267, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<72x14x14>
tensor<[72,14,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x2>>, >
shape: #ttnn.shape<72x28x56>
tensor<[72,28,56,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 28 + d1, d2), memory_config: (63, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x2>>, >
shape: #ttnn.shape<72x56x56>
tensor<[72,56,56,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 56 + d1, d2), memory_config: (126, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<72x7x14>
tensor<[72,7,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3072x1>>, >
shape: #ttnn.shape<768x128x32>
tensor<[768,128,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 128 + d1, d2), memory_config: (3072, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<768x1>>, >
shape: #ttnn.shape<768x32x32>
tensor<[768,32,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (768, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<768x2>>, >
shape: #ttnn.shape<768x32x64>
tensor<[768,32,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (768, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1536x2>>, >
shape: #ttnn.shape<768x64x64>
tensor<[768,64,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (1536, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3888x1>>, >
shape: #ttnn.shape<864x144x32>
tensor<[864,144,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (3888, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3888x5>>, >
shape: #ttnn.shape<864x144x144>
tensor<[864,144,144,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 144 + d1, d2), memory_config: (3888, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<8x100x32>
tensor<[8,100,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 100 + d1, d2), memory_config: (25, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x4>>, >
shape: #ttnn.shape<8x100x100>
tensor<[8,100,100,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 100 + d1, d2), memory_config: (25, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<8x100x32>
tensor<[8,100,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 100 + d1, d2), memory_config: (25, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x3>>, >
shape: #ttnn.shape<8x1024x80>
tensor<[8,1024,80,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (256, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x32>>, >
shape: #ttnn.shape<8x1024x1024>
tensor<[8,1024,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (256, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<8x1024x9>
tensor<[8,1024,9,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (256, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x3>>, >
shape: #ttnn.shape<8x1024x80>
tensor<[8,1024,80,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (256, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<8x10x64>
tensor<[8,10,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<8x10x10>
tensor<[8,10,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x1>
tensor<[8,1,1,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x10>
tensor<[8,1,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x11>
tensor<[8,1,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x12>
tensor<[8,1,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x13>
tensor<[8,1,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x14>
tensor<[8,1,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x15>
tensor<[8,1,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x16>
tensor<[8,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x17>
tensor<[8,1,17,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x18>
tensor<[8,1,18,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x19>
tensor<[8,1,19,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x2>
tensor<[8,1,2,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x20>
tensor<[8,1,20,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x3>
tensor<[8,1,3,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x4>
tensor<[8,1,4,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x5>
tensor<[8,1,5,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x6>
tensor<[8,1,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x7>
tensor<[8,1,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x8>
tensor<[8,1,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1x9>
tensor<[8,1,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<8x1x64>
tensor<[8,1,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x3>>, >
shape: #ttnn.shape<8x2048x96>
tensor<[8,2048,96,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (512, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<8x2048x256>
tensor<[8,2048,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (512, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x8>>, >
shape: #ttnn.shape<8x256x256>
tensor<[8,256,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (64, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<8x256x9>
tensor<[8,256,9,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (64, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x5>>, >
shape: #ttnn.shape<8x256x160>
tensor<[8,256,160,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (64, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x5>>, >
shape: #ttnn.shape<8x256x160>
tensor<[8,256,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (64, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<8x256x32>
tensor<[8,256,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (64, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x64>>, >
shape: #ttnn.shape<8x256x2048>
tensor<[8,256,2048,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (64, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x8>>, >
shape: #ttnn.shape<8x256x256>
tensor<[8,256,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (64, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x5>>, >
shape: #ttnn.shape<8x256x160>
tensor<[8,256,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (64, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<75x2>>, >
shape: #ttnn.shape<8x300x64>
tensor<[8,300,64,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (75, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<75x10>>, >
shape: #ttnn.shape<8x300x300>
tensor<[8,300,300,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (75, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x2>>, >
shape: #ttnn.shape<8x4096x40>
tensor<[8,4096,40,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (1024, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x128>>, >
shape: #ttnn.shape<8x4096x4096>
tensor<[8,4096,4096,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (1024, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x1>>, >
shape: #ttnn.shape<8x4096x9>
tensor<[8,4096,9,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (1024, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x2>>, >
shape: #ttnn.shape<8x4096x40>
tensor<[8,4096,40,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (1024, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x2>>, >
shape: #ttnn.shape<8x64x64>
tensor<[8,64,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (16, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<8x64x9>
tensor<[8,64,9,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x5>>, >
shape: #ttnn.shape<8x64x160>
tensor<[8,64,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (16, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x5>>, >
shape: #ttnn.shape<8x64x160>
tensor<[8,64,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (16, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<230x1>>, >
shape: #ttnn.shape<8x920x32>
tensor<[8,920,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 920 + d1, d2), memory_config: (230, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<90x1>>, >
shape: #ttnn.shape<960x3x7>
tensor<[960,3,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3 + d1, d2), memory_config: (90, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<960x7x7>
tensor<[960,7,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x40>>, >
shape: #ttnn.shape<1x1370x1280>
tensor<[1,1370,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (43, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x16x64>
tensor<[1,1,16,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 * 16 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x6>>, >
shape: #ttnn.shape<1x1445x192>
tensor<[1,1445,192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (46, 6, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x7>>, >
shape: #ttnn.shape<1x3x224x224>
tensor<[1,3,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 * 224 + d2, d3), memory_config: (21, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x128>
tensor<[1,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x7x64>
tensor<[1,1,7,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x197x768>
tensor<[1,197,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x24>>, >
shape: #ttnn.shape<1x257x768>
tensor<[1,257,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (9, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x50x768>
tensor<[1,50,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<88x1>>, >
shape: #ttnn.shape<1x200x14x14>
tensor<[1,200,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2800 + d1 * 14 + d2, d3), memory_config: (88, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11>
tensor<[1,11,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<231x1>>, >
shape: #ttnn.shape<1x528x14x14>
tensor<[1,528,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7392 + d1 * 14 + d2, d3), memory_config: (231, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x128>
tensor<[1,12,1,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x2x64>
tensor<[1,12,2,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 * 2 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x12x11x64>
tensor<[1,12,11,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 132 + d1 * 11 + d2, d3), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x12x12x64>
tensor<[1,12,12,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x12x13x64>
tensor<[1,12,13,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 156 + d1 * 13 + d2, d3), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x4>>, >
shape: #ttnn.shape<1x12x12x128>
tensor<[1,12,12,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (5, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x2>>, >
shape: #ttnn.shape<1x12x14x64>
tensor<[1,12,14,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 168 + d1 * 14 + d2, d3), memory_config: (6, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x2>>, >
shape: #ttnn.shape<1x12x15x64>
tensor<[1,12,15,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 180 + d1 * 15 + d2, d3), memory_config: (6, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x2>>, >
shape: #ttnn.shape<1x12x16x64>
tensor<[1,12,16,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 192 + d1 * 16 + d2, d3), memory_config: (6, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x2>>, >
shape: #ttnn.shape<1x12x17x64>
tensor<[1,12,17,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 204 + d1 * 17 + d2, d3), memory_config: (7, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x2>>, >
shape: #ttnn.shape<1x12x18x64>
tensor<[1,12,18,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 216 + d1 * 18 + d2, d3), memory_config: (7, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x12x19x64>
tensor<[1,12,19,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 228 + d1 * 19 + d2, d3), memory_config: (8, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x12x20x64>
tensor<[1,12,20,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 240 + d1 * 20 + d2, d3), memory_config: (8, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x12x3x64>
tensor<[1,12,3,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 36 + d1 * 3 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x12x4x64>
tensor<[1,12,4,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 48 + d1 * 4 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x12x5x64>
tensor<[1,12,5,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 60 + d1 * 5 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x2>>, >
shape: #ttnn.shape<1x12x46x64>
tensor<[1,12,46,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 552 + d1 * 46 + d2, d3), memory_config: (18, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x2>>, >
shape: #ttnn.shape<1x12x47x64>
tensor<[1,12,47,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 564 + d1 * 47 + d2, d3), memory_config: (18, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x2>>, >
shape: #ttnn.shape<1x12x48x64>
tensor<[1,12,48,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 48 + d2, d3), memory_config: (18, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x2>>, >
shape: #ttnn.shape<1x12x49x64>
tensor<[1,12,49,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 588 + d1 * 49 + d2, d3), memory_config: (19, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x2>>, >
shape: #ttnn.shape<1x12x50x64>
tensor<[1,12,50,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 600 + d1 * 50 + d2, d3), memory_config: (19, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x12x6x64>
tensor<[1,12,6,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 72 + d1 * 6 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x2>>, >
shape: #ttnn.shape<1x12x51x64>
tensor<[1,12,51,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 612 + d1 * 51 + d2, d3), memory_config: (20, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x2>>, >
shape: #ttnn.shape<1x12x52x64>
tensor<[1,12,52,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 624 + d1 * 52 + d2, d3), memory_config: (20, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x2>>, >
shape: #ttnn.shape<1x12x53x64>
tensor<[1,12,53,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 636 + d1 * 53 + d2, d3), memory_config: (20, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x2>>, >
shape: #ttnn.shape<1x12x54x64>
tensor<[1,12,54,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 648 + d1 * 54 + d2, d3), memory_config: (21, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x2>>, >
shape: #ttnn.shape<1x12x55x64>
tensor<[1,12,55,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 660 + d1 * 55 + d2, d3), memory_config: (21, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x2>>, >
shape: #ttnn.shape<1x12x56x64>
tensor<[1,12,56,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 * 56 + d2, d3), memory_config: (21, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x2>>, >
shape: #ttnn.shape<1x24x56x56>
tensor<[1,24,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 56 + d2, d3), memory_config: (42, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<22x2>>, >
shape: #ttnn.shape<1x12x57x64>
tensor<[1,12,57,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 684 + d1 * 57 + d2, d3), memory_config: (22, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<22x2>>, >
shape: #ttnn.shape<1x12x58x64>
tensor<[1,12,58,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 696 + d1 * 58 + d2, d3), memory_config: (22, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<23x2>>, >
shape: #ttnn.shape<1x12x59x64>
tensor<[1,12,59,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 708 + d1 * 59 + d2, d3), memory_config: (23, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<23x2>>, >
shape: #ttnn.shape<1x12x60x64>
tensor<[1,12,60,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 720 + d1 * 60 + d2, d3), memory_config: (23, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x12x7x64>
tensor<[1,12,7,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 84 + d1 * 7 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<23x2>>, >
shape: #ttnn.shape<1x12x61x64>
tensor<[1,12,61,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 732 + d1 * 61 + d2, d3), memory_config: (23, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x2>>, >
shape: #ttnn.shape<1x12x62x64>
tensor<[1,12,62,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 744 + d1 * 62 + d2, d3), memory_config: (24, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x2>>, >
shape: #ttnn.shape<1x12x63x64>
tensor<[1,12,63,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 756 + d1 * 63 + d2, d3), memory_config: (24, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x2>>, >
shape: #ttnn.shape<1x12x64x64>
tensor<[1,12,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 64 + d2, d3), memory_config: (24, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x2>>, >
shape: #ttnn.shape<1x12x65x64>
tensor<[1,12,65,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 780 + d1 * 65 + d2, d3), memory_config: (25, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x128>
tensor<[1,12,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x2>>, >
shape: #ttnn.shape<1x12x66x64>
tensor<[1,12,66,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 792 + d1 * 66 + d2, d3), memory_config: (25, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<26x2>>, >
shape: #ttnn.shape<1x12x67x64>
tensor<[1,12,67,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 804 + d1 * 67 + d2, d3), memory_config: (26, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<26x2>>, >
shape: #ttnn.shape<1x12x68x64>
tensor<[1,12,68,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 816 + d1 * 68 + d2, d3), memory_config: (26, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<26x2>>, >
shape: #ttnn.shape<1x12x69x64>
tensor<[1,12,69,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 828 + d1 * 69 + d2, d3), memory_config: (26, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<27x2>>, >
shape: #ttnn.shape<1x12x70x64>
tensor<[1,12,70,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 840 + d1 * 70 + d2, d3), memory_config: (27, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x12x8x64>
tensor<[1,12,8,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 96 + d1 * 8 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<27x2>>, >
shape: #ttnn.shape<1x12x71x64>
tensor<[1,12,71,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 852 + d1 * 71 + d2, d3), memory_config: (27, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<27x2>>, >
shape: #ttnn.shape<1x12x72x64>
tensor<[1,12,72,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 864 + d1 * 72 + d2, d3), memory_config: (27, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x2>>, >
shape: #ttnn.shape<1x12x73x64>
tensor<[1,12,73,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 876 + d1 * 73 + d2, d3), memory_config: (28, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x2>>, >
shape: #ttnn.shape<1x12x74x64>
tensor<[1,12,74,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 888 + d1 * 74 + d2, d3), memory_config: (28, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x2>>, >
shape: #ttnn.shape<1x12x75x64>
tensor<[1,12,75,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 900 + d1 * 75 + d2, d3), memory_config: (29, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x2>>, >
shape: #ttnn.shape<1x12x76x64>
tensor<[1,12,76,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 912 + d1 * 76 + d2, d3), memory_config: (29, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x2>>, >
shape: #ttnn.shape<1x12x77x64>
tensor<[1,12,77,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 924 + d1 * 77 + d2, d3), memory_config: (29, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x2>>, >
shape: #ttnn.shape<1x12x78x64>
tensor<[1,12,78,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 936 + d1 * 78 + d2, d3), memory_config: (30, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x2>>, >
shape: #ttnn.shape<1x12x79x64>
tensor<[1,12,79,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 948 + d1 * 79 + d2, d3), memory_config: (30, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x2>>, >
shape: #ttnn.shape<1x12x80x64>
tensor<[1,12,80,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 80 + d2, d3), memory_config: (30, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x12x9x64>
tensor<[1,12,9,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 108 + d1 * 9 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<31x2>>, >
shape: #ttnn.shape<1x12x81x64>
tensor<[1,12,81,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 972 + d1 * 81 + d2, d3), memory_config: (31, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<31x2>>, >
shape: #ttnn.shape<1x12x82x64>
tensor<[1,12,82,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 984 + d1 * 82 + d2, d3), memory_config: (31, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x2>>, >
shape: #ttnn.shape<1x12x83x64>
tensor<[1,12,83,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 996 + d1 * 83 + d2, d3), memory_config: (32, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x2>>, >
shape: #ttnn.shape<1x12x84x64>
tensor<[1,12,84,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 84 + d2, d3), memory_config: (32, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x2>>, >
shape: #ttnn.shape<1x12x85x64>
tensor<[1,12,85,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1020 + d1 * 85 + d2, d3), memory_config: (32, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<33x2>>, >
shape: #ttnn.shape<1x12x86x64>
tensor<[1,12,86,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1032 + d1 * 86 + d2, d3), memory_config: (33, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<33x2>>, >
shape: #ttnn.shape<1x12x87x64>
tensor<[1,12,87,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1044 + d1 * 87 + d2, d3), memory_config: (33, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<33x2>>, >
shape: #ttnn.shape<1x12x88x64>
tensor<[1,12,88,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1056 + d1 * 88 + d2, d3), memory_config: (33, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<34x2>>, >
shape: #ttnn.shape<1x12x89x64>
tensor<[1,12,89,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1068 + d1 * 89 + d2, d3), memory_config: (34, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<34x2>>, >
shape: #ttnn.shape<1x12x90x64>
tensor<[1,12,90,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1080 + d1 * 90 + d2, d3), memory_config: (34, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x12x10x64>
tensor<[1,12,10,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 * 10 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x2>>, >
shape: #ttnn.shape<1x12x91x64>
tensor<[1,12,91,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1092 + d1 * 91 + d2, d3), memory_config: (35, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x2>>, >
shape: #ttnn.shape<1x12x92x64>
tensor<[1,12,92,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1104 + d1 * 92 + d2, d3), memory_config: (35, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x2>>, >
shape: #ttnn.shape<1x12x93x64>
tensor<[1,12,93,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1116 + d1 * 93 + d2, d3), memory_config: (35, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<36x2>>, >
shape: #ttnn.shape<1x12x94x64>
tensor<[1,12,94,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1128 + d1 * 94 + d2, d3), memory_config: (36, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<36x2>>, >
shape: #ttnn.shape<1x12x95x64>
tensor<[1,12,95,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1140 + d1 * 95 + d2, d3), memory_config: (36, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<36x2>>, >
shape: #ttnn.shape<1x12x96x64>
tensor<[1,12,96,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1152 + d1 * 96 + d2, d3), memory_config: (36, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<37x2>>, >
shape: #ttnn.shape<1x12x97x64>
tensor<[1,12,97,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1164 + d1 * 97 + d2, d3), memory_config: (37, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<37x2>>, >
shape: #ttnn.shape<1x12x98x64>
tensor<[1,12,98,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1176 + d1 * 98 + d2, d3), memory_config: (37, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x2>>, >
shape: #ttnn.shape<1x12x99x64>
tensor<[1,12,99,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1188 + d1 * 99 + d2, d3), memory_config: (38, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x240x28x28>
tensor<[1,240,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 28 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x3>>, >
shape: #ttnn.shape<1x16128x85>
tensor<[1,16128,85,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16128 + d1, d2), memory_config: (504, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x4>>, >
shape: #ttnn.shape<1x256x112x112>
tensor<[1,256,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 112 + d2, d3), memory_config: (896, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x24>>, >
shape: #ttnn.shape<1x128x128x768>
tensor<[1,128,128,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (512, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x448x28x28>
tensor<[1,448,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 28 + d2, d3), memory_config: (392, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x256x28x28>
tensor<[1,256,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 28 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x1>>, >
shape: #ttnn.shape<1x185x28x28>
tensor<[1,185,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5180 + d1 * 28 + d2, d3), memory_config: (162, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x480x28x28>
tensor<[1,480,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 28 + d2, d3), memory_config: (420, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x512x28x28>
tensor<[1,512,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 28 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x480x28x28>
tensor<[1,480,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 28 + d2, d3), memory_config: (420, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x448x28x28>
tensor<[1,448,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 28 + d2, d3), memory_config: (392, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x416x28x28>
tensor<[1,416,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 28 + d2, d3), memory_config: (364, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x384x28x28>
tensor<[1,384,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 28 + d2, d3), memory_config: (336, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x352x28x28>
tensor<[1,352,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 28 + d2, d3), memory_config: (308, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x320x28x28>
tensor<[1,320,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 28 + d2, d3), memory_config: (280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x288x28x28>
tensor<[1,288,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 28 + d2, d3), memory_config: (252, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x256x28x28>
tensor<[1,256,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 28 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x224x28x28>
tensor<[1,224,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 28 + d2, d3), memory_config: (196, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x192x28x28>
tensor<[1,192,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 28 + d2, d3), memory_config: (168, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<140x1>>, >
shape: #ttnn.shape<1x160x28x28>
tensor<[1,160,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4480 + d1 * 28 + d2, d3), memory_config: (140, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x256x32x32>
tensor<[1,256,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 32 + d2, d3), memory_config: (256, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x2>>, >
shape: #ttnn.shape<1x256x64x64>
tensor<[1,256,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 64 + d2, d3), memory_config: (512, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<768x2>>, >
shape: #ttnn.shape<1x384x64x64>
tensor<[1,384,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24576 + d1 * 64 + d2, d3), memory_config: (768, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x1>>, >
shape: #ttnn.shape<1x2560x16x16>
tensor<[1,2560,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 16 + d2, d3), memory_config: (1280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x1920x16x16>
tensor<[1,1920,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 16 + d2, d3), memory_config: (960, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1920x1>>, >
shape: #ttnn.shape<1x1920x32x32>
tensor<[1,1920,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 61440 + d1 * 32 + d2, d3), memory_config: (1920, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x2560x8x8>
tensor<[1,2560,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 8 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13>
tensor<[1,13,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x13x128>
tensor<[1,13,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14>
tensor<[1,14,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x14x14x1024>
tensor<[1,14,14,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<217x2>>, >
shape: #ttnn.shape<1x124x56x56>
tensor<[1,124,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6944 + d1 * 56 + d2, d3), memory_config: (217, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<249x2>>, >
shape: #ttnn.shape<1x142x56x56>
tensor<[1,142,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7952 + d1 * 56 + d2, d3), memory_config: (249, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<179x2>>, >
shape: #ttnn.shape<1x102x56x56>
tensor<[1,102,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5712 + d1 * 56 + d2, d3), memory_config: (179, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<95x2>>, >
shape: #ttnn.shape<1x54x56x56>
tensor<[1,54,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3024 + d1 * 56 + d2, d3), memory_config: (95, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<137x2>>, >
shape: #ttnn.shape<1x78x56x56>
tensor<[1,78,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4368 + d1 * 56 + d2, d3), memory_config: (137, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15>
tensor<[1,15,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3072x1>>, >
shape: #ttnn.shape<1x3072x32x32>
tensor<[1,3072,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 98304 + d1 * 32 + d2, d3), memory_config: (3072, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16>
tensor<[1,16,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x16x2x64>
tensor<[1,16,2,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 * 2 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x2>>, >
shape: #ttnn.shape<1x16x11x64>
tensor<[1,16,11,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 176 + d1 * 11 + d2, d3), memory_config: (6, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x2>>, >
shape: #ttnn.shape<1x16x11x64>
tensor<[1,16,11,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 176 + d1 * 11 + d2, d3), memory_config: (6, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x2>>, >
shape: #ttnn.shape<1x16x12x64>
tensor<[1,16,12,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 192 + d1 * 12 + d2, d3), memory_config: (6, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x2>>, >
shape: #ttnn.shape<1x16x12x64>
tensor<[1,16,12,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 192 + d1 * 12 + d2, d3), memory_config: (6, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x2>>, >
shape: #ttnn.shape<1x16x13x64>
tensor<[1,16,13,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 208 + d1 * 13 + d2, d3), memory_config: (7, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x2>>, >
shape: #ttnn.shape<1x16x13x64>
tensor<[1,16,13,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 208 + d1 * 13 + d2, d3), memory_config: (7, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x2>>, >
shape: #ttnn.shape<1x16x14x64>
tensor<[1,16,14,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 14 + d2, d3), memory_config: (7, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x2>>, >
shape: #ttnn.shape<1x16x14x64>
tensor<[1,16,14,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 14 + d2, d3), memory_config: (7, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x16x15x64>
tensor<[1,16,15,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 240 + d1 * 15 + d2, d3), memory_config: (8, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x16x15x64>
tensor<[1,16,15,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 240 + d1 * 15 + d2, d3), memory_config: (8, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x16x16x64>
tensor<[1,16,16,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (8, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x16x16x64>
tensor<[1,16,16,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (8, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x2>>, >
shape: #ttnn.shape<1x16x17x64>
tensor<[1,16,17,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 272 + d1 * 17 + d2, d3), memory_config: (9, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x2>>, >
shape: #ttnn.shape<1x16x17x64>
tensor<[1,16,17,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 272 + d1 * 17 + d2, d3), memory_config: (9, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x96>>, >
shape: #ttnn.shape<1x16x16x3072>
tensor<[1,16,16,3072,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (8, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x2>>, >
shape: #ttnn.shape<1x16x18x64>
tensor<[1,16,18,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 288 + d1 * 18 + d2, d3), memory_config: (9, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x2>>, >
shape: #ttnn.shape<1x16x18x64>
tensor<[1,16,18,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 288 + d1 * 18 + d2, d3), memory_config: (9, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<1x16x19x64>
tensor<[1,16,19,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 304 + d1 * 19 + d2, d3), memory_config: (10, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<1x16x19x64>
tensor<[1,16,19,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 304 + d1 * 19 + d2, d3), memory_config: (10, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<1x16x20x64>
tensor<[1,16,20,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 320 + d1 * 20 + d2, d3), memory_config: (10, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<1x16x20x64>
tensor<[1,16,20,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 320 + d1 * 20 + d2, d3), memory_config: (10, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x16x3x64>
tensor<[1,16,3,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 48 + d1 * 3 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x2>>, >
shape: #ttnn.shape<1x16x21x64>
tensor<[1,16,21,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 336 + d1 * 21 + d2, d3), memory_config: (11, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x2>>, >
shape: #ttnn.shape<1x16x21x64>
tensor<[1,16,21,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 336 + d1 * 21 + d2, d3), memory_config: (11, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x2>>, >
shape: #ttnn.shape<1x16x22x64>
tensor<[1,16,22,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 352 + d1 * 22 + d2, d3), memory_config: (11, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x2>>, >
shape: #ttnn.shape<1x16x22x64>
tensor<[1,16,22,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 352 + d1 * 22 + d2, d3), memory_config: (11, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x2>>, >
shape: #ttnn.shape<1x16x23x64>
tensor<[1,16,23,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 368 + d1 * 23 + d2, d3), memory_config: (12, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x2>>, >
shape: #ttnn.shape<1x16x23x64>
tensor<[1,16,23,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 368 + d1 * 23 + d2, d3), memory_config: (12, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x2>>, >
shape: #ttnn.shape<1x16x24x64>
tensor<[1,16,24,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 384 + d1 * 24 + d2, d3), memory_config: (12, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x2>>, >
shape: #ttnn.shape<1x16x24x64>
tensor<[1,16,24,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 384 + d1 * 24 + d2, d3), memory_config: (12, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<13x2>>, >
shape: #ttnn.shape<1x16x25x64>
tensor<[1,16,25,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 25 + d2, d3), memory_config: (13, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<13x2>>, >
shape: #ttnn.shape<1x16x26x64>
tensor<[1,16,26,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 416 + d1 * 26 + d2, d3), memory_config: (13, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<14x2>>, >
shape: #ttnn.shape<1x16x27x64>
tensor<[1,16,27,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 27 + d2, d3), memory_config: (14, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<14x2>>, >
shape: #ttnn.shape<1x16x28x64>
tensor<[1,16,28,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 448 + d1 * 28 + d2, d3), memory_config: (14, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x144x28x28>
tensor<[1,144,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 28 + d2, d3), memory_config: (126, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<230x1>>, >
shape: #ttnn.shape<1x262x28x28>
tensor<[1,262,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7336 + d1 * 28 + d2, d3), memory_config: (230, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<151x1>>, >
shape: #ttnn.shape<1x172x28x28>
tensor<[1,172,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4816 + d1 * 28 + d2, d3), memory_config: (151, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<191x1>>, >
shape: #ttnn.shape<1x218x28x28>
tensor<[1,218,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6104 + d1 * 28 + d2, d3), memory_config: (191, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<259x1>>, >
shape: #ttnn.shape<1x296x28x28>
tensor<[1,296,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8288 + d1 * 28 + d2, d3), memory_config: (259, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<107x1>>, >
shape: #ttnn.shape<1x122x28x28>
tensor<[1,122,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3416 + d1 * 28 + d2, d3), memory_config: (107, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<55x1>>, >
shape: #ttnn.shape<1x62x28x28>
tensor<[1,62,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1736 + d1 * 28 + d2, d3), memory_config: (55, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<83x1>>, >
shape: #ttnn.shape<1x94x28x28>
tensor<[1,94,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2632 + d1 * 28 + d2, d3), memory_config: (83, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<15x2>>, >
shape: #ttnn.shape<1x16x29x64>
tensor<[1,16,29,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 464 + d1 * 29 + d2, d3), memory_config: (15, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x16x4x64>
tensor<[1,16,4,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 4 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x16x5x64>
tensor<[1,16,5,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 5 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x16x6x64>
tensor<[1,16,6,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 96 + d1 * 6 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x16x7x64>
tensor<[1,16,7,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 112 + d1 * 7 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x16x7x64>
tensor<[1,16,7,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 112 + d1 * 7 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x16x8x64>
tensor<[1,16,8,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 128 + d1 * 8 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x16x8x64>
tensor<[1,16,8,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 128 + d1 * 8 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x16x9x64>
tensor<[1,16,9,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 9 + d2, d3), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x16x9x64>
tensor<[1,16,9,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 9 + d2, d3), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x16x10x64>
tensor<[1,16,10,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 160 + d1 * 10 + d2, d3), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x16x10x64>
tensor<[1,16,10,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 160 + d1 * 10 + d2, d3), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<172x1>>, >
shape: #ttnn.shape<1x782x7x7>
tensor<[1,782,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5474 + d1 * 7 + d2, d3), memory_config: (172, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<235x1>>, >
shape: #ttnn.shape<1x1072x7x7>
tensor<[1,1072,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7504 + d1 * 7 + d2, d3), memory_config: (235, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<175x1>>, >
shape: #ttnn.shape<1x800x7x7>
tensor<[1,800,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5600 + d1 * 7 + d2, d3), memory_config: (175, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x17>
tensor<[1,17,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x18>
tensor<[1,18,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x19>
tensor<[1,19,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3072x8>>, >
shape: #ttnn.shape<1x384x256x256>
tensor<[1,384,256,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 98304 + d1 * 256 + d2, d3), memory_config: (3072, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x2>>, >
shape: #ttnn.shape<1x384x35x35>
tensor<[1,384,35,35,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 35 + d2, d3), memory_config: (420, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<384x1>>, >
shape: #ttnn.shape<1x1536x8x8>
tensor<[1,1536,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12288 + d1 * 8 + d2, d3), memory_config: (384, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x20>
tensor<[1,20,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x2>
tensor<[1,2,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x2x1x128>
tensor<[1,2,1,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x2x13x128>
tensor<[1,2,13,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 26 + d1 * 13 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x2x12x128>
tensor<[1,2,12,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 * 12 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x2x14x128>
tensor<[1,2,14,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28 + d1 * 14 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x2x15x128>
tensor<[1,2,15,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30 + d1 * 15 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x2x16x128>
tensor<[1,2,16,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 * 16 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x17x128>
tensor<[1,2,17,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 34 + d1 * 17 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x18x128>
tensor<[1,2,18,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 36 + d1 * 18 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x19x128>
tensor<[1,2,19,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 38 + d1 * 19 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x20x128>
tensor<[1,2,20,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40 + d1 * 20 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x21x128>
tensor<[1,2,21,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 42 + d1 * 21 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x22x128>
tensor<[1,2,22,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 44 + d1 * 22 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x23x128>
tensor<[1,2,23,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46 + d1 * 23 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x24x128>
tensor<[1,2,24,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 48 + d1 * 24 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x25x128>
tensor<[1,2,25,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 50 + d1 * 25 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x26x128>
tensor<[1,2,26,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 52 + d1 * 26 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x27x128>
tensor<[1,2,27,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 54 + d1 * 27 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x28x128>
tensor<[1,2,28,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 56 + d1 * 28 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x2x29x128>
tensor<[1,2,29,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 58 + d1 * 29 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<287x1>>, >
shape: #ttnn.shape<1x328x28x28>
tensor<[1,328,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9184 + d1 * 28 + d2, d3), memory_config: (287, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x40x28x28>
tensor<[1,40,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 28 + d2, d3), memory_config: (35, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<242x1>>, >
shape: #ttnn.shape<1x276x28x28>
tensor<[1,276,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7728 + d1 * 28 + d2, d3), memory_config: (242, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<272x1>>, >
shape: #ttnn.shape<1x310x28x28>
tensor<[1,310,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8680 + d1 * 28 + d2, d3), memory_config: (272, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<322x1>>, >
shape: #ttnn.shape<1x368x28x28>
tensor<[1,368,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10304 + d1 * 28 + d2, d3), memory_config: (322, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<408x1>>, >
shape: #ttnn.shape<1x466x28x28>
tensor<[1,466,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13048 + d1 * 28 + d2, d3), memory_config: (408, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<133x1>>, >
shape: #ttnn.shape<1x152x28x28>
tensor<[1,152,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4256 + d1 * 28 + d2, d3), memory_config: (133, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<69x1>>, >
shape: #ttnn.shape<1x78x28x28>
tensor<[1,78,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2184 + d1 * 28 + d2, d3), memory_config: (69, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<104x1>>, >
shape: #ttnn.shape<1x118x28x28>
tensor<[1,118,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3304 + d1 * 28 + d2, d3), memory_config: (104, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x21>
tensor<[1,21,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x22>
tensor<[1,22,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x23>
tensor<[1,23,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24>
tensor<[1,24,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x4>>, >
shape: #ttnn.shape<1x48x112x112>
tensor<[1,48,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 112 + d2, d3), memory_config: (168, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x4>>, >
shape: #ttnn.shape<1x24x32x128>
tensor<[1,24,32,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 32 + d2, d3), memory_config: (24, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x480x14x14>
tensor<[1,480,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 14 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x3234x4>
tensor<[1,3234,4,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3234 + d1, d2), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x3>>, >
shape: #ttnn.shape<1x3234x91>
tensor<[1,3234,91,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3234 + d1, d2), memory_config: (102, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25>
tensor<[1,25,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4096x4>>, >
shape: #ttnn.shape<1x1024x128x128>
tensor<[1,1024,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 131072 + d1 * 128 + d2, d3), memory_config: (4096, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x896x14x14>
tensor<[1,896,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 14 + d2, d3), memory_config: (392, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<784x1>>, >
shape: #ttnn.shape<1x1792x14x14>
tensor<[1,1792,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25088 + d1 * 14 + d2, d3), memory_config: (784, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<770x1>>, >
shape: #ttnn.shape<1x1760x14x14>
tensor<[1,1760,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24640 + d1 * 14 + d2, d3), memory_config: (770, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<756x1>>, >
shape: #ttnn.shape<1x1728x14x14>
tensor<[1,1728,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24192 + d1 * 14 + d2, d3), memory_config: (756, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<742x1>>, >
shape: #ttnn.shape<1x1696x14x14>
tensor<[1,1696,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23744 + d1 * 14 + d2, d3), memory_config: (742, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<728x1>>, >
shape: #ttnn.shape<1x1664x14x14>
tensor<[1,1664,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23296 + d1 * 14 + d2, d3), memory_config: (728, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<714x1>>, >
shape: #ttnn.shape<1x1632x14x14>
tensor<[1,1632,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22848 + d1 * 14 + d2, d3), memory_config: (714, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<700x1>>, >
shape: #ttnn.shape<1x1600x14x14>
tensor<[1,1600,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22400 + d1 * 14 + d2, d3), memory_config: (700, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<686x1>>, >
shape: #ttnn.shape<1x1568x14x14>
tensor<[1,1568,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21952 + d1 * 14 + d2, d3), memory_config: (686, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<672x1>>, >
shape: #ttnn.shape<1x1536x14x14>
tensor<[1,1536,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21504 + d1 * 14 + d2, d3), memory_config: (672, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<658x1>>, >
shape: #ttnn.shape<1x1504x14x14>
tensor<[1,1504,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21056 + d1 * 14 + d2, d3), memory_config: (658, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<644x1>>, >
shape: #ttnn.shape<1x1472x14x14>
tensor<[1,1472,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20608 + d1 * 14 + d2, d3), memory_config: (644, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<630x1>>, >
shape: #ttnn.shape<1x1440x14x14>
tensor<[1,1440,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20160 + d1 * 14 + d2, d3), memory_config: (630, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<616x1>>, >
shape: #ttnn.shape<1x1408x14x14>
tensor<[1,1408,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19712 + d1 * 14 + d2, d3), memory_config: (616, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<602x1>>, >
shape: #ttnn.shape<1x1376x14x14>
tensor<[1,1376,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19264 + d1 * 14 + d2, d3), memory_config: (602, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x1>>, >
shape: #ttnn.shape<1x1344x14x14>
tensor<[1,1344,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 14 + d2, d3), memory_config: (588, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<574x1>>, >
shape: #ttnn.shape<1x1312x14x14>
tensor<[1,1312,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18368 + d1 * 14 + d2, d3), memory_config: (574, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<560x1>>, >
shape: #ttnn.shape<1x1280x14x14>
tensor<[1,1280,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17920 + d1 * 14 + d2, d3), memory_config: (560, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<546x1>>, >
shape: #ttnn.shape<1x1248x14x14>
tensor<[1,1248,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17472 + d1 * 14 + d2, d3), memory_config: (546, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<532x1>>, >
shape: #ttnn.shape<1x1216x14x14>
tensor<[1,1216,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17024 + d1 * 14 + d2, d3), memory_config: (532, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<518x1>>, >
shape: #ttnn.shape<1x1184x14x14>
tensor<[1,1184,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16576 + d1 * 14 + d2, d3), memory_config: (518, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x1>>, >
shape: #ttnn.shape<1x1152x14x14>
tensor<[1,1152,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 14 + d2, d3), memory_config: (504, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<490x1>>, >
shape: #ttnn.shape<1x1120x14x14>
tensor<[1,1120,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15680 + d1 * 14 + d2, d3), memory_config: (490, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<476x1>>, >
shape: #ttnn.shape<1x1088x14x14>
tensor<[1,1088,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15232 + d1 * 14 + d2, d3), memory_config: (476, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<462x1>>, >
shape: #ttnn.shape<1x1056x14x14>
tensor<[1,1056,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14784 + d1 * 14 + d2, d3), memory_config: (462, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x1024x14x14>
tensor<[1,1024,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 14 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<434x1>>, >
shape: #ttnn.shape<1x992x14x14>
tensor<[1,992,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13888 + d1 * 14 + d2, d3), memory_config: (434, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x960x14x14>
tensor<[1,960,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 14 + d2, d3), memory_config: (420, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x1>>, >
shape: #ttnn.shape<1x928x14x14>
tensor<[1,928,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 14 + d2, d3), memory_config: (406, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x896x14x14>
tensor<[1,896,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 14 + d2, d3), memory_config: (392, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<378x1>>, >
shape: #ttnn.shape<1x864x14x14>
tensor<[1,864,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12096 + d1 * 14 + d2, d3), memory_config: (378, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x832x14x14>
tensor<[1,832,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 14 + d2, d3), memory_config: (364, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<350x1>>, >
shape: #ttnn.shape<1x800x14x14>
tensor<[1,800,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11200 + d1 * 14 + d2, d3), memory_config: (350, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x768x14x14>
tensor<[1,768,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 14 + d2, d3), memory_config: (336, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<322x1>>, >
shape: #ttnn.shape<1x736x14x14>
tensor<[1,736,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10304 + d1 * 14 + d2, d3), memory_config: (322, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x704x14x14>
tensor<[1,704,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 14 + d2, d3), memory_config: (308, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x672x14x14>
tensor<[1,672,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 14 + d2, d3), memory_config: (294, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x640x14x14>
tensor<[1,640,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 14 + d2, d3), memory_config: (280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<266x1>>, >
shape: #ttnn.shape<1x608x14x14>
tensor<[1,608,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8512 + d1 * 14 + d2, d3), memory_config: (266, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x576x14x14>
tensor<[1,576,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 14 + d2, d3), memory_config: (252, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<238x1>>, >
shape: #ttnn.shape<1x544x14x14>
tensor<[1,544,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7616 + d1 * 14 + d2, d3), memory_config: (238, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x480x14x14>
tensor<[1,480,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 14 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x448x14x14>
tensor<[1,448,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 14 + d2, d3), memory_config: (196, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<182x1>>, >
shape: #ttnn.shape<1x416x14x14>
tensor<[1,416,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5824 + d1 * 14 + d2, d3), memory_config: (182, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x384x14x14>
tensor<[1,384,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 14 + d2, d3), memory_config: (168, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<154x1>>, >
shape: #ttnn.shape<1x352x14x14>
tensor<[1,352,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4928 + d1 * 14 + d2, d3), memory_config: (154, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<140x1>>, >
shape: #ttnn.shape<1x320x14x14>
tensor<[1,320,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4480 + d1 * 14 + d2, d3), memory_config: (140, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x288x14x14>
tensor<[1,288,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 14 + d2, d3), memory_config: (126, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x832x14x14>
tensor<[1,832,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 14 + d2, d3), memory_config: (364, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x512x16x16>
tensor<[1,512,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 16 + d2, d3), memory_config: (256, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<644x1>>, >
shape: #ttnn.shape<1x736x28x28>
tensor<[1,736,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20608 + d1 * 28 + d2, d3), memory_config: (644, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x512x32x32>
tensor<[1,512,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 32 + d2, d3), memory_config: (512, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<768x1>>, >
shape: #ttnn.shape<1x768x32x32>
tensor<[1,768,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24576 + d1 * 32 + d2, d3), memory_config: (768, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x2>>, >
shape: #ttnn.shape<1x512x56x56>
tensor<[1,512,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 56 + d2, d3), memory_config: (896, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<182x1>>, >
shape: #ttnn.shape<1x832x7x7>
tensor<[1,832,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5824 + d1 * 7 + d2, d3), memory_config: (182, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x512x8x8>
tensor<[1,512,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 8 + d2, d3), memory_config: (128, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<384x1>>, >
shape: #ttnn.shape<1x1536x8x8>
tensor<[1,1536,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12288 + d1 * 8 + d2, d3), memory_config: (384, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x26>
tensor<[1,26,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27>
tensor<[1,27,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x28>
tensor<[1,28,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x4>>, >
shape: #ttnn.shape<1x28x13x128>
tensor<[1,28,13,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 364 + d1 * 13 + d2, d3), memory_config: (12, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x16>>, >
shape: #ttnn.shape<1x28x28x512>
tensor<[1,28,28,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x29>
tensor<[1,29,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x30>
tensor<[1,30,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x3>
tensor<[1,3,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x3>>, >
shape: #ttnn.shape<1x3x16x16x85>
tensor<[1,3,16,16,85,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 768 + d1 * 256 + d2 * 16 + d3, d4), memory_config: (24, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x3>>, >
shape: #ttnn.shape<1x3x32x32x85>
tensor<[1,3,32,32,85,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3072 + d1 * 1024 + d2 * 32 + d3, d4), memory_config: (96, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<384x3>>, >
shape: #ttnn.shape<1x3x64x64x85>
tensor<[1,3,64,64,85,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12288 + d1 * 4096 + d2 * 64 + d3, d4), memory_config: (384, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x4>>, >
shape: #ttnn.shape<1x64x128x128>
tensor<[1,64,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 128 + d2, d3), memory_config: (256, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<1x64x256x256>
tensor<[1,64,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 256 + d2, d3), memory_config: (512, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x48>>, >
shape: #ttnn.shape<1x32x32x1536>
tensor<[1,32,32,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (32, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x4>>, >
shape: #ttnn.shape<1x32x32x128>
tensor<[1,32,32,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (32, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x96>>, >
shape: #ttnn.shape<1x32x32x3072>
tensor<[1,32,32,3072,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (32, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x32x128>
tensor<[1,32,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x2>>, >
shape: #ttnn.shape<1x640x64x64>
tensor<[1,640,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 64 + d2, d3), memory_config: (1280, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x672x14x14>
tensor<[1,672,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 14 + d2, d3), memory_config: (294, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x2>>, >
shape: #ttnn.shape<1x72x56x56>
tensor<[1,72,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 56 + d2, d3), memory_config: (126, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3072x4>>, >
shape: #ttnn.shape<1x768x128x128>
tensor<[1,768,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 98304 + d1 * 128 + d2, d3), memory_config: (3072, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<544x1>>, >
shape: #ttnn.shape<1x1024x17x17>
tensor<[1,1024,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17408 + d1 * 17 + d2, d3), memory_config: (544, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<544x1>>, >
shape: #ttnn.shape<1x1024x17x17>
tensor<[1,1024,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17408 + d1 * 17 + d2, d3), memory_config: (544, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5760x1>>, >
shape: #ttnn.shape<1x5760x32x32>
tensor<[1,5760,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 184320 + d1 * 32 + d2, d3), memory_config: (5760, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x1024x7x7>
tensor<[1,1024,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 7 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x4>
tensor<[1,4,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x4x13x128>
tensor<[1,4,13,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 52 + d1 * 13 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<69x1>>, >
shape: #ttnn.shape<1x156x14x14>
tensor<[1,156,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2184 + d1 * 14 + d2, d3), memory_config: (69, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<104x1>>, >
shape: #ttnn.shape<1x236x14x14>
tensor<[1,236,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3304 + d1 * 14 + d2, d3), memory_config: (104, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<158x1>>, >
shape: #ttnn.shape<1x360x14x14>
tensor<[1,360,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5040 + d1 * 14 + d2, d3), memory_config: (158, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<287x1>>, >
shape: #ttnn.shape<1x654x14x14>
tensor<[1,654,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9156 + d1 * 14 + d2, d3), memory_config: (287, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x80x14x14>
tensor<[1,80,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 14 + d2, d3), memory_config: (35, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<324x1>>, >
shape: #ttnn.shape<1x740x14x14>
tensor<[1,740,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10360 + d1 * 14 + d2, d3), memory_config: (324, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<238x1>>, >
shape: #ttnn.shape<1x544x14x14>
tensor<[1,544,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7616 + d1 * 14 + d2, d3), memory_config: (238, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<133x1>>, >
shape: #ttnn.shape<1x304x14x14>
tensor<[1,304,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4256 + d1 * 14 + d2, d3), memory_config: (133, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<188x1>>, >
shape: #ttnn.shape<1x428x14x14>
tensor<[1,428,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5992 + d1 * 14 + d2, d3), memory_config: (188, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x46>
tensor<[1,46,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x47>
tensor<[1,47,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x48>
tensor<[1,48,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x960x7x7>
tensor<[1,960,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 7 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x49>
tensor<[1,49,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x50>
tensor<[1,50,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x5>
tensor<[1,5,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x5x16x64>
tensor<[1,5,16,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 16 + d2, d3), memory_config: (3, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x51>
tensor<[1,51,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<476x1>>, >
shape: #ttnn.shape<1x1088x14x14>
tensor<[1,1088,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15232 + d1 * 14 + d2, d3), memory_config: (476, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x1>>, >
shape: #ttnn.shape<1x1024x28x28>
tensor<[1,1024,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 28 + d2, d3), memory_config: (896, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x1280x7x7>
tensor<[1,1280,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 7 + d2, d3), memory_config: (280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x52>
tensor<[1,52,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x53>
tensor<[1,53,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x54>
tensor<[1,54,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x55>
tensor<[1,55,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x56>
tensor<[1,56,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x1>>, >
shape: #ttnn.shape<1x112x14x14>
tensor<[1,112,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 14 + d2, d3), memory_config: (49, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x57>
tensor<[1,57,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x58>
tensor<[1,58,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x59>
tensor<[1,59,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x60>
tensor<[1,60,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6>
tensor<[1,6,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x6x2x64>
tensor<[1,6,2,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 * 2 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x6x11x64>
tensor<[1,6,11,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 66 + d1 * 11 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x6x12x64>
tensor<[1,6,12,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 72 + d1 * 12 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x6x13x64>
tensor<[1,6,13,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 78 + d1 * 13 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x6x14x64>
tensor<[1,6,14,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 84 + d1 * 14 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x6x15x64>
tensor<[1,6,15,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 90 + d1 * 15 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x6x16x64>
tensor<[1,6,16,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 96 + d1 * 16 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x6x17x64>
tensor<[1,6,17,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 102 + d1 * 17 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x6x18x64>
tensor<[1,6,18,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 108 + d1 * 18 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x6x19x64>
tensor<[1,6,19,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 114 + d1 * 19 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x6x20x64>
tensor<[1,6,20,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 * 20 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x6x3x64>
tensor<[1,6,3,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18 + d1 * 3 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x6x4x64>
tensor<[1,6,4,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 * 4 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x6x5x64>
tensor<[1,6,5,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30 + d1 * 5 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x6x6x64>
tensor<[1,6,6,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 36 + d1 * 6 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x6x7x64>
tensor<[1,6,7,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 42 + d1 * 7 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x6x8x64>
tensor<[1,6,8,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 48 + d1 * 8 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x6x9x64>
tensor<[1,6,9,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 54 + d1 * 9 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x6x10x64>
tensor<[1,6,10,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 60 + d1 * 10 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x120x28x28>
tensor<[1,120,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 28 + d2, d3), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x61>
tensor<[1,61,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x62>
tensor<[1,62,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x63>
tensor<[1,63,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x64>
tensor<[1,64,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<480x5>>, >
shape: #ttnn.shape<1x128x120x160>
tensor<[1,128,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15360 + d1 * 120 + d2, d3), memory_config: (480, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<1x128x128x128>
tensor<[1,128,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (512, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x7>>, >
shape: #ttnn.shape<1x128x224x224>
tensor<[1,128,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 224 + d2, d3), memory_config: (896, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x256x28x28>
tensor<[1,256,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 28 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x2>>, >
shape: #ttnn.shape<1x128x30x40>
tensor<[1,128,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 30 + d2, d3), memory_config: (120, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<784x2>>, >
shape: #ttnn.shape<1x448x56x56>
tensor<[1,448,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25088 + d1 * 56 + d2, d3), memory_config: (784, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x2>>, >
shape: #ttnn.shape<1x256x56x56>
tensor<[1,256,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 56 + d2, d3), memory_config: (448, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x2>>, >
shape: #ttnn.shape<1x224x56x56>
tensor<[1,224,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 56 + d2, d3), memory_config: (392, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x2>>, >
shape: #ttnn.shape<1x192x56x56>
tensor<[1,192,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 56 + d2, d3), memory_config: (336, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x2>>, >
shape: #ttnn.shape<1x160x56x56>
tensor<[1,160,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 56 + d2, d3), memory_config: (280, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x2>>, >
shape: #ttnn.shape<1x128x56x56>
tensor<[1,128,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 56 + d2, d3), memory_config: (224, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x2>>, >
shape: #ttnn.shape<1x96x56x56>
tensor<[1,96,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 56 + d2, d3), memory_config: (168, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x2>>, >
shape: #ttnn.shape<1x128x56x56>
tensor<[1,128,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 56 + d2, d3), memory_config: (224, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<240x3>>, >
shape: #ttnn.shape<1x128x60x80>
tensor<[1,128,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7680 + d1 * 60 + d2, d3), memory_config: (240, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x24>>, >
shape: #ttnn.shape<1x64x64x768>
tensor<[1,64,64,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (128, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x48>>, >
shape: #ttnn.shape<1x64x64x1536>
tensor<[1,64,64,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (128, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x2>>, >
shape: #ttnn.shape<1x128x64x64>
tensor<[1,128,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 64 + d2, d3), memory_config: (256, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<365x3>>, >
shape: #ttnn.shape<1x160x73x73>
tensor<[1,160,73,73,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11680 + d1 * 73 + d2, d3), memory_config: (365, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x960x32x32>
tensor<[1,960,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 32 + d2, d3), memory_config: (960, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x1>>, >
shape: #ttnn.shape<1x1280x32x32>
tensor<[1,1280,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 32 + d2, d3), memory_config: (1280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1920x2>>, >
shape: #ttnn.shape<1x960x64x64>
tensor<[1,960,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 61440 + d1 * 64 + d2, d3), memory_config: (1920, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x65>
tensor<[1,65,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x66>
tensor<[1,66,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x67>
tensor<[1,67,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x68>
tensor<[1,68,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x69>
tensor<[1,69,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x70>
tensor<[1,70,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7>
tensor<[1,7,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x64>>, >
shape: #ttnn.shape<1x7x7x2048>
tensor<[1,7,7,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x71>
tensor<[1,71,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x2>>, >
shape: #ttnn.shape<1x71x7x64>
tensor<[1,71,7,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 497 + d1 * 7 + d2, d3), memory_config: (16, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x72>
tensor<[1,72,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x73>
tensor<[1,73,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x74>
tensor<[1,74,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x75>
tensor<[1,75,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x76>
tensor<[1,76,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3072x2>>, >
shape: #ttnn.shape<1x1536x64x64>
tensor<[1,1536,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 98304 + d1 * 64 + d2, d3), memory_config: (3072, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<315x1>>, >
shape: #ttnn.shape<1x1440x7x7>
tensor<[1,1440,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10080 + d1 * 7 + d2, d3), memory_config: (315, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x77>
tensor<[1,77,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x78>
tensor<[1,78,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x79>
tensor<[1,79,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x80>
tensor<[1,80,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8>
tensor<[1,8,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x8x2x64>
tensor<[1,8,2,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 * 2 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x8x11x64>
tensor<[1,8,11,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 88 + d1 * 11 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x8x12x64>
tensor<[1,8,12,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 96 + d1 * 12 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x4>>, >
shape: #ttnn.shape<1x16x112x112>
tensor<[1,16,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 112 + d2, d3), memory_config: (56, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x8x13x64>
tensor<[1,8,13,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 104 + d1 * 13 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x8x14x64>
tensor<[1,8,14,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 112 + d1 * 14 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x8x15x64>
tensor<[1,8,15,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 * 15 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x8x16x64>
tensor<[1,8,16,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 128 + d1 * 16 + d2, d3), memory_config: (4, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x8x17x64>
tensor<[1,8,17,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 136 + d1 * 17 + d2, d3), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x8x18x64>
tensor<[1,8,18,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 18 + d2, d3), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x8x19x64>
tensor<[1,8,19,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 152 + d1 * 19 + d2, d3), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x8x20x64>
tensor<[1,8,20,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 160 + d1 * 20 + d2, d3), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x8x3x64>
tensor<[1,8,3,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 * 3 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x8x4x64>
tensor<[1,8,4,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 * 4 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x4>>, >
shape: #ttnn.shape<1x8x32x128>
tensor<[1,8,32,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 32 + d2, d3), memory_config: (8, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x8x5x64>
tensor<[1,8,5,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40 + d1 * 5 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x8x6x64>
tensor<[1,8,6,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 48 + d1 * 6 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x8x7x64>
tensor<[1,8,7,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 56 + d1 * 7 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x8x8x64>
tensor<[1,8,8,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x201x768>
tensor<[1,201,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (7, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x8x9x64>
tensor<[1,8,9,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 72 + d1 * 9 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x8x10x64>
tensor<[1,8,10,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 10 + d2, d3), memory_config: (3, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x160x7x7>
tensor<[1,160,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 7 + d2, d3), memory_config: (35, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x81>
tensor<[1,81,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x82>
tensor<[1,82,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x83>
tensor<[1,83,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x84>
tensor<[1,84,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x85>
tensor<[1,85,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x86>
tensor<[1,86,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x87>
tensor<[1,87,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x88>
tensor<[1,88,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x89>
tensor<[1,89,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x1920x7x7>
tensor<[1,1920,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 7 + d2, d3), memory_config: (420, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<413x1>>, >
shape: #ttnn.shape<1x1888x7x7>
tensor<[1,1888,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13216 + d1 * 7 + d2, d3), memory_config: (413, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x1>>, >
shape: #ttnn.shape<1x1856x7x7>
tensor<[1,1856,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 7 + d2, d3), memory_config: (406, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<399x1>>, >
shape: #ttnn.shape<1x1824x7x7>
tensor<[1,1824,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12768 + d1 * 7 + d2, d3), memory_config: (399, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x1792x7x7>
tensor<[1,1792,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 7 + d2, d3), memory_config: (392, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<385x1>>, >
shape: #ttnn.shape<1x1760x7x7>
tensor<[1,1760,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12320 + d1 * 7 + d2, d3), memory_config: (385, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<378x1>>, >
shape: #ttnn.shape<1x1728x7x7>
tensor<[1,1728,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12096 + d1 * 7 + d2, d3), memory_config: (378, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<371x1>>, >
shape: #ttnn.shape<1x1696x7x7>
tensor<[1,1696,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11872 + d1 * 7 + d2, d3), memory_config: (371, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x1664x7x7>
tensor<[1,1664,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 7 + d2, d3), memory_config: (364, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<357x1>>, >
shape: #ttnn.shape<1x1632x7x7>
tensor<[1,1632,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11424 + d1 * 7 + d2, d3), memory_config: (357, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<350x1>>, >
shape: #ttnn.shape<1x1600x7x7>
tensor<[1,1600,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11200 + d1 * 7 + d2, d3), memory_config: (350, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<343x1>>, >
shape: #ttnn.shape<1x1568x7x7>
tensor<[1,1568,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10976 + d1 * 7 + d2, d3), memory_config: (343, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x1536x7x7>
tensor<[1,1536,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 7 + d2, d3), memory_config: (336, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<329x1>>, >
shape: #ttnn.shape<1x1504x7x7>
tensor<[1,1504,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10528 + d1 * 7 + d2, d3), memory_config: (329, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<322x1>>, >
shape: #ttnn.shape<1x1472x7x7>
tensor<[1,1472,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10304 + d1 * 7 + d2, d3), memory_config: (322, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<315x1>>, >
shape: #ttnn.shape<1x1440x7x7>
tensor<[1,1440,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10080 + d1 * 7 + d2, d3), memory_config: (315, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x1408x7x7>
tensor<[1,1408,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 7 + d2, d3), memory_config: (308, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<301x1>>, >
shape: #ttnn.shape<1x1376x7x7>
tensor<[1,1376,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9632 + d1 * 7 + d2, d3), memory_config: (301, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x1344x7x7>
tensor<[1,1344,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 7 + d2, d3), memory_config: (294, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<287x1>>, >
shape: #ttnn.shape<1x1312x7x7>
tensor<[1,1312,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9184 + d1 * 7 + d2, d3), memory_config: (287, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x1280x7x7>
tensor<[1,1280,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 7 + d2, d3), memory_config: (280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<273x1>>, >
shape: #ttnn.shape<1x1248x7x7>
tensor<[1,1248,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8736 + d1 * 7 + d2, d3), memory_config: (273, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<266x1>>, >
shape: #ttnn.shape<1x1216x7x7>
tensor<[1,1216,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8512 + d1 * 7 + d2, d3), memory_config: (266, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<259x1>>, >
shape: #ttnn.shape<1x1184x7x7>
tensor<[1,1184,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8288 + d1 * 7 + d2, d3), memory_config: (259, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x1152x7x7>
tensor<[1,1152,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 7 + d2, d3), memory_config: (252, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<245x1>>, >
shape: #ttnn.shape<1x1120x7x7>
tensor<[1,1120,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7840 + d1 * 7 + d2, d3), memory_config: (245, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<238x1>>, >
shape: #ttnn.shape<1x1088x7x7>
tensor<[1,1088,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7616 + d1 * 7 + d2, d3), memory_config: (238, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<231x1>>, >
shape: #ttnn.shape<1x1056x7x7>
tensor<[1,1056,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7392 + d1 * 7 + d2, d3), memory_config: (231, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x1024x7x7>
tensor<[1,1024,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 7 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<217x1>>, >
shape: #ttnn.shape<1x992x7x7>
tensor<[1,992,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6944 + d1 * 7 + d2, d3), memory_config: (217, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x960x7x7>
tensor<[1,960,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 7 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<203x1>>, >
shape: #ttnn.shape<1x928x7x7>
tensor<[1,928,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6496 + d1 * 7 + d2, d3), memory_config: (203, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x90>
tensor<[1,90,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x7>>, >
shape: #ttnn.shape<1x201>
tensor<[1,201,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 7, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9>
tensor<[1,9,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x91>
tensor<[1,91,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x92>
tensor<[1,92,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<81x1>>, >
shape: #ttnn.shape<1x184x14x14>
tensor<[1,184,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2576 + d1 * 14 + d2, d3), memory_config: (81, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x93>
tensor<[1,93,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x94>
tensor<[1,94,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x95>
tensor<[1,95,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x96>
tensor<[1,96,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x2>>, >
shape: #ttnn.shape<1x384x35x35>
tensor<[1,384,35,35,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 35 + d2, d3), memory_config: (420, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<426x3>>, >
shape: #ttnn.shape<1x192x71x71>
tensor<[1,192,71,71,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13632 + d1 * 71 + d2, d3), memory_config: (426, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x97>
tensor<[1,97,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x98>
tensor<[1,98,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x99>
tensor<[1,99,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x100>
tensor<[1,100,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<4x13x768>
tensor<[4,13,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<4x13>
tensor<[4,13,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<3234x2>
tensor<[3234,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x1024>>, >
shape: #ttnn.shape<1x1x100x1024>
tensor<[1,1,100,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x1024>>, >
shape: #ttnn.shape<1x1x100x1024>
tensor<[1,1,100,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x1536>>, >
shape: #ttnn.shape<1x1x100x1536>
tensor<[1,1,100,1536,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 1536, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x256>>, >
shape: #ttnn.shape<1x1x16384x256>
tensor<[1,1,16384,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1024>>, >
shape: #ttnn.shape<1x1x196x1024>
tensor<[1,1,196,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1024>>, >
shape: #ttnn.shape<1x1x196x1024>
tensor<[1,1,196,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1024>>, >
shape: #ttnn.shape<1x1x49x1024>
tensor<[1,1,49,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1024>>, >
shape: #ttnn.shape<1x1x196x1024>
tensor<[1,1,196,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x2048>>, >
shape: #ttnn.shape<1x1x196x2048>
tensor<[1,1,196,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 2048, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x2048>>, >
shape: #ttnn.shape<1x1x49x2048>
tensor<[1,1,49,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 2048, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x1x196x256>
tensor<[1,1,196,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x512>>, >
shape: #ttnn.shape<1x1x196x512>
tensor<[1,1,196,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1024>>, >
shape: #ttnn.shape<1x1x256x1024>
tensor<[1,1,256,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x1024x16x16>
tensor<[1,1024,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 16 + d2, d3), memory_config: (512, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x255>>, >
shape: #ttnn.shape<1x1x256x255>
tensor<[1,1,256,255,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 255, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x255x16x16>
tensor<[1,255,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4080 + d1 * 16 + d2, d3), memory_config: (128, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x512>>, >
shape: #ttnn.shape<1x1x256x512>
tensor<[1,1,256,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x128>>, >
shape: #ttnn.shape<1x1x289x128>
tensor<[1,1,289,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x192>>, >
shape: #ttnn.shape<1x1x289x192>
tensor<[1,1,289,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x256>>, >
shape: #ttnn.shape<1x1x289x256>
tensor<[1,1,289,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x384>>, >
shape: #ttnn.shape<1x1x289x384>
tensor<[1,1,289,384,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 384, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x1024>>, >
shape: #ttnn.shape<1x1x100x1024>
tensor<[1,1,100,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1024>>, >
shape: #ttnn.shape<1x1x1x1024>
tensor<[1,1,1,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x1024x1x1>
tensor<[1,1024,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 + d2, d3), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1024>>, >
shape: #ttnn.shape<1x1x196x1024>
tensor<[1,1,196,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<920x2048>>, >
shape: #ttnn.shape<1x1x920x2048>
tensor<[1,1,920,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 920 + d1 * 40 + d2, d3), memory_config: (920, 2048, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3600x256>>, >
shape: #ttnn.shape<1x1x3600x256>
tensor<[1,1,3600,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 80 + d2, d3), memory_config: (3600, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3600x512>>, >
shape: #ttnn.shape<1x1x3600x512>
tensor<[1,1,3600,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 80 + d2, d3), memory_config: (3600, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<512x256>>, >
shape: #ttnn.shape<1x1x512x256>
tensor<[1,1,512,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 + d2, d3), memory_config: (512, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<1x256x512>
tensor<[1,256,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1024>>, >
shape: #ttnn.shape<1x1x49x1024>
tensor<[1,1,49,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1024>>, >
shape: #ttnn.shape<1x1x49x1024>
tensor<[1,1,49,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1024>>, >
shape: #ttnn.shape<1x1x49x1024>
tensor<[1,1,49,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x2048>>, >
shape: #ttnn.shape<1x1x49x2048>
tensor<[1,1,49,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 2048, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x2048x7x7>
tensor<[1,2048,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 7 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x2048>>, >
shape: #ttnn.shape<1x1x49x2048>
tensor<[1,1,49,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 2048, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x40>>, >
shape: #ttnn.shape<1x1x3136x40>
tensor<[1,1,3136,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x462>>, >
shape: #ttnn.shape<1x1x49x462>
tensor<[1,1,49,462,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 462, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x768>>, >
shape: #ttnn.shape<1x1x196x768>
tensor<[1,1,196,768,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x112>>, >
shape: #ttnn.shape<1x1x49x112>
tensor<[1,1,49,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 112, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x224>>, >
shape: #ttnn.shape<1x1x196x224>
tensor<[1,1,196,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 224, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x336>>, >
shape: #ttnn.shape<1x1x196x336>
tensor<[1,1,196,336,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 336, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x672>>, >
shape: #ttnn.shape<1x1x196x672>
tensor<[1,1,196,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<225x672>>, >
shape: #ttnn.shape<1x1x225x672>
tensor<[1,1,225,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 225 + d1 * 15 + d2, d3), memory_config: (225, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x672>>, >
shape: #ttnn.shape<1x1x400x672>
tensor<[1,1,400,672,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 672, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<576x672>>, >
shape: #ttnn.shape<1x1x576x672>
tensor<[1,1,576,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (576, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x160>>, >
shape: #ttnn.shape<1x1x49x160>
tensor<[1,1,49,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x672>>, >
shape: #ttnn.shape<1x1x49x672>
tensor<[1,1,49,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1152>>, >
shape: #ttnn.shape<1x1x49x1152>
tensor<[1,1,49,1152,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1152, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1152>>, >
shape: #ttnn.shape<1x1x49x1152>
tensor<[1,1,49,1152,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1152, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x192>>, >
shape: #ttnn.shape<1x1x49x192>
tensor<[1,1,49,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x320>>, >
shape: #ttnn.shape<1x1x49x320>
tensor<[1,1,49,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x1152>>, >
shape: #ttnn.shape<1x1x64x1152>
tensor<[1,1,64,1152,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 1152, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x1152>>, >
shape: #ttnn.shape<1x1x64x1152>
tensor<[1,1,64,1152,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 1152, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x192>>, >
shape: #ttnn.shape<1x1x64x192>
tensor<[1,1,64,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x320>>, >
shape: #ttnn.shape<1x1x64x320>
tensor<[1,1,64,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x40>>, >
shape: #ttnn.shape<1x1x196x40>
tensor<[1,1,196,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x34>>, >
shape: #ttnn.shape<1x1x784x34>
tensor<[1,1,784,34,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 34, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x120>>, >
shape: #ttnn.shape<1x1x196x120>
tensor<[1,1,196,120,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x120>>, >
shape: #ttnn.shape<1x1x196x120>
tensor<[1,1,196,120,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x720>>, >
shape: #ttnn.shape<1x1x289x720>
tensor<[1,1,289,720,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 720, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1x32>
tensor<[1,1,1,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x480>>, >
shape: #ttnn.shape<1x1x1x480>
tensor<[1,1,1,480,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 480, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<15x1>>, >
shape: #ttnn.shape<1x480x1x1>
tensor<[1,480,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 480 + d1 + d2, d3), memory_config: (15, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x120>>, >
shape: #ttnn.shape<1x1x784x120>
tensor<[1,1,784,120,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x120>>, >
shape: #ttnn.shape<1x1x784x120>
tensor<[1,1,784,120,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x20>>, >
shape: #ttnn.shape<1x1x784x20>
tensor<[1,1,784,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 20, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x40>>, >
shape: #ttnn.shape<1x1x784x40>
tensor<[1,1,784,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1600x120>>, >
shape: #ttnn.shape<1x1x1600x120>
tensor<[1,1,1600,120,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 40 + d2, d3), memory_config: (1600, 120, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1600x40>>, >
shape: #ttnn.shape<1x1x1600x40>
tensor<[1,1,1600,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 40 + d2, d3), memory_config: (1600, 40, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x46>>, >
shape: #ttnn.shape<1x1x784x46>
tensor<[1,1,784,46,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 46, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<81x1248>>, >
shape: #ttnn.shape<1x1x81x1248>
tensor<[1,1,81,1248,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 81 + d1 * 9 + d2, d3), memory_config: (81, 1248, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<81x1248>>, >
shape: #ttnn.shape<1x1x81x1248>
tensor<[1,1,81,1248,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 81 + d1 * 9 + d2, d3), memory_config: (81, 1248, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<81x208>>, >
shape: #ttnn.shape<1x1x81x208>
tensor<[1,1,81,208,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 81 + d1 * 9 + d2, d3), memory_config: (81, 208, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<81x352>>, >
shape: #ttnn.shape<1x1x81x352>
tensor<[1,1,81,352,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 81 + d1 * 9 + d2, d3), memory_config: (81, 352, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1280>>, >
shape: #ttnn.shape<1x1x256x1280>
tensor<[1,1,256,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x1280x16x16>
tensor<[1,1280,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 16 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1280>>, >
shape: #ttnn.shape<1x1x256x1280>
tensor<[1,1,256,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x1280x16x16>
tensor<[1,1280,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 16 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x1280>>, >
shape: #ttnn.shape<1x1x64x1280>
tensor<[1,1,64,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1200x1280>>, >
shape: #ttnn.shape<1x1x1200x1280>
tensor<[1,1,1200,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 40 + d2, d3), memory_config: (1200, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1200x2>>, >
shape: #ttnn.shape<1x1280x30x40>
tensor<[1,1280,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 38400 + d1 * 30 + d2, d3), memory_config: (1200, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x1280>>, >
shape: #ttnn.shape<1x1x1024x1280>
tensor<[1,1,1024,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x1>>, >
shape: #ttnn.shape<1x1280x32x32>
tensor<[1,1280,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 32 + d2, d3), memory_config: (1280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x640>>, >
shape: #ttnn.shape<1x1x1024x640>
tensor<[1,1,1024,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x640>>, >
shape: #ttnn.shape<1x1x1024x640>
tensor<[1,1,1024,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x512>>, >
shape: #ttnn.shape<1x1x49x512>
tensor<[1,1,49,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x1280>>, >
shape: #ttnn.shape<1x1x64x1280>
tensor<[1,1,64,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x1280>>, >
shape: #ttnn.shape<1x1x64x1280>
tensor<[1,1,64,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x128>>, >
shape: #ttnn.shape<1x1x12544x128>
tensor<[1,1,12544,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x4>>, >
shape: #ttnn.shape<1x128x112x112>
tensor<[1,128,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 112 + d2, d3), memory_config: (448, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x128>>, >
shape: #ttnn.shape<1x1x12544x128>
tensor<[1,1,12544,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<19200x64>>, >
shape: #ttnn.shape<1x1x19200x64>
tensor<[1,1,19200,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 160 + d2, d3), memory_config: (19200, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<240x5>>, >
shape: #ttnn.shape<1x64x120x160>
tensor<[1,64,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7680 + d1 * 120 + d2, d3), memory_config: (240, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x128>>, >
shape: #ttnn.shape<1x1x16384x128>
tensor<[1,1,16384,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<1x128x128x128>
tensor<[1,128,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (512, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x256>>, >
shape: #ttnn.shape<1x1x4096x256>
tensor<[1,1,4096,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x64>>, >
shape: #ttnn.shape<1x1x16384x64>
tensor<[1,1,16384,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x64>>, >
shape: #ttnn.shape<1x1x16384x64>
tensor<[1,1,16384,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x1x196x256>
tensor<[1,1,196,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x1x196x256>
tensor<[1,1,196,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x32>>, >
shape: #ttnn.shape<1x1x196x32>
tensor<[1,1,196,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x512>>, >
shape: #ttnn.shape<1x1x196x512>
tensor<[1,1,196,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<22500x128>>, >
shape: #ttnn.shape<1x1x22500x128>
tensor<[1,1,22500,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22500 + d1 * 150 + d2, d3), memory_config: (22500, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<22500x128>>, >
shape: #ttnn.shape<1x1x22500x128>
tensor<[1,1,22500,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22500 + d1 * 150 + d2, d3), memory_config: (22500, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5625x128>>, >
shape: #ttnn.shape<1x1x5625x128>
tensor<[1,1,5625,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5625 + d1 * 75 + d2, d3), memory_config: (5625, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14400x128>>, >
shape: #ttnn.shape<1x1x14400x128>
tensor<[1,1,14400,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 160 + d2, d3), memory_config: (14400, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x1x1x128>
tensor<[1,1,1,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 128, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x1x24>
tensor<[1,1,1,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 24, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24x1x1>
tensor<[1,24,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x546>>, >
shape: #ttnn.shape<1x1x1x546>
tensor<[1,1,1,546,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 546, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x546x1x1>
tensor<[1,546,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 546 + d1 + d2, d3), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<50176x64>>, >
shape: #ttnn.shape<1x1x50176x64>
tensor<[1,1,50176,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 50176 + d1 * 224 + d2, d3), memory_config: (50176, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x128x28x28>
tensor<[1,128,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 28 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x128x28x28>
tensor<[1,128,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 28 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x128x28x28>
tensor<[1,128,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 28 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x16>>, >
shape: #ttnn.shape<1x1x784x16>
tensor<[1,1,784,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x192>>, >
shape: #ttnn.shape<1x1x784x192>
tensor<[1,1,784,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x19>>, >
shape: #ttnn.shape<1x1x784x19>
tensor<[1,1,784,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 19, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<17x1>>, >
shape: #ttnn.shape<1x19x28x28>
tensor<[1,19,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 532 + d1 * 28 + d2, d3), memory_config: (17, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x256>>, >
shape: #ttnn.shape<1x1x784x256>
tensor<[1,1,784,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x1x196x256>
tensor<[1,1,196,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x1x196x256>
tensor<[1,1,196,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x32>>, >
shape: #ttnn.shape<1x1x784x32>
tensor<[1,1,784,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x38>>, >
shape: #ttnn.shape<1x1x784x38>
tensor<[1,1,784,38,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 38, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<34x1>>, >
shape: #ttnn.shape<1x38x28x28>
tensor<[1,38,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1064 + d1 * 28 + d2, d3), memory_config: (34, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x512x28x28>
tensor<[1,512,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 28 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x256>>, >
shape: #ttnn.shape<1x1x4x256>
tensor<[1,1,4,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4 + d1 * 2 + d2, d3), memory_config: (4, 256, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1200x64>>, >
shape: #ttnn.shape<1x1x1200x64>
tensor<[1,1,1200,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 40 + d2, d3), memory_config: (1200, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x2>>, >
shape: #ttnn.shape<1x64x30x40>
tensor<[1,64,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 30 + d2, d3), memory_config: (60, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x128>>, >
shape: #ttnn.shape<1x1x1024x128>
tensor<[1,1,1024,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x128>>, >
shape: #ttnn.shape<1x1x1024x128>
tensor<[1,1,1024,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x256>>, >
shape: #ttnn.shape<1x1x1024x256>
tensor<[1,1,1024,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x128>>, >
shape: #ttnn.shape<1x1x4x128>
tensor<[1,1,4,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4 + d1 * 2 + d2, d3), memory_config: (4, 128, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x256>>, >
shape: #ttnn.shape<1x1x9x256>
tensor<[1,1,9,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 * 3 + d2, d3), memory_config: (9, 256, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x256>>, >
shape: #ttnn.shape<1x1x3136x256>
tensor<[1,1,3136,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x256>>, >
shape: #ttnn.shape<1x1x3136x256>
tensor<[1,1,3136,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x2>>, >
shape: #ttnn.shape<1x256x56x56>
tensor<[1,256,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 56 + d2, d3), memory_config: (448, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x256>>, >
shape: #ttnn.shape<1x1x784x256>
tensor<[1,1,784,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x256x28x28>
tensor<[1,256,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 28 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x256>>, >
shape: #ttnn.shape<1x1x3136x256>
tensor<[1,1,3136,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x32>>, >
shape: #ttnn.shape<1x1x3136x32>
tensor<[1,1,3136,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x64>>, >
shape: #ttnn.shape<1x1x3136x64>
tensor<[1,1,3136,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x128>>, >
shape: #ttnn.shape<1x1x9x128>
tensor<[1,1,9,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 * 3 + d2, d3), memory_config: (9, 128, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<300x128>>, >
shape: #ttnn.shape<1x1x300x128>
tensor<[1,1,300,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 300 + d1 * 20 + d2, d3), memory_config: (300, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x1>>, >
shape: #ttnn.shape<1x128x15x20>
tensor<[1,128,15,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 15 + d2, d3), memory_config: (60, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1200x320>>, >
shape: #ttnn.shape<1x1x1200x320>
tensor<[1,1,1200,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 40 + d2, d3), memory_config: (1200, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x2>>, >
shape: #ttnn.shape<1x320x30x40>
tensor<[1,320,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 30 + d2, d3), memory_config: (300, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4800x64>>, >
shape: #ttnn.shape<1x1x4800x64>
tensor<[1,1,4800,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 80 + d2, d3), memory_config: (4800, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x3>>, >
shape: #ttnn.shape<1x64x60x80>
tensor<[1,64,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 60 + d2, d3), memory_config: (120, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4800x64>>, >
shape: #ttnn.shape<1x1x4800x64>
tensor<[1,1,4800,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 80 + d2, d3), memory_config: (4800, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x3>>, >
shape: #ttnn.shape<1x64x60x80>
tensor<[1,64,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 60 + d2, d3), memory_config: (120, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x128>>, >
shape: #ttnn.shape<1x1x4096x128>
tensor<[1,1,4096,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x128>>, >
shape: #ttnn.shape<1x1x4096x128>
tensor<[1,1,4096,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x128>>, >
shape: #ttnn.shape<1x1x1024x128>
tensor<[1,1,1024,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x255>>, >
shape: #ttnn.shape<1x1x4096x255>
tensor<[1,1,4096,255,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 255, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<510x2>>, >
shape: #ttnn.shape<1x255x64x64>
tensor<[1,255,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16320 + d1 * 64 + d2, d3), memory_config: (510, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x256>>, >
shape: #ttnn.shape<1x1x4096x256>
tensor<[1,1,4096,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x256>>, >
shape: #ttnn.shape<1x1x1024x256>
tensor<[1,1,1024,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x64>>, >
shape: #ttnn.shape<1x1x4096x64>
tensor<[1,1,4096,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5625x128>>, >
shape: #ttnn.shape<1x1x5625x128>
tensor<[1,1,5625,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5625 + d1 * 75 + d2, d3), memory_config: (5625, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5625x128>>, >
shape: #ttnn.shape<1x1x5625x128>
tensor<[1,1,5625,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5625 + d1 * 75 + d2, d3), memory_config: (5625, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5625x256>>, >
shape: #ttnn.shape<1x1x5625x256>
tensor<[1,1,5625,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5625 + d1 * 75 + d2, d3), memory_config: (5625, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x32>>, >
shape: #ttnn.shape<1x1x49x32>
tensor<[1,1,49,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14400x128>>, >
shape: #ttnn.shape<1x1x14400x128>
tensor<[1,1,14400,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 160 + d2, d3), memory_config: (14400, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14400x512>>, >
shape: #ttnn.shape<1x1x14400x512>
tensor<[1,1,14400,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 160 + d2, d3), memory_config: (14400, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x12>>, >
shape: #ttnn.shape<1x1x3136x12>
tensor<[1,1,3136,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 12, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1344>>, >
shape: #ttnn.shape<1x1x196x1344>
tensor<[1,1,196,1344,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1344, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1344>>, >
shape: #ttnn.shape<1x1x196x1344>
tensor<[1,1,196,1344,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1344, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x2520>>, >
shape: #ttnn.shape<1x1x196x2520>
tensor<[1,1,196,2520,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 2520, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x2520>>, >
shape: #ttnn.shape<1x1x49x2520>
tensor<[1,1,49,2520,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 2520, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1344>>, >
shape: #ttnn.shape<1x1x196x1344>
tensor<[1,1,196,1344,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1344, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x816>>, >
shape: #ttnn.shape<1x1x361x816>
tensor<[1,1,361,816,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 816, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x1392>>, >
shape: #ttnn.shape<1x1x100x1392>
tensor<[1,1,100,1392,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 1392, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x1392>>, >
shape: #ttnn.shape<1x1x100x1392>
tensor<[1,1,100,1392,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 1392, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x232>>, >
shape: #ttnn.shape<1x1x100x232>
tensor<[1,1,100,232,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 232, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x384>>, >
shape: #ttnn.shape<1x1x100x384>
tensor<[1,1,100,384,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 384, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1392>>, >
shape: #ttnn.shape<1x1x196x1392>
tensor<[1,1,196,1392,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1392, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1392>>, >
shape: #ttnn.shape<1x1x196x1392>
tensor<[1,1,196,1392,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1392, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x3712>>, >
shape: #ttnn.shape<1x1x196x3712>
tensor<[1,1,196,3712,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 3712, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x3712>>, >
shape: #ttnn.shape<1x1x49x3712>
tensor<[1,1,49,3712,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 3712, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x174>>, >
shape: #ttnn.shape<1x1x1x174>
tensor<[1,1,1,174,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 174, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<1x174x1x1>
tensor<[1,174,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 174 + d1 + d2, d3), memory_config: (6, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x348>>, >
shape: #ttnn.shape<1x1x1x348>
tensor<[1,1,1,348,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 348, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x1>>, >
shape: #ttnn.shape<1x348x1x1>
tensor<[1,348,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 348 + d1 + d2, d3), memory_config: (11, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1392>>, >
shape: #ttnn.shape<1x1x196x1392>
tensor<[1,1,196,1392,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1392, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x68>>, >
shape: #ttnn.shape<1x1x3136x68>
tensor<[1,1,3136,68,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 68, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1024>>, >
shape: #ttnn.shape<1x1x49x1024>
tensor<[1,1,49,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x288>>, >
shape: #ttnn.shape<1x1x196x288>
tensor<[1,1,196,288,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 288, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5625x144>>, >
shape: #ttnn.shape<1x1x5625x144>
tensor<[1,1,5625,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5625 + d1 * 75 + d2, d3), memory_config: (5625, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9025x144>>, >
shape: #ttnn.shape<1x1x9025x144>
tensor<[1,1,9025,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9025 + d1 * 95 + d2, d3), memory_config: (9025, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x28>>, >
shape: #ttnn.shape<1x1x784x28>
tensor<[1,1,784,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 28, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x32>>, >
shape: #ttnn.shape<1x1x784x32>
tensor<[1,1,784,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x40>>, >
shape: #ttnn.shape<1x1x784x40>
tensor<[1,1,784,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<900x40>>, >
shape: #ttnn.shape<1x1x900x40>
tensor<[1,1,900,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 900 + d1 * 30 + d2, d3), memory_config: (900, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1089x48>>, >
shape: #ttnn.shape<1x1x1089x48>
tensor<[1,1,1089,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1089 + d1 * 33 + d2, d3), memory_config: (1089, 48, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x144>>, >
shape: #ttnn.shape<1x1x3136x144>
tensor<[1,1,3136,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x144>>, >
shape: #ttnn.shape<1x1x784x144>
tensor<[1,1,784,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x24>>, >
shape: #ttnn.shape<1x1x3136x24>
tensor<[1,1,3136,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x144>>, >
shape: #ttnn.shape<1x1x784x144>
tensor<[1,1,784,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3600x144>>, >
shape: #ttnn.shape<1x1x3600x144>
tensor<[1,1,3600,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 60 + d2, d3), memory_config: (3600, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3600x24>>, >
shape: #ttnn.shape<1x1x3600x24>
tensor<[1,1,3600,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 60 + d2, d3), memory_config: (3600, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<900x144>>, >
shape: #ttnn.shape<1x1x900x144>
tensor<[1,1,900,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 900 + d1 * 30 + d2, d3), memory_config: (900, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4225x144>>, >
shape: #ttnn.shape<1x1x4225x144>
tensor<[1,1,4225,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4225 + d1 * 65 + d2, d3), memory_config: (4225, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4225x24>>, >
shape: #ttnn.shape<1x1x4225x24>
tensor<[1,1,4225,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4225 + d1 * 65 + d2, d3), memory_config: (4225, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1089x144>>, >
shape: #ttnn.shape<1x1x1089x144>
tensor<[1,1,1089,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1089 + d1 * 33 + d2, d3), memory_config: (1089, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5625x32>>, >
shape: #ttnn.shape<1x1x5625x32>
tensor<[1,1,5625,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5625 + d1 * 75 + d2, d3), memory_config: (5625, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1024>>, >
shape: #ttnn.shape<1x1x49x1024>
tensor<[1,1,49,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x144>>, >
shape: #ttnn.shape<1x1x49x144>
tensor<[1,1,49,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x18>>, >
shape: #ttnn.shape<1x1x49x18>
tensor<[1,1,49,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 18, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x256>>, >
shape: #ttnn.shape<1x1x49x256>
tensor<[1,1,49,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x36>>, >
shape: #ttnn.shape<1x1x49x36>
tensor<[1,1,49,36,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 36, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x72>>, >
shape: #ttnn.shape<1x1x49x72>
tensor<[1,1,49,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 72, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9025x32>>, >
shape: #ttnn.shape<1x1x9025x32>
tensor<[1,1,9025,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9025 + d1 * 95 + d2, d3), memory_config: (9025, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x58>>, >
shape: #ttnn.shape<1x1x784x58>
tensor<[1,1,784,58,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 58, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x1536>>, >
shape: #ttnn.shape<1x1x100x1536>
tensor<[1,1,100,1536,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 1536, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x1536>>, >
shape: #ttnn.shape<1x1x100x1536>
tensor<[1,1,100,1536,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 1536, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x2048>>, >
shape: #ttnn.shape<1x1x100x2048>
tensor<[1,1,100,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 2048, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x256>>, >
shape: #ttnn.shape<1x1x64x256>
tensor<[1,1,64,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x384>>, >
shape: #ttnn.shape<1x1x64x384>
tensor<[1,1,64,384,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 384, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x68>>, >
shape: #ttnn.shape<1x1x196x68>
tensor<[1,1,196,68,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 68, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x320>>, >
shape: #ttnn.shape<1x1x196x320>
tensor<[1,1,196,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<576x960>>, >
shape: #ttnn.shape<1x1x576x960>
tensor<[1,1,576,960,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (576, 960, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x160>>, >
shape: #ttnn.shape<1x1x784x160>
tensor<[1,1,784,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x160>>, >
shape: #ttnn.shape<1x1x784x160>
tensor<[1,1,784,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x160>>, >
shape: #ttnn.shape<1x1x256x160>
tensor<[1,1,256,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x160x16x16>
tensor<[1,160,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 16 + d2, d3), memory_config: (80, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x256>>, >
shape: #ttnn.shape<1x1x256x256>
tensor<[1,1,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x256x16x16>
tensor<[1,256,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 16 + d2, d3), memory_config: (128, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x960>>, >
shape: #ttnn.shape<1x1x9x960>
tensor<[1,1,9,960,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 * 3 + d2, d3), memory_config: (9, 960, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5329x64>>, >
shape: #ttnn.shape<1x1x5329x64>
tensor<[1,1,5329,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5329 + d1 * 73 + d2, d3), memory_config: (5329, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x320>>, >
shape: #ttnn.shape<1x1x49x320>
tensor<[1,1,49,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x480>>, >
shape: #ttnn.shape<1x1x49x480>
tensor<[1,1,49,480,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 480, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x960>>, >
shape: #ttnn.shape<1x1x49x960>
tensor<[1,1,49,960,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 960, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x1632>>, >
shape: #ttnn.shape<1x1x144x1632>
tensor<[1,1,144,1632,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (144, 1632, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x1632>>, >
shape: #ttnn.shape<1x1x144x1632>
tensor<[1,1,144,1632,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (144, 1632, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x272>>, >
shape: #ttnn.shape<1x1x144x272>
tensor<[1,1,144,272,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (144, 272, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x448>>, >
shape: #ttnn.shape<1x1x144x448>
tensor<[1,1,144,448,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (144, 448, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x672>>, >
shape: #ttnn.shape<1x1x1x672>
tensor<[1,1,1,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x672x1x1>
tensor<[1,672,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 + d2, d3), memory_config: (21, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x16>>, >
shape: #ttnn.shape<1x1x12544x16>
tensor<[1,1,12544,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x16>>, >
shape: #ttnn.shape<1x1x12544x16>
tensor<[1,1,12544,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x16>>, >
shape: #ttnn.shape<1x1x3136x16>
tensor<[1,1,3136,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x24>>, >
shape: #ttnn.shape<1x1x12544x24>
tensor<[1,1,12544,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x64>>, >
shape: #ttnn.shape<1x1x12544x64>
tensor<[1,1,12544,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x8>>, >
shape: #ttnn.shape<1x1x12544x8>
tensor<[1,1,12544,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 8, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x96>>, >
shape: #ttnn.shape<1x1x12544x96>
tensor<[1,1,12544,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14400x96>>, >
shape: #ttnn.shape<1x1x14400x96>
tensor<[1,1,14400,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 120 + d2, d3), memory_config: (14400, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16900x96>>, >
shape: #ttnn.shape<1x1x16900x96>
tensor<[1,1,16900,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16900 + d1 * 130 + d2, d3), memory_config: (16900, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x48>>, >
shape: #ttnn.shape<1x1x196x48>
tensor<[1,1,196,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 48, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x4>>, >
shape: #ttnn.shape<1x1x196x4>
tensor<[1,1,196,4,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 4, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x4x14x14>
tensor<[1,4,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 56 + d1 * 14 + d2, d3), memory_config: (2, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<25600x16>>, >
shape: #ttnn.shape<1x1x25600x16>
tensor<[1,1,25600,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25600 + d1 * 160 + d2, d3), memory_config: (25600, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<25600x16>>, >
shape: #ttnn.shape<1x1x25600x16>
tensor<[1,1,25600,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25600 + d1 * 160 + d2, d3), memory_config: (25600, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<25600x64>>, >
shape: #ttnn.shape<1x1x25600x64>
tensor<[1,1,25600,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25600 + d1 * 160 + d2, d3), memory_config: (25600, 64, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<50176x16>>, >
shape: #ttnn.shape<1x1x50176x16>
tensor<[1,1,50176,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 50176 + d1 * 224 + d2, d3), memory_config: (50176, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x32>>, >
shape: #ttnn.shape<1x1x12544x32>
tensor<[1,1,12544,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x32>>, >
shape: #ttnn.shape<1x1x784x32>
tensor<[1,1,784,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x24>>, >
shape: #ttnn.shape<1x1x3136x24>
tensor<[1,1,3136,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x46>>, >
shape: #ttnn.shape<1x1x784x46>
tensor<[1,1,784,46,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 46, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1392>>, >
shape: #ttnn.shape<1x1x1x1392>
tensor<[1,1,1,1392,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1392, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<1x1392x1x1>
tensor<[1,1392,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1392 + d1 + d2, d3), memory_config: (44, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x696>>, >
shape: #ttnn.shape<1x1x1x696>
tensor<[1,1,1,696,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 696, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<22x1>>, >
shape: #ttnn.shape<1x696x1x1>
tensor<[1,696,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 696 + d1 + d2, d3), memory_config: (22, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x896>>, >
shape: #ttnn.shape<1x1x196x896>
tensor<[1,1,196,896,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 896, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x184>>, >
shape: #ttnn.shape<1x1x196x184>
tensor<[1,1,196,184,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 184, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x40>>, >
shape: #ttnn.shape<1x1x196x40>
tensor<[1,1,196,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x80>>, >
shape: #ttnn.shape<1x1x196x80>
tensor<[1,1,196,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x184>>, >
shape: #ttnn.shape<1x1x400x184>
tensor<[1,1,400,184,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 184, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x80>>, >
shape: #ttnn.shape<1x1x400x80>
tensor<[1,1,400,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 80, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x184>>, >
shape: #ttnn.shape<1x1x49x184>
tensor<[1,1,49,184,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 184, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x184>>, >
shape: #ttnn.shape<1x1x49x184>
tensor<[1,1,49,184,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 184, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x128x28x28>
tensor<[1,128,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 28 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x144>>, >
shape: #ttnn.shape<1x1x49x144>
tensor<[1,1,49,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x18>>, >
shape: #ttnn.shape<1x1x196x18>
tensor<[1,1,196,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 18, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x72>>, >
shape: #ttnn.shape<1x1x196x72>
tensor<[1,1,196,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 72, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x18>>, >
shape: #ttnn.shape<1x1x3136x18>
tensor<[1,1,3136,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 18, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x18>>, >
shape: #ttnn.shape<1x1x784x18>
tensor<[1,1,784,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 18, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x32>>, >
shape: #ttnn.shape<1x1x3136x32>
tensor<[1,1,3136,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x36>>, >
shape: #ttnn.shape<1x1x784x36>
tensor<[1,1,784,36,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 36, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1280>>, >
shape: #ttnn.shape<1x1x256x1280>
tensor<[1,1,256,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x1280x16x16>
tensor<[1,1280,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 16 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1280>>, >
shape: #ttnn.shape<1x1x256x1280>
tensor<[1,1,256,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x1280x16x16>
tensor<[1,1280,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 16 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x640>>, >
shape: #ttnn.shape<1x1x1024x640>
tensor<[1,1,1024,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x640>>, >
shape: #ttnn.shape<1x1x1024x640>
tensor<[1,1,1024,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x192>>, >
shape: #ttnn.shape<1x1x196x192>
tensor<[1,1,196,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x192>>, >
shape: #ttnn.shape<1x1x196x192>
tensor<[1,1,196,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x64>>, >
shape: #ttnn.shape<1x1x196x64>
tensor<[1,1,196,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x192>>, >
shape: #ttnn.shape<1x1x64x192>
tensor<[1,1,64,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x192>>, >
shape: #ttnn.shape<1x1x289x192>
tensor<[1,1,289,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x224>>, >
shape: #ttnn.shape<1x1x289x224>
tensor<[1,1,289,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 224, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x16>>, >
shape: #ttnn.shape<1x1x784x16>
tensor<[1,1,784,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x192>>, >
shape: #ttnn.shape<1x1x784x192>
tensor<[1,1,784,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x192>>, >
shape: #ttnn.shape<1x1x196x192>
tensor<[1,1,196,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x32>>, >
shape: #ttnn.shape<1x1x784x32>
tensor<[1,1,784,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x64>>, >
shape: #ttnn.shape<1x1x784x64>
tensor<[1,1,784,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x96>>, >
shape: #ttnn.shape<1x1x784x96>
tensor<[1,1,784,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1225x224>>, >
shape: #ttnn.shape<1x1x1225x224>
tensor<[1,1,1225,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1225 + d1 * 35 + d2, d3), memory_config: (1225, 224, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1444x48>>, >
shape: #ttnn.shape<1x1x1444x48>
tensor<[1,1,1444,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1444 + d1 * 38 + d2, d3), memory_config: (1444, 48, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2304x56>>, >
shape: #ttnn.shape<1x1x2304x56>
tensor<[1,1,2304,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2304 + d1 * 48 + d2, d3), memory_config: (2304, 56, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1225x192>>, >
shape: #ttnn.shape<1x1x1225x192>
tensor<[1,1,1225,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1225 + d1 * 35 + d2, d3), memory_config: (1225, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5625x192>>, >
shape: #ttnn.shape<1x1x5625x192>
tensor<[1,1,5625,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5625 + d1 * 75 + d2, d3), memory_config: (5625, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5625x32>>, >
shape: #ttnn.shape<1x1x5625x32>
tensor<[1,1,5625,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5625 + d1 * 75 + d2, d3), memory_config: (5625, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1444x192>>, >
shape: #ttnn.shape<1x1x1444x192>
tensor<[1,1,1444,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1444 + d1 * 38 + d2, d3), memory_config: (1444, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1152>>, >
shape: #ttnn.shape<1x1x49x1152>
tensor<[1,1,49,1152,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1152, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x384>>, >
shape: #ttnn.shape<1x1x49x384>
tensor<[1,1,49,384,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 384, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x1152>>, >
shape: #ttnn.shape<1x1x64x1152>
tensor<[1,1,64,1152,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 1152, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9025x192>>, >
shape: #ttnn.shape<1x1x9025x192>
tensor<[1,1,9025,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9025 + d1 * 95 + d2, d3), memory_config: (9025, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9025x32>>, >
shape: #ttnn.shape<1x1x9025x32>
tensor<[1,1,9025,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9025 + d1 * 95 + d2, d3), memory_config: (9025, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2304x192>>, >
shape: #ttnn.shape<1x1x2304x192>
tensor<[1,1,2304,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2304 + d1 * 48 + d2, d3), memory_config: (2304, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x40>>, >
shape: #ttnn.shape<1x1x196x40>
tensor<[1,1,196,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x16>>, >
shape: #ttnn.shape<1x1x784x16>
tensor<[1,1,784,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<14x1>>, >
shape: #ttnn.shape<1x16x28x28>
tensor<[1,16,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 448 + d1 * 28 + d2, d3), memory_config: (14, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<676x32>>, >
shape: #ttnn.shape<1x1x676x32>
tensor<[1,1,676,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 676 + d1 * 26 + d2, d3), memory_config: (676, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<26x1>>, >
shape: #ttnn.shape<1x32x26x26>
tensor<[1,32,26,26,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 832 + d1 * 26 + d2, d3), memory_config: (26, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x200>>, >
shape: #ttnn.shape<1x1x196x200>
tensor<[1,1,196,200,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 200, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x40>>, >
shape: #ttnn.shape<1x1x196x40>
tensor<[1,1,196,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x80>>, >
shape: #ttnn.shape<1x1x196x80>
tensor<[1,1,196,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x200>>, >
shape: #ttnn.shape<1x1x400x200>
tensor<[1,1,400,200,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 200, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x80>>, >
shape: #ttnn.shape<1x1x400x80>
tensor<[1,1,400,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 80, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x200>>, >
shape: #ttnn.shape<1x1x49x200>
tensor<[1,1,49,200,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 200, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x200>>, >
shape: #ttnn.shape<1x1x49x200>
tensor<[1,1,49,200,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 200, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x2048>>, >
shape: #ttnn.shape<1x1x49x2048>
tensor<[1,1,49,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 2048, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<300x2048>>, >
shape: #ttnn.shape<1x1x300x2048>
tensor<[1,1,300,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 300 + d1 * 20 + d2, d3), memory_config: (300, 2048, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x2048x15x20>
tensor<[1,2048,15,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 15 + d2, d3), memory_config: (960, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<920x256>>, >
shape: #ttnn.shape<1x1x920x256>
tensor<[1,1,920,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 920 + d1 * 40 + d2, d3), memory_config: (920, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<184x2>>, >
shape: #ttnn.shape<1x256x23x40>
tensor<[1,256,23,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5888 + d1 * 23 + d2, d3), memory_config: (184, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<920x512>>, >
shape: #ttnn.shape<1x1x920x512>
tensor<[1,1,920,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 920 + d1 * 40 + d2, d3), memory_config: (920, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1024>>, >
shape: #ttnn.shape<1x1x49x1024>
tensor<[1,1,49,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x2048>>, >
shape: #ttnn.shape<1x1x49x2048>
tensor<[1,1,49,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 2048, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x2048>>, >
shape: #ttnn.shape<1x1x49x2048>
tensor<[1,1,49,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 2048, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x512>>, >
shape: #ttnn.shape<1x1x49x512>
tensor<[1,1,49,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<81x1248>>, >
shape: #ttnn.shape<1x1x81x1248>
tensor<[1,1,81,1248,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 81 + d1 * 9 + d2, d3), memory_config: (81, 1248, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x72>>, >
shape: #ttnn.shape<1x1x1x72>
tensor<[1,1,1,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 72, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x72x1x1>
tensor<[1,72,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 72 + d1 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x20>>, >
shape: #ttnn.shape<1x1x784x20>
tensor<[1,1,784,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 20, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x78>>, >
shape: #ttnn.shape<1x1x784x78>
tensor<[1,1,784,78,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 78, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x224>>, >
shape: #ttnn.shape<1x1x289x224>
tensor<[1,1,289,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 224, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x256>>, >
shape: #ttnn.shape<1x1x289x256>
tensor<[1,1,289,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x256>>, >
shape: #ttnn.shape<1x1x289x256>
tensor<[1,1,289,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x256>>, >
shape: #ttnn.shape<1x1x289x256>
tensor<[1,1,289,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x224>>, >
shape: #ttnn.shape<1x1x49x224>
tensor<[1,1,49,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 224, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x224>>, >
shape: #ttnn.shape<1x1x49x224>
tensor<[1,1,49,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 224, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x1392>>, >
shape: #ttnn.shape<1x1x100x1392>
tensor<[1,1,100,1392,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 1392, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x232>>, >
shape: #ttnn.shape<1x1x3136x232>
tensor<[1,1,3136,232,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 232, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x58>>, >
shape: #ttnn.shape<1x1x1x58>
tensor<[1,1,1,58,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 58, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x58x1x1>
tensor<[1,58,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 58 + d1 + d2, d3), memory_config: (2, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x8>>, >
shape: #ttnn.shape<1x1x1x8>
tensor<[1,1,1,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 8, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x1>
tensor<[1,8,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x232>>, >
shape: #ttnn.shape<1x1x3136x232>
tensor<[1,1,3136,232,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 232, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x232>>, >
shape: #ttnn.shape<1x1x3136x232>
tensor<[1,1,3136,232,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 232, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x696>>, >
shape: #ttnn.shape<1x1x3136x696>
tensor<[1,1,3136,696,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 696, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x696>>, >
shape: #ttnn.shape<1x1x784x696>
tensor<[1,1,784,696,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 696, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x68>>, >
shape: #ttnn.shape<1x1x196x68>
tensor<[1,1,196,68,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 68, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x240>>, >
shape: #ttnn.shape<1x1x196x240>
tensor<[1,1,196,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x240>>, >
shape: #ttnn.shape<1x1x196x240>
tensor<[1,1,196,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x240>>, >
shape: #ttnn.shape<1x1x196x240>
tensor<[1,1,196,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x40>>, >
shape: #ttnn.shape<1x1x196x40>
tensor<[1,1,196,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x80>>, >
shape: #ttnn.shape<1x1x196x80>
tensor<[1,1,196,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<225x80>>, >
shape: #ttnn.shape<1x1x225x80>
tensor<[1,1,225,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 225 + d1 * 15 + d2, d3), memory_config: (225, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x960>>, >
shape: #ttnn.shape<1x1x1x960>
tensor<[1,1,1,960,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 960, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x960x1x1>
tensor<[1,960,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 + d2, d3), memory_config: (30, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x80>>, >
shape: #ttnn.shape<1x1x400x80>
tensor<[1,1,400,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 80, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x240>>, >
shape: #ttnn.shape<1x1x196x240>
tensor<[1,1,196,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x240>>, >
shape: #ttnn.shape<1x1x784x240>
tensor<[1,1,784,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x40>>, >
shape: #ttnn.shape<1x1x784x40>
tensor<[1,1,784,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x240>>, >
shape: #ttnn.shape<1x1x196x240>
tensor<[1,1,196,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<900x240>>, >
shape: #ttnn.shape<1x1x900x240>
tensor<[1,1,900,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 900 + d1 * 30 + d2, d3), memory_config: (900, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<900x40>>, >
shape: #ttnn.shape<1x1x900x40>
tensor<[1,1,900,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 900 + d1 * 30 + d2, d3), memory_config: (900, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<225x240>>, >
shape: #ttnn.shape<1x1x225x240>
tensor<[1,1,225,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 225 + d1 * 15 + d2, d3), memory_config: (225, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x240>>, >
shape: #ttnn.shape<1x1x400x240>
tensor<[1,1,400,240,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 240, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x24>>, >
shape: #ttnn.shape<1x1x12544x24>
tensor<[1,1,12544,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x64>>, >
shape: #ttnn.shape<1x1x196x64>
tensor<[1,1,196,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<22500x144>>, >
shape: #ttnn.shape<1x1x22500x144>
tensor<[1,1,22500,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22500 + d1 * 150 + d2, d3), memory_config: (22500, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<36100x144>>, >
shape: #ttnn.shape<1x1x36100x144>
tensor<[1,1,36100,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 36100 + d1 * 190 + d2, d3), memory_config: (36100, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x72>>, >
shape: #ttnn.shape<1x1x1x72>
tensor<[1,1,1,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 72, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x72x1x1>
tensor<[1,72,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 72 + d1 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x40>>, >
shape: #ttnn.shape<1x1x784x40>
tensor<[1,1,784,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x72>>, >
shape: #ttnn.shape<1x1x784x72>
tensor<[1,1,784,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 72, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x144>>, >
shape: #ttnn.shape<1x1x3136x144>
tensor<[1,1,3136,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x14>>, >
shape: #ttnn.shape<1x1x3136x14>
tensor<[1,1,3136,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 14, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x24>>, >
shape: #ttnn.shape<1x1x784x24>
tensor<[1,1,784,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x36>>, >
shape: #ttnn.shape<1x1x3136x36>
tensor<[1,1,3136,36,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 36, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x72>>, >
shape: #ttnn.shape<1x1x3136x72>
tensor<[1,1,3136,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 72, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3600x144>>, >
shape: #ttnn.shape<1x1x3600x144>
tensor<[1,1,3600,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 60 + d2, d3), memory_config: (3600, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4225x144>>, >
shape: #ttnn.shape<1x1x4225x144>
tensor<[1,1,4225,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4225 + d1 * 65 + d2, d3), memory_config: (4225, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<6400x72>>, >
shape: #ttnn.shape<1x1x6400x72>
tensor<[1,1,6400,72,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6400 + d1 * 80 + d2, d3), memory_config: (6400, 72, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x2520>>, >
shape: #ttnn.shape<1x1x49x2520>
tensor<[1,1,49,2520,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 2520, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x2520>>, >
shape: #ttnn.shape<1x1x49x2520>
tensor<[1,1,49,2520,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 2520, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1280>>, >
shape: #ttnn.shape<1x1x256x1280>
tensor<[1,1,256,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x1280x16x16>
tensor<[1,1280,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 16 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1280>>, >
shape: #ttnn.shape<1x1x256x1280>
tensor<[1,1,256,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x1280x16x16>
tensor<[1,1280,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 16 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x1280>>, >
shape: #ttnn.shape<1x1x64x1280>
tensor<[1,1,64,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x1280>>, >
shape: #ttnn.shape<1x1x64x1280>
tensor<[1,1,64,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<25x256>>, >
shape: #ttnn.shape<1x1x25x256>
tensor<[1,1,25,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25 + d1 * 5 + d2, d3), memory_config: (25, 256, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x128>>, >
shape: #ttnn.shape<1x1x12544x128>
tensor<[1,1,12544,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<19200x256>>, >
shape: #ttnn.shape<1x1x19200x256>
tensor<[1,1,19200,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 160 + d2, d3), memory_config: (19200, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x5>>, >
shape: #ttnn.shape<1x256x120x160>
tensor<[1,256,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 120 + d2, d3), memory_config: (960, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x150>>, >
shape: #ttnn.shape<1x1x16384x150>
tensor<[1,1,16384,150,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 150, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x4>>, >
shape: #ttnn.shape<1x150x128x128>
tensor<[1,150,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 128 + d2, d3), memory_config: (600, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1024>>, >
shape: #ttnn.shape<1x1x196x1024>
tensor<[1,1,196,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x1x196x256>
tensor<[1,1,196,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x512>>, >
shape: #ttnn.shape<1x1x196x512>
tensor<[1,1,196,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x512>>, >
shape: #ttnn.shape<1x1x49x512>
tensor<[1,1,49,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x512>>, >
shape: #ttnn.shape<1x1x49x512>
tensor<[1,1,49,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x256>>, >
shape: #ttnn.shape<1x1x256x256>
tensor<[1,1,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x256>>, >
shape: #ttnn.shape<1x1x256x256>
tensor<[1,1,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x512>>, >
shape: #ttnn.shape<1x1x256x512>
tensor<[1,1,256,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x256>>, >
shape: #ttnn.shape<1x1x289x256>
tensor<[1,1,289,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x320>>, >
shape: #ttnn.shape<1x1x289x320>
tensor<[1,1,289,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<57600x128>>, >
shape: #ttnn.shape<1x1x57600x128>
tensor<[1,1,57600,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 57600 + d1 * 320 + d2, d3), memory_config: (57600, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14400x512>>, >
shape: #ttnn.shape<1x1x14400x512>
tensor<[1,1,14400,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 160 + d2, d3), memory_config: (14400, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<57600x64>>, >
shape: #ttnn.shape<1x1x57600x64>
tensor<[1,1,57600,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 57600 + d1 * 320 + d2, d3), memory_config: (57600, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<1x1x1x256>
tensor<[1,1,1,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1x1>
tensor<[1,256,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 + d2, d3), memory_config: (8, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x160>>, >
shape: #ttnn.shape<1x1x784x160>
tensor<[1,1,784,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x20>>, >
shape: #ttnn.shape<1x1x784x20>
tensor<[1,1,784,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 20, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x256>>, >
shape: #ttnn.shape<1x1x784x256>
tensor<[1,1,784,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x1x196x256>
tensor<[1,1,196,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x256>>, >
shape: #ttnn.shape<1x1x784x256>
tensor<[1,1,784,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x256>>, >
shape: #ttnn.shape<1x1x784x256>
tensor<[1,1,784,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x1x196x256>
tensor<[1,1,196,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x32>>, >
shape: #ttnn.shape<1x1x784x32>
tensor<[1,1,784,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x512x28x28>
tensor<[1,512,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 28 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x512>>, >
shape: #ttnn.shape<1x1x196x512>
tensor<[1,1,196,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x64>>, >
shape: #ttnn.shape<1x1x784x64>
tensor<[1,1,784,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x24>>, >
shape: #ttnn.shape<1x1x4x24>
tensor<[1,1,4,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4 + d1 * 2 + d2, d3), memory_config: (4, 24, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x24x2x2>
tensor<[1,24,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 48 + d1 * 2 + d2, d3), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x256>>, >
shape: #ttnn.shape<1x1x4x256>
tensor<[1,1,4,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4 + d1 * 2 + d2, d3), memory_config: (4, 256, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x546>>, >
shape: #ttnn.shape<1x1x4x546>
tensor<[1,1,4,546,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4 + d1 * 2 + d2, d3), memory_config: (4, 546, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x546x2x2>
tensor<[1,546,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1092 + d1 * 2 + d2, d3), memory_config: (35, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x64>>, >
shape: #ttnn.shape<1x1x4x64>
tensor<[1,1,4,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4 + d1 * 2 + d2, d3), memory_config: (4, 64, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x128>>, >
shape: #ttnn.shape<1x1x1024x128>
tensor<[1,1,1024,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x255>>, >
shape: #ttnn.shape<1x1x1024x255>
tensor<[1,1,1024,255,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 255, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<255x1>>, >
shape: #ttnn.shape<1x255x32x32>
tensor<[1,255,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8160 + d1 * 32 + d2, d3), memory_config: (255, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x256>>, >
shape: #ttnn.shape<1x1x1024x256>
tensor<[1,1,1024,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x256>>, >
shape: #ttnn.shape<1x1x1024x256>
tensor<[1,1,1024,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x256>>, >
shape: #ttnn.shape<1x1x256x256>
tensor<[1,1,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x512>>, >
shape: #ttnn.shape<1x1x1024x512>
tensor<[1,1,1024,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x512>>, >
shape: #ttnn.shape<1x1x256x512>
tensor<[1,1,256,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1444x256>>, >
shape: #ttnn.shape<1x1x1444x256>
tensor<[1,1,1444,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1444 + d1 * 38 + d2, d3), memory_config: (1444, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1444x256>>, >
shape: #ttnn.shape<1x1x1444x256>
tensor<[1,1,1444,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1444 + d1 * 38 + d2, d3), memory_config: (1444, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1444x728>>, >
shape: #ttnn.shape<1x1x1444x728>
tensor<[1,1,1444,728,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1444 + d1 * 38 + d2, d3), memory_config: (1444, 728, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x128>>, >
shape: #ttnn.shape<1x1x9x128>
tensor<[1,1,9,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 * 3 + d2, d3), memory_config: (9, 128, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x24>>, >
shape: #ttnn.shape<1x1x9x24>
tensor<[1,1,9,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 * 3 + d2, d3), memory_config: (9, 24, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x24x3x3>
tensor<[1,24,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 72 + d1 * 3 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x256>>, >
shape: #ttnn.shape<1x1x9x256>
tensor<[1,1,9,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 * 3 + d2, d3), memory_config: (9, 256, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x546>>, >
shape: #ttnn.shape<1x1x9x546>
tensor<[1,1,9,546,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 * 3 + d2, d3), memory_config: (9, 546, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<52x1>>, >
shape: #ttnn.shape<1x546x3x3>
tensor<[1,546,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1638 + d1 * 3 + d2, d3), memory_config: (52, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3600x1024>>, >
shape: #ttnn.shape<1x1x3600x1024>
tensor<[1,1,3600,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 80 + d2, d3), memory_config: (3600, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3600x256>>, >
shape: #ttnn.shape<1x1x3600x256>
tensor<[1,1,3600,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 80 + d2, d3), memory_config: (3600, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<512x1024>>, >
shape: #ttnn.shape<1x1x512x1024>
tensor<[1,1,512,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 + d2, d3), memory_config: (512, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x16>>, >
shape: #ttnn.shape<1x1024x512>
tensor<[1,1024,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x18>>, >
shape: #ttnn.shape<1x1x3136x18>
tensor<[1,1,3136,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 18, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x256>>, >
shape: #ttnn.shape<1x1x3136x256>
tensor<[1,1,3136,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x256>>, >
shape: #ttnn.shape<1x1x3136x256>
tensor<[1,1,3136,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x2>>, >
shape: #ttnn.shape<1x256x56x56>
tensor<[1,256,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 56 + d2, d3), memory_config: (448, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x256>>, >
shape: #ttnn.shape<1x1x3136x256>
tensor<[1,1,3136,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x256>>, >
shape: #ttnn.shape<1x1x784x256>
tensor<[1,1,784,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x256>>, >
shape: #ttnn.shape<1x1x3136x256>
tensor<[1,1,3136,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x36>>, >
shape: #ttnn.shape<1x1x784x36>
tensor<[1,1,784,36,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 36, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x512>>, >
shape: #ttnn.shape<1x1x3136x512>
tensor<[1,1,3136,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x64>>, >
shape: #ttnn.shape<1x1x3136x64>
tensor<[1,1,3136,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<25x512>>, >
shape: #ttnn.shape<1x1x25x512>
tensor<[1,1,25,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25 + d1 * 5 + d2, d3), memory_config: (25, 512, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x128>>, >
shape: #ttnn.shape<1x1x4096x128>
tensor<[1,1,4096,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x128>>, >
shape: #ttnn.shape<1x1x4096x128>
tensor<[1,1,4096,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x255>>, >
shape: #ttnn.shape<1x1x4096x255>
tensor<[1,1,4096,255,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 255, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<510x2>>, >
shape: #ttnn.shape<1x255x64x64>
tensor<[1,255,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16320 + d1 * 64 + d2, d3), memory_config: (510, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x256>>, >
shape: #ttnn.shape<1x1x4096x256>
tensor<[1,1,4096,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x2>>, >
shape: #ttnn.shape<1x256x64x64>
tensor<[1,256,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 64 + d2, d3), memory_config: (512, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x512>>, >
shape: #ttnn.shape<1x1x1024x512>
tensor<[1,1,1024,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x64>>, >
shape: #ttnn.shape<1x1x4096x64>
tensor<[1,1,4096,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5625x256>>, >
shape: #ttnn.shape<1x1x5625x256>
tensor<[1,1,5625,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5625 + d1 * 75 + d2, d3), memory_config: (5625, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1444x256>>, >
shape: #ttnn.shape<1x1x1444x256>
tensor<[1,1,1444,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1444 + d1 * 38 + d2, d3), memory_config: (1444, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5625x256>>, >
shape: #ttnn.shape<1x1x5625x256>
tensor<[1,1,5625,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5625 + d1 * 75 + d2, d3), memory_config: (5625, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1444x256>>, >
shape: #ttnn.shape<1x1x1444x256>
tensor<[1,1,1444,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1444 + d1 * 38 + d2, d3), memory_config: (1444, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1024>>, >
shape: #ttnn.shape<1x1x49x1024>
tensor<[1,1,49,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x256>>, >
shape: #ttnn.shape<1x1x49x256>
tensor<[1,1,49,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x512>>, >
shape: #ttnn.shape<1x1x49x512>
tensor<[1,1,49,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3600x256>>, >
shape: #ttnn.shape<1x1x3600x256>
tensor<[1,1,3600,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 80 + d2, d3), memory_config: (3600, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x256>>, >
shape: #ttnn.shape<1x1x784x256>
tensor<[1,1,784,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x1632>>, >
shape: #ttnn.shape<1x1x144x1632>
tensor<[1,1,144,1632,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (144, 1632, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x160>>, >
shape: #ttnn.shape<1x1x49x160>
tensor<[1,1,49,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x34>>, >
shape: #ttnn.shape<1x1x784x34>
tensor<[1,1,784,34,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 34, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x88>>, >
shape: #ttnn.shape<1x1x289x88>
tensor<[1,1,289,88,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 88, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x96>>, >
shape: #ttnn.shape<1x1x361x96>
tensor<[1,1,361,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1089x288>>, >
shape: #ttnn.shape<1x1x1089x288>
tensor<[1,1,1089,288,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1089 + d1 * 33 + d2, d3), memory_config: (1089, 288, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1089x48>>, >
shape: #ttnn.shape<1x1x1089x48>
tensor<[1,1,1089,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1089 + d1 * 33 + d2, d3), memory_config: (1089, 48, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x288>>, >
shape: #ttnn.shape<1x1x289x288>
tensor<[1,1,289,288,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 288, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1444x288>>, >
shape: #ttnn.shape<1x1x1444x288>
tensor<[1,1,1444,288,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1444 + d1 * 38 + d2, d3), memory_config: (1444, 288, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1444x48>>, >
shape: #ttnn.shape<1x1x1444x48>
tensor<[1,1,1444,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1444 + d1 * 38 + d2, d3), memory_config: (1444, 48, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x288>>, >
shape: #ttnn.shape<1x1x361x288>
tensor<[1,1,361,288,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 288, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x16>>, >
shape: #ttnn.shape<1x1x784x16>
tensor<[1,1,784,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x134>>, >
shape: #ttnn.shape<1x1x784x134>
tensor<[1,1,784,134,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 134, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x116>>, >
shape: #ttnn.shape<1x1x196x116>
tensor<[1,1,196,116,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 116, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<13x3072>>, >
shape: #ttnn.shape<1x1x13x3072>
tensor<[1,1,13,3072,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13 + d1 + d2, d3), memory_config: (13, 3072, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x13>
tensor<[1,3072,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14x3072>>, >
shape: #ttnn.shape<1x1x14x3072>
tensor<[1,1,14,3072,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14 + d1 + d2, d3), memory_config: (14, 3072, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x14>
tensor<[1,3072,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<15x3072>>, >
shape: #ttnn.shape<1x1x15x3072>
tensor<[1,1,15,3072,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15 + d1 + d2, d3), memory_config: (15, 3072, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x15>
tensor<[1,3072,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16x3072>>, >
shape: #ttnn.shape<1x1x16x3072>
tensor<[1,1,16,3072,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (16, 3072, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x16>
tensor<[1,3072,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<17x3072>>, >
shape: #ttnn.shape<1x1x17x3072>
tensor<[1,1,17,3072,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17 + d1 + d2, d3), memory_config: (17, 3072, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x17>
tensor<[1,3072,17,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<18x3072>>, >
shape: #ttnn.shape<1x1x18x3072>
tensor<[1,1,18,3072,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18 + d1 + d2, d3), memory_config: (18, 3072, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x18>
tensor<[1,3072,18,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x3072>>, >
shape: #ttnn.shape<1x1x9x3072>
tensor<[1,1,9,3072,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 + d2, d3), memory_config: (9, 3072, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x9>
tensor<[1,3072,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<10x3072>>, >
shape: #ttnn.shape<1x1x10x3072>
tensor<[1,1,10,3072,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10 + d1 + d2, d3), memory_config: (10, 3072, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x10>
tensor<[1,3072,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<11x3072>>, >
shape: #ttnn.shape<1x1x11x3072>
tensor<[1,1,11,3072,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11 + d1 + d2, d3), memory_config: (11, 3072, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x11>
tensor<[1,3072,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8x768>>, >
shape: #ttnn.shape<1x1x8x768>
tensor<[1,1,8,768,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (8, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x768x8>
tensor<[1,768,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12x3072>>, >
shape: #ttnn.shape<1x1x12x3072>
tensor<[1,1,12,3072,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (12, 3072, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x12>
tensor<[1,3072,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x58>>, >
shape: #ttnn.shape<1x1x784x58>
tensor<[1,1,784,58,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 58, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x40>>, >
shape: #ttnn.shape<1x1x196x40>
tensor<[1,1,196,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x320>>, >
shape: #ttnn.shape<1x1x64x320>
tensor<[1,1,64,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<300x320>>, >
shape: #ttnn.shape<1x1x300x320>
tensor<[1,1,300,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 300 + d1 * 20 + d2, d3), memory_config: (300, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x320x15x20>
tensor<[1,320,15,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 15 + d2, d3), memory_config: (150, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<300x512>>, >
shape: #ttnn.shape<1x1x300x512>
tensor<[1,1,300,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 300 + d1 * 20 + d2, d3), memory_config: (300, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<240x1>>, >
shape: #ttnn.shape<1x512x15x20>
tensor<[1,512,15,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7680 + d1 * 15 + d2, d3), memory_config: (240, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1200x64>>, >
shape: #ttnn.shape<1x1x1200x64>
tensor<[1,1,1200,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 40 + d2, d3), memory_config: (1200, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x2>>, >
shape: #ttnn.shape<1x64x30x40>
tensor<[1,64,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 30 + d2, d3), memory_config: (60, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x640>>, >
shape: #ttnn.shape<1x1x1024x640>
tensor<[1,1,1024,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x640>>, >
shape: #ttnn.shape<1x1x1024x640>
tensor<[1,1,1024,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x320>>, >
shape: #ttnn.shape<1x1x4096x320>
tensor<[1,1,4096,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<1x320x64x64>
tensor<[1,320,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 64 + d2, d3), memory_config: (640, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x320>>, >
shape: #ttnn.shape<1x1x4096x320>
tensor<[1,1,4096,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<1x320x64x64>
tensor<[1,320,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 64 + d2, d3), memory_config: (640, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x320>>, >
shape: #ttnn.shape<1x1x1024x320>
tensor<[1,1,1024,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x320x32x32>
tensor<[1,320,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 32 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x4>>, >
shape: #ttnn.shape<1x1x4096x4>
tensor<[1,1,4096,4,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 4, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x4x64x64>
tensor<[1,4,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 64 + d2, d3), memory_config: (8, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1280>>, >
shape: #ttnn.shape<1x1x49x1280>
tensor<[1,1,49,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x1280>>, >
shape: #ttnn.shape<1x1x64x1280>
tensor<[1,1,64,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x320>>, >
shape: #ttnn.shape<1x1x784x320>
tensor<[1,1,784,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x16>>, >
shape: #ttnn.shape<1x1x12544x16>
tensor<[1,1,12544,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x232>>, >
shape: #ttnn.shape<1x1x12544x232>
tensor<[1,1,12544,232,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 232, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x232>>, >
shape: #ttnn.shape<1x1x3136x232>
tensor<[1,1,3136,232,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 232, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x32>>, >
shape: #ttnn.shape<1x1x12544x32>
tensor<[1,1,12544,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x336>>, >
shape: #ttnn.shape<1x1x12544x336>
tensor<[1,1,12544,336,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 336, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x336>>, >
shape: #ttnn.shape<1x1x3136x336>
tensor<[1,1,3136,336,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 336, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x64>>, >
shape: #ttnn.shape<1x1x12544x64>
tensor<[1,1,12544,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x64>>, >
shape: #ttnn.shape<1x1x12544x64>
tensor<[1,1,12544,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x64>>, >
shape: #ttnn.shape<1x1x3136x64>
tensor<[1,1,3136,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14400x16>>, >
shape: #ttnn.shape<1x1x14400x16>
tensor<[1,1,14400,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 120 + d2, d3), memory_config: (14400, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14400x32>>, >
shape: #ttnn.shape<1x1x14400x32>
tensor<[1,1,14400,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 120 + d2, d3), memory_config: (14400, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<19200x2>>, >
shape: #ttnn.shape<1x1x19200x2>
tensor<[1,1,19200,2,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 160 + d2, d3), memory_config: (19200, 2, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x5>>, >
shape: #ttnn.shape<1x2x120x160>
tensor<[1,2,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 240 + d1 * 120 + d2, d3), memory_config: (8, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x32>>, >
shape: #ttnn.shape<1x1x16384x32>
tensor<[1,1,16384,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x32>>, >
shape: #ttnn.shape<1x1x16384x32>
tensor<[1,1,16384,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x32>>, >
shape: #ttnn.shape<1x1x256x32>
tensor<[1,1,256,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x32x16x16>
tensor<[1,32,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 16 + d2, d3), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x64>>, >
shape: #ttnn.shape<1x1x4096x64>
tensor<[1,1,4096,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x64x64x64>
tensor<[1,64,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (128, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x64>>, >
shape: #ttnn.shape<1x1x16384x64>
tensor<[1,1,16384,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16900x16>>, >
shape: #ttnn.shape<1x1x16900x16>
tensor<[1,1,16900,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16900 + d1 * 130 + d2, d3), memory_config: (16900, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16900x32>>, >
shape: #ttnn.shape<1x1x16900x32>
tensor<[1,1,16900,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16900 + d1 * 130 + d2, d3), memory_config: (16900, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<21609x64>>, >
shape: #ttnn.shape<1x1x21609x64>
tensor<[1,1,21609,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21609 + d1 * 147 + d2, d3), memory_config: (21609, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<21609x32>>, >
shape: #ttnn.shape<1x1x21609x32>
tensor<[1,1,21609,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21609 + d1 * 147 + d2, d3), memory_config: (21609, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x64>>, >
shape: #ttnn.shape<1x1x196x64>
tensor<[1,1,196,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<22500x24>>, >
shape: #ttnn.shape<1x1x22500x24>
tensor<[1,1,22500,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22500 + d1 * 150 + d2, d3), memory_config: (22500, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<22500x32>>, >
shape: #ttnn.shape<1x1x22500x32>
tensor<[1,1,22500,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22500 + d1 * 150 + d2, d3), memory_config: (22500, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<22500x64>>, >
shape: #ttnn.shape<1x1x22500x64>
tensor<[1,1,22500,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22500 + d1 * 150 + d2, d3), memory_config: (22500, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<36100x24>>, >
shape: #ttnn.shape<1x1x36100x24>
tensor<[1,1,36100,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 36100 + d1 * 190 + d2, d3), memory_config: (36100, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<36100x32>>, >
shape: #ttnn.shape<1x1x36100x32>
tensor<[1,1,36100,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 36100 + d1 * 190 + d2, d3), memory_config: (36100, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x120>>, >
shape: #ttnn.shape<1x1x1x120>
tensor<[1,1,1,120,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x120x1x1>
tensor<[1,120,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 + d2, d3), memory_config: (4, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<65536x1>>, >
shape: #ttnn.shape<1x1x65536x1>
tensor<[1,1,65536,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 65536 + d1 * 256 + d2, d3), memory_config: (65536, 1, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x1x256x256>
tensor<[1,1,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 256 + d2, d3), memory_config: (8, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<65536x32>>, >
shape: #ttnn.shape<1x1x65536x32>
tensor<[1,1,65536,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 65536 + d1 * 256 + d2, d3), memory_config: (65536, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<65536x64>>, >
shape: #ttnn.shape<1x1x65536x64>
tensor<[1,1,65536,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 65536 + d1 * 256 + d2, d3), memory_config: (65536, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x64>>, >
shape: #ttnn.shape<1x1x16384x64>
tensor<[1,1,16384,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<576x64>>, >
shape: #ttnn.shape<1x1x576x64>
tensor<[1,1,576,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (576, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x64x24x24>
tensor<[1,64,24,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 24 + d2, d3), memory_config: (48, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x192>>, >
shape: #ttnn.shape<1x1x784x192>
tensor<[1,1,784,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x96>>, >
shape: #ttnn.shape<1x1x784x96>
tensor<[1,1,784,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1200x2>>, >
shape: #ttnn.shape<1x1x1200x2>
tensor<[1,1,1200,2,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 40 + d2, d3), memory_config: (1200, 2, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x2x30x40>
tensor<[1,2,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 60 + d1 * 30 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<65536x64>>, >
shape: #ttnn.shape<1x1x65536x64>
tensor<[1,1,65536,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 65536 + d1 * 256 + d2, d3), memory_config: (65536, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x32>>, >
shape: #ttnn.shape<1x1x3136x32>
tensor<[1,1,3136,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x64>>, >
shape: #ttnn.shape<1x1x3136x64>
tensor<[1,1,3136,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4800x2>>, >
shape: #ttnn.shape<1x1x4800x2>
tensor<[1,1,4800,2,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 80 + d2, d3), memory_config: (4800, 2, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x3>>, >
shape: #ttnn.shape<1x2x60x80>
tensor<[1,2,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 * 60 + d2, d3), memory_config: (4, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5625x192>>, >
shape: #ttnn.shape<1x1x5625x192>
tensor<[1,1,5625,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5625 + d1 * 75 + d2, d3), memory_config: (5625, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9025x192>>, >
shape: #ttnn.shape<1x1x9025x192>
tensor<[1,1,9025,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9025 + d1 * 95 + d2, d3), memory_config: (9025, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x336>>, >
shape: #ttnn.shape<1x1x3136x336>
tensor<[1,1,3136,336,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 336, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x336>>, >
shape: #ttnn.shape<1x1x196x336>
tensor<[1,1,196,336,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 336, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<576x112>>, >
shape: #ttnn.shape<1x1x576x112>
tensor<[1,1,576,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (576, 112, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2304x336>>, >
shape: #ttnn.shape<1x1x2304x336>
tensor<[1,1,2304,336,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2304 + d1 * 48 + d2, d3), memory_config: (2304, 336, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2304x56>>, >
shape: #ttnn.shape<1x1x2304x56>
tensor<[1,1,2304,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2304 + d1 * 48 + d2, d3), memory_config: (2304, 56, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<576x336>>, >
shape: #ttnn.shape<1x1x576x336>
tensor<[1,1,576,336,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (576, 336, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x336>>, >
shape: #ttnn.shape<1x1x3136x336>
tensor<[1,1,3136,336,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 336, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x336>>, >
shape: #ttnn.shape<1x1x3136x336>
tensor<[1,1,3136,336,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 336, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x672>>, >
shape: #ttnn.shape<1x1x3136x672>
tensor<[1,1,3136,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x672>>, >
shape: #ttnn.shape<1x1x784x672>
tensor<[1,1,784,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1392>>, >
shape: #ttnn.shape<1x1x1x1392>
tensor<[1,1,1,1392,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1392, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<1x1392x1x1>
tensor<[1,1392,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1392 + d1 + d2, d3), memory_config: (44, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x3712>>, >
shape: #ttnn.shape<1x1x1x3712>
tensor<[1,1,1,3712,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3712, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<116x1>>, >
shape: #ttnn.shape<1x3712x1x1>
tensor<[1,3712,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3712 + d1 + d2, d3), memory_config: (116, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x20>>, >
shape: #ttnn.shape<1x1x784x20>
tensor<[1,1,784,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 20, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<81x1280>>, >
shape: #ttnn.shape<1x1x81x1280>
tensor<[1,1,81,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 81 + d1 * 9 + d2, d3), memory_config: (81, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x68>>, >
shape: #ttnn.shape<1x1x196x68>
tensor<[1,1,196,68,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 68, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x98>>, >
shape: #ttnn.shape<1x1x784x98>
tensor<[1,1,784,98,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 98, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x144>>, >
shape: #ttnn.shape<1x1x49x144>
tensor<[1,1,49,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x18>>, >
shape: #ttnn.shape<1x1x784x18>
tensor<[1,1,784,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 18, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x256>>, >
shape: #ttnn.shape<1x1x784x256>
tensor<[1,1,784,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x36>>, >
shape: #ttnn.shape<1x1x784x36>
tensor<[1,1,784,36,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 36, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x36>>, >
shape: #ttnn.shape<1x1x196x36>
tensor<[1,1,196,36,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 36, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x64>>, >
shape: #ttnn.shape<1x1x784x64>
tensor<[1,1,784,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x72>>, >
shape: #ttnn.shape<1x1x196x72>
tensor<[1,1,196,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 72, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x36>>, >
shape: #ttnn.shape<1x1x3136x36>
tensor<[1,1,3136,36,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 36, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x3712>>, >
shape: #ttnn.shape<1x1x49x3712>
tensor<[1,1,49,3712,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 3712, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x348>>, >
shape: #ttnn.shape<1x1x1x348>
tensor<[1,1,1,348,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 348, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x1>>, >
shape: #ttnn.shape<1x348x1x1>
tensor<[1,348,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 348 + d1 + d2, d3), memory_config: (11, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x3712>>, >
shape: #ttnn.shape<1x1x49x3712>
tensor<[1,1,49,3712,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 3712, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x1280>>, >
shape: #ttnn.shape<1x1x100x1280>
tensor<[1,1,100,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x384>>, >
shape: #ttnn.shape<1x1x196x384>
tensor<[1,1,196,384,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 384, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x64>>, >
shape: #ttnn.shape<1x1x196x64>
tensor<[1,1,196,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x96>>, >
shape: #ttnn.shape<1x1x196x96>
tensor<[1,1,196,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1225x192>>, >
shape: #ttnn.shape<1x1x1225x192>
tensor<[1,1,1225,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1225 + d1 * 35 + d2, d3), memory_config: (1225, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x384>>, >
shape: #ttnn.shape<1x1x289x384>
tensor<[1,1,289,384,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 384, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1225x64>>, >
shape: #ttnn.shape<1x1x1225x64>
tensor<[1,1,1225,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1225 + d1 * 35 + d2, d3), memory_config: (1225, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1225x96>>, >
shape: #ttnn.shape<1x1x1225x96>
tensor<[1,1,1225,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1225 + d1 * 35 + d2, d3), memory_config: (1225, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x128>>, >
shape: #ttnn.shape<1x1x4096x128>
tensor<[1,1,4096,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x256>>, >
shape: #ttnn.shape<1x1x64x256>
tensor<[1,1,64,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x256>>, >
shape: #ttnn.shape<1x1x64x256>
tensor<[1,1,64,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x448>>, >
shape: #ttnn.shape<1x1x64x448>
tensor<[1,1,64,448,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 448, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<65536x192>>, >
shape: #ttnn.shape<1x1x65536x192>
tensor<[1,1,65536,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 65536 + d1 * 256 + d2, d3), memory_config: (65536, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1536x8>>, >
shape: #ttnn.shape<1x192x256x256>
tensor<[1,192,256,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49152 + d1 * 256 + d2, d3), memory_config: (1536, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1024>>, >
shape: #ttnn.shape<1x1x196x1024>
tensor<[1,1,196,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x1024x14x14>
tensor<[1,1024,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 14 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x2>>, >
shape: #ttnn.shape<1x128x56x56>
tensor<[1,128,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 56 + d2, d3), memory_config: (224, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x16>>, >
shape: #ttnn.shape<1x1x12544x16>
tensor<[1,1,12544,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<50176x16>>, >
shape: #ttnn.shape<1x1x50176x16>
tensor<[1,1,50176,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 50176 + d1 * 224 + d2, d3), memory_config: (50176, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x32>>, >
shape: #ttnn.shape<1x1x12544x32>
tensor<[1,1,12544,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<50176x64>>, >
shape: #ttnn.shape<1x1x50176x64>
tensor<[1,1,50176,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 50176 + d1 * 224 + d2, d3), memory_config: (50176, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x7>>, >
shape: #ttnn.shape<1x64x224x224>
tensor<[1,64,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 224 + d2, d3), memory_config: (448, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<50176x64>>, >
shape: #ttnn.shape<1x1x50176x64>
tensor<[1,1,50176,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 50176 + d1 * 224 + d2, d3), memory_config: (50176, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x64>>, >
shape: #ttnn.shape<1x1x12544x64>
tensor<[1,1,12544,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x64>>, >
shape: #ttnn.shape<1x1x12544x64>
tensor<[1,1,12544,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x768>>, >
shape: #ttnn.shape<1x1x196x768>
tensor<[1,1,196,768,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x768x14x14>
tensor<[1,768,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 14 + d2, d3), memory_config: (336, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x768>>, >
shape: #ttnn.shape<1x1x49x768>
tensor<[1,1,49,768,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x32>>, >
shape: #ttnn.shape<1x1x12544x32>
tensor<[1,1,12544,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14400x32>>, >
shape: #ttnn.shape<1x1x14400x32>
tensor<[1,1,14400,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 120 + d2, d3), memory_config: (14400, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<65536x32>>, >
shape: #ttnn.shape<1x1x65536x32>
tensor<[1,1,65536,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 65536 + d1 * 256 + d2, d3), memory_config: (65536, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16900x32>>, >
shape: #ttnn.shape<1x1x16900x32>
tensor<[1,1,16900,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16900 + d1 * 130 + d2, d3), memory_config: (16900, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<22201x32>>, >
shape: #ttnn.shape<1x1x22201x32>
tensor<[1,1,22201,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22201 + d1 * 149 + d2, d3), memory_config: (22201, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<22500x32>>, >
shape: #ttnn.shape<1x1x22500x32>
tensor<[1,1,22500,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22500 + d1 * 150 + d2, d3), memory_config: (22500, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<22500x32>>, >
shape: #ttnn.shape<1x1x22500x32>
tensor<[1,1,22500,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22500 + d1 * 150 + d2, d3), memory_config: (22500, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<25600x16>>, >
shape: #ttnn.shape<1x1x25600x16>
tensor<[1,1,25600,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25600 + d1 * 160 + d2, d3), memory_config: (25600, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x768>>, >
shape: #ttnn.shape<1x1x256x768>
tensor<[1,1,256,768,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 32 + d2, d3), memory_config: (256, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<192x1>>, >
shape: #ttnn.shape<1x768x8x32>
tensor<[1,768,8,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6144 + d1 * 8 + d2, d3), memory_config: (192, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<36100x32>>, >
shape: #ttnn.shape<1x1x36100x32>
tensor<[1,1,36100,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 36100 + d1 * 190 + d2, d3), memory_config: (36100, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<192x768>>, >
shape: #ttnn.shape<1x1x192x768>
tensor<[1,1,192,768,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 192 + d1 * 16 + d2, d3), memory_config: (192, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<288x1>>, >
shape: #ttnn.shape<1x768x12x16>
tensor<[1,768,12,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9216 + d1 * 12 + d2, d3), memory_config: (288, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<19200x64>>, >
shape: #ttnn.shape<1x1x19200x64>
tensor<[1,1,19200,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 160 + d2, d3), memory_config: (19200, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<240x5>>, >
shape: #ttnn.shape<1x64x120x160>
tensor<[1,64,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7680 + d1 * 120 + d2, d3), memory_config: (240, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x192>>, >
shape: #ttnn.shape<1x1x16384x192>
tensor<[1,1,16384,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<768x4>>, >
shape: #ttnn.shape<1x192x128x128>
tensor<[1,192,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24576 + d1 * 128 + d2, d3), memory_config: (768, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<262144x32>>, >
shape: #ttnn.shape<1x1x262144x32>
tensor<[1,1,262144,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 262144 + d1 * 512 + d2, d3), memory_config: (262144, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<65536x32>>, >
shape: #ttnn.shape<1x1x65536x32>
tensor<[1,1,65536,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 65536 + d1 * 256 + d2, d3), memory_config: (65536, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x32>>, >
shape: #ttnn.shape<1x1x16384x32>
tensor<[1,1,16384,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x4>>, >
shape: #ttnn.shape<1x32x128x128>
tensor<[1,32,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 128 + d2, d3), memory_config: (128, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1344x192>>, >
shape: #ttnn.shape<1x1x1344x192>
tensor<[1,1,1344,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 42 + d2, d3), memory_config: (1344, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<192x2>>, >
shape: #ttnn.shape<1x192x32x42>
tensor<[1,192,32,42,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6144 + d1 * 32 + d2, d3), memory_config: (192, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1369x1280>>, >
shape: #ttnn.shape<1x1x1369x1280>
tensor<[1,1,1369,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1369 + d1 * 37 + d2, d3), memory_config: (1369, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1480x2>>, >
shape: #ttnn.shape<1x1280x37x37>
tensor<[1,1280,37,37,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 47360 + d1 * 37 + d2, d3), memory_config: (1480, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<230400x64>>, >
shape: #ttnn.shape<1x1x230400x64>
tensor<[1,1,230400,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 230400 + d1 * 640 + d2, d3), memory_config: (230400, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x120>>, >
shape: #ttnn.shape<1x1x196x120>
tensor<[1,1,196,120,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x240>>, >
shape: #ttnn.shape<1x1x196x240>
tensor<[1,1,196,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x40>>, >
shape: #ttnn.shape<1x1x196x40>
tensor<[1,1,196,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x80>>, >
shape: #ttnn.shape<1x1x196x80>
tensor<[1,1,196,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x120>>, >
shape: #ttnn.shape<1x1x784x120>
tensor<[1,1,784,120,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x240>>, >
shape: #ttnn.shape<1x1x784x240>
tensor<[1,1,784,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x40>>, >
shape: #ttnn.shape<1x1x196x40>
tensor<[1,1,196,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x60>>, >
shape: #ttnn.shape<1x1x784x60>
tensor<[1,1,784,60,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 60, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<900x240>>, >
shape: #ttnn.shape<1x1x900x240>
tensor<[1,1,900,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 900 + d1 * 30 + d2, d3), memory_config: (900, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1600x120>>, >
shape: #ttnn.shape<1x1x1600x120>
tensor<[1,1,1600,120,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 40 + d2, d3), memory_config: (1600, 120, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1600x240>>, >
shape: #ttnn.shape<1x1x1600x240>
tensor<[1,1,1600,240,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 40 + d2, d3), memory_config: (1600, 240, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x14>>, >
shape: #ttnn.shape<1x1x3136x14>
tensor<[1,1,3136,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 14, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x116>>, >
shape: #ttnn.shape<1x1x196x116>
tensor<[1,1,196,116,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 116, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x1280>>, >
shape: #ttnn.shape<1x1x144x1280>
tensor<[1,1,144,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (144, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x256>>, >
shape: #ttnn.shape<1x1x3136x256>
tensor<[1,1,3136,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x512>>, >
shape: #ttnn.shape<1x1x64x512>
tensor<[1,1,64,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x168>>, >
shape: #ttnn.shape<1x1x784x168>
tensor<[1,1,784,168,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 168, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x16>>, >
shape: #ttnn.shape<1x1x784x16>
tensor<[1,1,784,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x24>>, >
shape: #ttnn.shape<1x1x100x24>
tensor<[1,1,100,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 24, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x24x10x10>
tensor<[1,24,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 240 + d1 * 10 + d2, d3), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x256>>, >
shape: #ttnn.shape<1x1x100x256>
tensor<[1,1,100,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 256, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x480>>, >
shape: #ttnn.shape<1x1x100x480>
tensor<[1,1,100,480,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 480, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x480>>, >
shape: #ttnn.shape<1x1x100x480>
tensor<[1,1,100,480,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 480, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x546>>, >
shape: #ttnn.shape<1x1x100x546>
tensor<[1,1,100,546,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 546, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<171x1>>, >
shape: #ttnn.shape<1x546x10x10>
tensor<[1,546,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5460 + d1 * 10 + d2, d3), memory_config: (171, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x80>>, >
shape: #ttnn.shape<1x1x100x80>
tensor<[1,1,100,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 80, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x112>>, >
shape: #ttnn.shape<1x1x196x112>
tensor<[1,1,196,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 112, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x16>>, >
shape: #ttnn.shape<1x1x196x16>
tensor<[1,1,196,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x192>>, >
shape: #ttnn.shape<1x1x196x192>
tensor<[1,1,196,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x480>>, >
shape: #ttnn.shape<1x1x196x480>
tensor<[1,1,196,480,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 480, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x480>>, >
shape: #ttnn.shape<1x1x196x480>
tensor<[1,1,196,480,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 480, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x56>>, >
shape: #ttnn.shape<1x1x196x56>
tensor<[1,1,196,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 56, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x64>>, >
shape: #ttnn.shape<1x1x196x64>
tensor<[1,1,196,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x80>>, >
shape: #ttnn.shape<1x1x196x80>
tensor<[1,1,196,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x96>>, >
shape: #ttnn.shape<1x1x196x96>
tensor<[1,1,196,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<225x112>>, >
shape: #ttnn.shape<1x1x225x112>
tensor<[1,1,225,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 225 + d1 * 15 + d2, d3), memory_config: (225, 112, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<225x480>>, >
shape: #ttnn.shape<1x1x225x480>
tensor<[1,1,225,480,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 225 + d1 * 15 + d2, d3), memory_config: (225, 480, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<225x480>>, >
shape: #ttnn.shape<1x1x225x480>
tensor<[1,1,225,480,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 225 + d1 * 15 + d2, d3), memory_config: (225, 480, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<225x80>>, >
shape: #ttnn.shape<1x1x225x80>
tensor<[1,1,225,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 225 + d1 * 15 + d2, d3), memory_config: (225, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x120>>, >
shape: #ttnn.shape<1x1x1x120>
tensor<[1,1,1,120,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x120x1x1>
tensor<[1,120,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 + d2, d3), memory_config: (4, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x112>>, >
shape: #ttnn.shape<1x1x400x112>
tensor<[1,1,400,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 112, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x480>>, >
shape: #ttnn.shape<1x1x400x480>
tensor<[1,1,400,480,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 480, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x480>>, >
shape: #ttnn.shape<1x1x49x480>
tensor<[1,1,49,480,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 480, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x480>>, >
shape: #ttnn.shape<1x1x49x480>
tensor<[1,1,49,480,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 480, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x480>>, >
shape: #ttnn.shape<1x1x49x480>
tensor<[1,1,49,480,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 480, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x48>>, >
shape: #ttnn.shape<1x1x3136x48>
tensor<[1,1,3136,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 48, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1089x288>>, >
shape: #ttnn.shape<1x1x1089x288>
tensor<[1,1,1089,288,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1089 + d1 * 33 + d2, d3), memory_config: (1089, 288, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1444x288>>, >
shape: #ttnn.shape<1x1x1444x288>
tensor<[1,1,1444,288,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1444 + d1 * 38 + d2, d3), memory_config: (1444, 288, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x12>>, >
shape: #ttnn.shape<1x1x3136x12>
tensor<[1,1,3136,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 12, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x320>>, >
shape: #ttnn.shape<1x1x4096x320>
tensor<[1,1,4096,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<1x320x64x64>
tensor<[1,320,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 64 + d2, d3), memory_config: (640, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1024>>, >
shape: #ttnn.shape<1x1x196x1024>
tensor<[1,1,196,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1024>>, >
shape: #ttnn.shape<1x1x49x1024>
tensor<[1,1,49,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x1024x7x7>
tensor<[1,1024,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 7 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1024>>, >
shape: #ttnn.shape<1x1x196x1024>
tensor<[1,1,196,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x112>>, >
shape: #ttnn.shape<1x1x196x112>
tensor<[1,1,196,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 112, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x144>>, >
shape: #ttnn.shape<1x1x196x144>
tensor<[1,1,196,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x160>>, >
shape: #ttnn.shape<1x1x196x160>
tensor<[1,1,196,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x192>>, >
shape: #ttnn.shape<1x1x196x192>
tensor<[1,1,196,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x24>>, >
shape: #ttnn.shape<1x1x196x24>
tensor<[1,1,196,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x1x196x256>
tensor<[1,1,196,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x32>>, >
shape: #ttnn.shape<1x1x196x32>
tensor<[1,1,196,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x512>>, >
shape: #ttnn.shape<1x1x196x512>
tensor<[1,1,196,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x512>>, >
shape: #ttnn.shape<1x1x49x512>
tensor<[1,1,49,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x512>>, >
shape: #ttnn.shape<1x1x196x512>
tensor<[1,1,196,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x512>>, >
shape: #ttnn.shape<1x1x196x512>
tensor<[1,1,196,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x512>>, >
shape: #ttnn.shape<1x1x196x512>
tensor<[1,1,196,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x512>>, >
shape: #ttnn.shape<1x1x49x512>
tensor<[1,1,49,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x64>>, >
shape: #ttnn.shape<1x1x196x64>
tensor<[1,1,196,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<300x64>>, >
shape: #ttnn.shape<1x1x300x64>
tensor<[1,1,300,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 300 + d1 * 20 + d2, d3), memory_config: (300, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x64x15x20>
tensor<[1,64,15,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 15 + d2, d3), memory_config: (30, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1024>>, >
shape: #ttnn.shape<1x1x256x1024>
tensor<[1,1,256,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x255>>, >
shape: #ttnn.shape<1x1x256x255>
tensor<[1,1,256,255,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 255, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x255x16x16>
tensor<[1,255,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4080 + d1 * 16 + d2, d3), memory_config: (128, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x256>>, >
shape: #ttnn.shape<1x1x256x256>
tensor<[1,1,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x512>>, >
shape: #ttnn.shape<1x1x256x512>
tensor<[1,1,256,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x512>>, >
shape: #ttnn.shape<1x1x256x512>
tensor<[1,1,256,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1000>>, >
shape: #ttnn.shape<1x1x1x1000>
tensor<[1,1,1,1000,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1000, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x1000x1x1>
tensor<[1,1000,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1000 + d1 + d2, d3), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x512>>, >
shape: #ttnn.shape<1x1x1x512>
tensor<[1,1,1,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x512x1x1>
tensor<[1,512,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 + d2, d3), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<920x2048>>, >
shape: #ttnn.shape<1x1x920x2048>
tensor<[1,1,920,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 920 + d1 * 40 + d2, d3), memory_config: (920, 2048, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<920x512>>, >
shape: #ttnn.shape<1x1x920x512>
tensor<[1,1,920,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 920 + d1 * 40 + d2, d3), memory_config: (920, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x1024>>, >
shape: #ttnn.shape<1x1x784x1024>
tensor<[1,1,784,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1024>>, >
shape: #ttnn.shape<1x1x196x1024>
tensor<[1,1,196,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x128x28x28>
tensor<[1,128,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 28 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x19>>, >
shape: #ttnn.shape<1x1x784x19>
tensor<[1,1,784,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 19, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<17x1>>, >
shape: #ttnn.shape<1x19x28x28>
tensor<[1,19,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 532 + d1 * 28 + d2, d3), memory_config: (17, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x256>>, >
shape: #ttnn.shape<1x1x784x256>
tensor<[1,1,784,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x38>>, >
shape: #ttnn.shape<1x1x784x38>
tensor<[1,1,784,38,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 38, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<34x1>>, >
shape: #ttnn.shape<1x38x28x28>
tensor<[1,38,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1064 + d1 * 28 + d2, d3), memory_config: (34, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x512x28x28>
tensor<[1,512,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 28 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x512>>, >
shape: #ttnn.shape<1x1x196x512>
tensor<[1,1,196,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1024>>, >
shape: #ttnn.shape<1x1x256x1024>
tensor<[1,1,256,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x128>>, >
shape: #ttnn.shape<1x1x1024x128>
tensor<[1,1,1024,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x255>>, >
shape: #ttnn.shape<1x1x1024x255>
tensor<[1,1,1024,255,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 255, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<255x1>>, >
shape: #ttnn.shape<1x255x32x32>
tensor<[1,255,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8160 + d1 * 32 + d2, d3), memory_config: (255, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x256>>, >
shape: #ttnn.shape<1x1x1024x256>
tensor<[1,1,1024,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x256>>, >
shape: #ttnn.shape<1x1x1024x256>
tensor<[1,1,1024,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<920x512>>, >
shape: #ttnn.shape<1x1x920x512>
tensor<[1,1,920,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 920 + d1 * 40 + d2, d3), memory_config: (920, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x256>>, >
shape: #ttnn.shape<1x1x3136x256>
tensor<[1,1,3136,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<25x128>>, >
shape: #ttnn.shape<1x1x25x128>
tensor<[1,1,25,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25 + d1 * 5 + d2, d3), memory_config: (25, 128, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<25x24>>, >
shape: #ttnn.shape<1x1x25x24>
tensor<[1,1,25,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25 + d1 * 5 + d2, d3), memory_config: (25, 24, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x24x5x5>
tensor<[1,24,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 * 5 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<25x512>>, >
shape: #ttnn.shape<1x1x25x512>
tensor<[1,1,25,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25 + d1 * 5 + d2, d3), memory_config: (25, 512, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<25x546>>, >
shape: #ttnn.shape<1x1x25x546>
tensor<[1,1,25,546,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25 + d1 * 5 + d2, d3), memory_config: (25, 546, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<86x1>>, >
shape: #ttnn.shape<1x546x5x5>
tensor<[1,546,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2730 + d1 * 5 + d2, d3), memory_config: (86, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4800x512>>, >
shape: #ttnn.shape<1x1x4800x512>
tensor<[1,1,4800,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 80 + d2, d3), memory_config: (4800, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x3>>, >
shape: #ttnn.shape<1x512x60x80>
tensor<[1,512,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 60 + d2, d3), memory_config: (960, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1024>>, >
shape: #ttnn.shape<1x1x49x1024>
tensor<[1,1,49,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x2048>>, >
shape: #ttnn.shape<1x1x49x2048>
tensor<[1,1,49,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 2048, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x512>>, >
shape: #ttnn.shape<1x1x49x512>
tensor<[1,1,49,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x256>>, >
shape: #ttnn.shape<1x1x64x256>
tensor<[1,1,64,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x256>>, >
shape: #ttnn.shape<1x1x64x256>
tensor<[1,1,64,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3600x1024>>, >
shape: #ttnn.shape<1x1x3600x1024>
tensor<[1,1,3600,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 80 + d2, d3), memory_config: (3600, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14400x128>>, >
shape: #ttnn.shape<1x1x14400x128>
tensor<[1,1,14400,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 160 + d2, d3), memory_config: (14400, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14400x256>>, >
shape: #ttnn.shape<1x1x14400x256>
tensor<[1,1,14400,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 160 + d2, d3), memory_config: (14400, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x160>>, >
shape: #ttnn.shape<1x1x196x160>
tensor<[1,1,196,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x1x196x256>
tensor<[1,1,196,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x32>>, >
shape: #ttnn.shape<1x1x196x32>
tensor<[1,1,196,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x120>>, >
shape: #ttnn.shape<1x1x289x120>
tensor<[1,1,289,120,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x528>>, >
shape: #ttnn.shape<1x1x289x528>
tensor<[1,1,289,528,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 528, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x528>>, >
shape: #ttnn.shape<1x1x289x528>
tensor<[1,1,289,528,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 528, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x88>>, >
shape: #ttnn.shape<1x1x289x88>
tensor<[1,1,289,88,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 88, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x196>>, >
shape: #ttnn.shape<1x1x196x196>
tensor<[1,1,196,196,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 196, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x24>>, >
shape: #ttnn.shape<1x1x3136x24>
tensor<[1,1,3136,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x56>>, >
shape: #ttnn.shape<1x1x196x56>
tensor<[1,1,196,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 56, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2304x336>>, >
shape: #ttnn.shape<1x1x2304x336>
tensor<[1,1,2304,336,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2304 + d1 * 48 + d2, d3), memory_config: (2304, 336, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x576>>, >
shape: #ttnn.shape<1x1x196x576>
tensor<[1,1,196,576,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 576, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x576>>, >
shape: #ttnn.shape<1x1x49x576>
tensor<[1,1,49,576,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 576, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x96>>, >
shape: #ttnn.shape<1x1x196x96>
tensor<[1,1,196,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x136>>, >
shape: #ttnn.shape<1x1x361x136>
tensor<[1,1,361,136,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 136, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x576>>, >
shape: #ttnn.shape<1x1x361x576>
tensor<[1,1,361,576,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 576, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x576>>, >
shape: #ttnn.shape<1x1x361x576>
tensor<[1,1,361,576,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 576, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x96>>, >
shape: #ttnn.shape<1x1x361x96>
tensor<[1,1,361,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x160>>, >
shape: #ttnn.shape<1x1x49x160>
tensor<[1,1,49,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x232>>, >
shape: #ttnn.shape<1x1x1x232>
tensor<[1,1,1,232,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 232, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x232x1x1>
tensor<[1,232,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 232 + d1 + d2, d3), memory_config: (8, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x696>>, >
shape: #ttnn.shape<1x1x1x696>
tensor<[1,1,1,696,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 696, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<22x1>>, >
shape: #ttnn.shape<1x696x1x1>
tensor<[1,696,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 696 + d1 + d2, d3), memory_config: (22, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x20>>, >
shape: #ttnn.shape<1x1x784x20>
tensor<[1,1,784,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 20, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x60>>, >
shape: #ttnn.shape<1x1x784x60>
tensor<[1,1,784,60,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 60, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x28>>, >
shape: #ttnn.shape<1x1x784x28>
tensor<[1,1,784,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 28, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1280>>, >
shape: #ttnn.shape<1x1x256x1280>
tensor<[1,1,256,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x1280x16x16>
tensor<[1,1280,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 16 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1280>>, >
shape: #ttnn.shape<1x1x256x1280>
tensor<[1,1,256,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x1280x16x16>
tensor<[1,1280,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 16 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x640>>, >
shape: #ttnn.shape<1x1x1024x640>
tensor<[1,1,1024,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x640>>, >
shape: #ttnn.shape<1x1x1024x640>
tensor<[1,1,1024,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x640>>, >
shape: #ttnn.shape<1x1x1024x640>
tensor<[1,1,1024,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x640>>, >
shape: #ttnn.shape<1x1x256x640>
tensor<[1,1,256,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x640x16x16>
tensor<[1,640,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 16 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x320>>, >
shape: #ttnn.shape<1x1x4096x320>
tensor<[1,1,4096,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<1x320x64x64>
tensor<[1,320,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 64 + d2, d3), memory_config: (640, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x320>>, >
shape: #ttnn.shape<1x1x4096x320>
tensor<[1,1,4096,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<1x320x64x64>
tensor<[1,320,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 64 + d2, d3), memory_config: (640, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x640>>, >
shape: #ttnn.shape<1x1x4096x640>
tensor<[1,1,4096,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x2>>, >
shape: #ttnn.shape<1x640x64x64>
tensor<[1,640,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 64 + d2, d3), memory_config: (1280, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x160>>, >
shape: #ttnn.shape<1x1x49x160>
tensor<[1,1,49,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x128>>, >
shape: #ttnn.shape<1x1x12544x128>
tensor<[1,1,12544,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x4>>, >
shape: #ttnn.shape<1x128x112x112>
tensor<[1,128,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 112 + d2, d3), memory_config: (448, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x128>>, >
shape: #ttnn.shape<1x1x12544x128>
tensor<[1,1,12544,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x64>>, >
shape: #ttnn.shape<1x1x12544x64>
tensor<[1,1,12544,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x64>>, >
shape: #ttnn.shape<1x1x3136x64>
tensor<[1,1,3136,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x64>>, >
shape: #ttnn.shape<1x1x12544x64>
tensor<[1,1,12544,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x64>>, >
shape: #ttnn.shape<1x1x3136x64>
tensor<[1,1,3136,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4800x128>>, >
shape: #ttnn.shape<1x1x4800x128>
tensor<[1,1,4800,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 80 + d2, d3), memory_config: (4800, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<240x3>>, >
shape: #ttnn.shape<1x128x60x80>
tensor<[1,128,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7680 + d1 * 60 + d2, d3), memory_config: (240, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<19200x32>>, >
shape: #ttnn.shape<1x1x19200x32>
tensor<[1,1,19200,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 160 + d2, d3), memory_config: (19200, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x5>>, >
shape: #ttnn.shape<1x32x120x160>
tensor<[1,32,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 120 + d2, d3), memory_config: (120, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<300x64>>, >
shape: #ttnn.shape<1x1x300x64>
tensor<[1,1,300,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 300 + d1 * 20 + d2, d3), memory_config: (300, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x64x15x20>
tensor<[1,64,15,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 15 + d2, d3), memory_config: (30, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x128>>, >
shape: #ttnn.shape<1x1x16384x128>
tensor<[1,1,16384,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x128>>, >
shape: #ttnn.shape<1x1x4096x128>
tensor<[1,1,4096,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x32>>, >
shape: #ttnn.shape<1x1x16384x32>
tensor<[1,1,16384,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x64>>, >
shape: #ttnn.shape<1x1x16384x64>
tensor<[1,1,16384,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x64>>, >
shape: #ttnn.shape<1x1x16384x64>
tensor<[1,1,16384,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5329x96>>, >
shape: #ttnn.shape<1x1x5329x96>
tensor<[1,1,5329,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5329 + d1 * 73 + d2, d3), memory_config: (5329, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x384>>, >
shape: #ttnn.shape<1x1x196x384>
tensor<[1,1,196,384,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 384, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<22500x128>>, >
shape: #ttnn.shape<1x1x22500x128>
tensor<[1,1,22500,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22500 + d1 * 150 + d2, d3), memory_config: (22500, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5625x128>>, >
shape: #ttnn.shape<1x1x5625x128>
tensor<[1,1,5625,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5625 + d1 * 75 + d2, d3), memory_config: (5625, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<22500x64>>, >
shape: #ttnn.shape<1x1x22500x64>
tensor<[1,1,22500,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22500 + d1 * 150 + d2, d3), memory_config: (22500, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<6400x64>>, >
shape: #ttnn.shape<1x1x6400x64>
tensor<[1,1,6400,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6400 + d1 * 80 + d2, d3), memory_config: (6400, 64, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<57600x256>>, >
shape: #ttnn.shape<1x1x57600x256>
tensor<[1,1,57600,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 57600 + d1 * 320 + d2, d3), memory_config: (57600, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<57600x64>>, >
shape: #ttnn.shape<1x1x57600x64>
tensor<[1,1,57600,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 57600 + d1 * 320 + d2, d3), memory_config: (57600, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<57600x64>>, >
shape: #ttnn.shape<1x1x57600x64>
tensor<[1,1,57600,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 57600 + d1 * 320 + d2, d3), memory_config: (57600, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x1x1x128>
tensor<[1,1,1,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 128, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<50176x1>>, >
shape: #ttnn.shape<1x1x50176x1>
tensor<[1,1,50176,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 50176 + d1 * 224 + d2, d3), memory_config: (50176, 1, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x7>>, >
shape: #ttnn.shape<1x1x224x224>
tensor<[1,1,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 224 + d2, d3), memory_config: (7, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<50176x64>>, >
shape: #ttnn.shape<1x1x50176x64>
tensor<[1,1,50176,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 50176 + d1 * 224 + d2, d3), memory_config: (50176, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x7>>, >
shape: #ttnn.shape<1x64x224x224>
tensor<[1,64,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 224 + d2, d3), memory_config: (448, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<50176x64>>, >
shape: #ttnn.shape<1x1x50176x64>
tensor<[1,1,50176,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 50176 + d1 * 224 + d2, d3), memory_config: (50176, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x128>>, >
shape: #ttnn.shape<1x1x16384x128>
tensor<[1,1,16384,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<65536x32>>, >
shape: #ttnn.shape<1x1x65536x32>
tensor<[1,1,65536,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 65536 + d1 * 256 + d2, d3), memory_config: (65536, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<65536x32>>, >
shape: #ttnn.shape<1x1x65536x32>
tensor<[1,1,65536,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 65536 + d1 * 256 + d2, d3), memory_config: (65536, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x256>>, >
shape: #ttnn.shape<1x1x784x256>
tensor<[1,1,784,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x64>>, >
shape: #ttnn.shape<1x1x784x64>
tensor<[1,1,784,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<1x1x1x64>
tensor<[1,1,1,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 64, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1200x32>>, >
shape: #ttnn.shape<1x1x1200x32>
tensor<[1,1,1200,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 40 + d2, d3), memory_config: (1200, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x2>>, >
shape: #ttnn.shape<1x32x30x40>
tensor<[1,32,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 30 + d2, d3), memory_config: (30, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1225x96>>, >
shape: #ttnn.shape<1x1x1225x96>
tensor<[1,1,1225,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1225 + d1 * 35 + d2, d3), memory_config: (1225, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<307200x1>>, >
shape: #ttnn.shape<1x1x307200x1>
tensor<[1,1,307200,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 307200 + d1 * 640 + d2, d3), memory_config: (307200, 1, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<15x20>>, >
shape: #ttnn.shape<1x1x480x640>
tensor<[1,1,480,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 480 + d1 * 480 + d2, d3), memory_config: (15, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<307200x64>>, >
shape: #ttnn.shape<1x1x307200x64>
tensor<[1,1,307200,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 307200 + d1 * 640 + d2, d3), memory_config: (307200, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x20>>, >
shape: #ttnn.shape<1x64x480x640>
tensor<[1,64,480,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 480 + d2, d3), memory_config: (960, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x14>>, >
shape: #ttnn.shape<1x1x3136x14>
tensor<[1,1,3136,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 14, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x192>>, >
shape: #ttnn.shape<1x1x3136x192>
tensor<[1,1,3136,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x24>>, >
shape: #ttnn.shape<1x1x3136x24>
tensor<[1,1,3136,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x256>>, >
shape: #ttnn.shape<1x1x3136x256>
tensor<[1,1,3136,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x64>>, >
shape: #ttnn.shape<1x1x3136x64>
tensor<[1,1,3136,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x64>>, >
shape: #ttnn.shape<1x1x3136x64>
tensor<[1,1,3136,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4800x32>>, >
shape: #ttnn.shape<1x1x4800x32>
tensor<[1,1,4800,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 80 + d2, d3), memory_config: (4800, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x3>>, >
shape: #ttnn.shape<1x32x60x80>
tensor<[1,32,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 60 + d2, d3), memory_config: (60, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x128>>, >
shape: #ttnn.shape<1x1x4096x128>
tensor<[1,1,4096,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x160>>, >
shape: #ttnn.shape<1x1x1024x160>
tensor<[1,1,1024,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<160x1>>, >
shape: #ttnn.shape<1x160x32x32>
tensor<[1,160,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5120 + d1 * 32 + d2, d3), memory_config: (160, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x64>>, >
shape: #ttnn.shape<1x1x4096x64>
tensor<[1,1,4096,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x64>>, >
shape: #ttnn.shape<1x1x4096x64>
tensor<[1,1,4096,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x64>>, >
shape: #ttnn.shape<1x1x256x64>
tensor<[1,1,256,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x64x16x16>
tensor<[1,64,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 16 + d2, d3), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5329x64>>, >
shape: #ttnn.shape<1x1x5329x64>
tensor<[1,1,5329,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5329 + d1 * 73 + d2, d3), memory_config: (5329, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5329x64>>, >
shape: #ttnn.shape<1x1x5329x64>
tensor<[1,1,5329,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5329 + d1 * 73 + d2, d3), memory_config: (5329, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5041x96>>, >
shape: #ttnn.shape<1x1x5041x96>
tensor<[1,1,5041,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5041 + d1 * 71 + d2, d3), memory_config: (5041, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<6400x24>>, >
shape: #ttnn.shape<1x1x6400x24>
tensor<[1,1,6400,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6400 + d1 * 80 + d2, d3), memory_config: (6400, 24, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x640>>, >
shape: #ttnn.shape<1x1x196x640>
tensor<[1,1,196,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x80>>, >
shape: #ttnn.shape<1x1x100x80>
tensor<[1,1,100,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 80, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x112>>, >
shape: #ttnn.shape<1x1x196x112>
tensor<[1,1,196,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 112, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x56>>, >
shape: #ttnn.shape<1x1x196x56>
tensor<[1,1,196,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 56, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x672>>, >
shape: #ttnn.shape<1x1x196x672>
tensor<[1,1,196,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x672>>, >
shape: #ttnn.shape<1x1x196x672>
tensor<[1,1,196,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x672>>, >
shape: #ttnn.shape<1x1x49x672>
tensor<[1,1,49,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<225x112>>, >
shape: #ttnn.shape<1x1x225x112>
tensor<[1,1,225,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 225 + d1 * 15 + d2, d3), memory_config: (225, 112, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<225x672>>, >
shape: #ttnn.shape<1x1x225x672>
tensor<[1,1,225,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 225 + d1 * 15 + d2, d3), memory_config: (225, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x672>>, >
shape: #ttnn.shape<1x1x49x672>
tensor<[1,1,49,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x672>>, >
shape: #ttnn.shape<1x1x64x672>
tensor<[1,1,64,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x168>>, >
shape: #ttnn.shape<1x1x1x168>
tensor<[1,1,1,168,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 168, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<1x168x1x1>
tensor<[1,168,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 168 + d1 + d2, d3), memory_config: (6, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x112>>, >
shape: #ttnn.shape<1x1x400x112>
tensor<[1,1,400,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 112, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x24>>, >
shape: #ttnn.shape<1x1x400x24>
tensor<[1,1,400,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 24, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<15x1>>, >
shape: #ttnn.shape<1x24x20x20>
tensor<[1,24,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 480 + d1 * 20 + d2, d3), memory_config: (15, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x546>>, >
shape: #ttnn.shape<1x1x400x546>
tensor<[1,1,400,546,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 546, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<342x1>>, >
shape: #ttnn.shape<1x546x20x20>
tensor<[1,546,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10920 + d1 * 20 + d2, d3), memory_config: (342, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x672>>, >
shape: #ttnn.shape<1x1x400x672>
tensor<[1,1,400,672,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 672, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x672>>, >
shape: #ttnn.shape<1x1x100x672>
tensor<[1,1,100,672,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 672, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<576x112>>, >
shape: #ttnn.shape<1x1x576x112>
tensor<[1,1,576,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (576, 112, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<576x160>>, >
shape: #ttnn.shape<1x1x576x160>
tensor<[1,1,576,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (576, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<576x672>>, >
shape: #ttnn.shape<1x1x576x672>
tensor<[1,1,576,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (576, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<576x672>>, >
shape: #ttnn.shape<1x1x576x672>
tensor<[1,1,576,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (576, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x1344>>, >
shape: #ttnn.shape<1x1x784x1344>
tensor<[1,1,784,1344,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 1344, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1344>>, >
shape: #ttnn.shape<1x1x196x1344>
tensor<[1,1,196,1344,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1344, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x672>>, >
shape: #ttnn.shape<1x1x784x672>
tensor<[1,1,784,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x672>>, >
shape: #ttnn.shape<1x1x784x672>
tensor<[1,1,784,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x672>>, >
shape: #ttnn.shape<1x1x784x672>
tensor<[1,1,784,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x160>>, >
shape: #ttnn.shape<1x1x49x160>
tensor<[1,1,49,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x192>>, >
shape: #ttnn.shape<1x1x49x192>
tensor<[1,1,49,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x672>>, >
shape: #ttnn.shape<1x1x49x672>
tensor<[1,1,49,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x672>>, >
shape: #ttnn.shape<1x1x49x672>
tensor<[1,1,49,672,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 672, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x80>>, >
shape: #ttnn.shape<1x1x49x80>
tensor<[1,1,49,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x192>>, >
shape: #ttnn.shape<1x1x64x192>
tensor<[1,1,64,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 8 + d2, d3), memory_config: (64, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x40>>, >
shape: #ttnn.shape<1x1x196x40>
tensor<[1,1,196,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x174>>, >
shape: #ttnn.shape<1x1x1x174>
tensor<[1,1,1,174,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 174, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<1x174x1x1>
tensor<[1,174,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 174 + d1 + d2, d3), memory_config: (6, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x58>>, >
shape: #ttnn.shape<1x1x1x58>
tensor<[1,1,1,58,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 58, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x58x1x1>
tensor<[1,58,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 58 + d1 + d2, d3), memory_config: (2, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x1392>>, >
shape: #ttnn.shape<1x1x784x1392>
tensor<[1,1,784,1392,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 1392, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1392>>, >
shape: #ttnn.shape<1x1x196x1392>
tensor<[1,1,196,1392,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 1392, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x696>>, >
shape: #ttnn.shape<1x1x784x696>
tensor<[1,1,784,696,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 696, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x696>>, >
shape: #ttnn.shape<1x1x784x696>
tensor<[1,1,784,696,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 696, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x696>>, >
shape: #ttnn.shape<1x1x784x696>
tensor<[1,1,784,696,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 696, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x120>>, >
shape: #ttnn.shape<1x1x289x120>
tensor<[1,1,289,120,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x720>>, >
shape: #ttnn.shape<1x1x289x720>
tensor<[1,1,289,720,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 720, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<81x720>>, >
shape: #ttnn.shape<1x1x81x720>
tensor<[1,1,81,720,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 81 + d1 * 9 + d2, d3), memory_config: (81, 720, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<81x208>>, >
shape: #ttnn.shape<1x1x81x208>
tensor<[1,1,81,208,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 81 + d1 * 9 + d2, d3), memory_config: (81, 208, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x1024>>, >
shape: #ttnn.shape<1x1x361x1024>
tensor<[1,1,361,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x1024>>, >
shape: #ttnn.shape<1x1x100x1024>
tensor<[1,1,100,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x728>>, >
shape: #ttnn.shape<1x1x361x728>
tensor<[1,1,361,728,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 728, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x728>>, >
shape: #ttnn.shape<1x1x361x728>
tensor<[1,1,361,728,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 728, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1444x728>>, >
shape: #ttnn.shape<1x1x1444x728>
tensor<[1,1,1444,728,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1444 + d1 * 38 + d2, d3), memory_config: (1444, 728, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x728>>, >
shape: #ttnn.shape<1x1x361x728>
tensor<[1,1,361,728,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 728, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1444x728>>, >
shape: #ttnn.shape<1x1x1444x728>
tensor<[1,1,1444,728,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1444 + d1 * 38 + d2, d3), memory_config: (1444, 728, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x728>>, >
shape: #ttnn.shape<1x1x361x728>
tensor<[1,1,361,728,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 728, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x144>>, >
shape: #ttnn.shape<1x1x49x144>
tensor<[1,1,49,144,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 144, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x18>>, >
shape: #ttnn.shape<1x1x196x18>
tensor<[1,1,196,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 18, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x36>>, >
shape: #ttnn.shape<1x1x196x36>
tensor<[1,1,196,36,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 36, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x512>>, >
shape: #ttnn.shape<1x1x196x512>
tensor<[1,1,196,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x72>>, >
shape: #ttnn.shape<1x1x196x72>
tensor<[1,1,196,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 72, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x20>>, >
shape: #ttnn.shape<1x1x1x20>
tensor<[1,1,1,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 20, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x20x1x1>
tensor<[1,20,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x1x24>
tensor<[1,1,1,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24x1x1>
tensor<[1,24,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x20>>, >
shape: #ttnn.shape<1x1x784x20>
tensor<[1,1,784,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 20, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x40>>, >
shape: #ttnn.shape<1x1x784x40>
tensor<[1,1,784,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x72>>, >
shape: #ttnn.shape<1x1x784x72>
tensor<[1,1,784,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 72, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x72>>, >
shape: #ttnn.shape<1x1x784x72>
tensor<[1,1,784,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 72, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1600x40>>, >
shape: #ttnn.shape<1x1x1600x40>
tensor<[1,1,1600,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 40 + d2, d3), memory_config: (1600, 40, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x12>>, >
shape: #ttnn.shape<1x1x3136x12>
tensor<[1,1,3136,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 12, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x24>>, >
shape: #ttnn.shape<1x1x3136x24>
tensor<[1,1,3136,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x72>>, >
shape: #ttnn.shape<1x1x3136x72>
tensor<[1,1,3136,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 72, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x72>>, >
shape: #ttnn.shape<1x1x784x72>
tensor<[1,1,784,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 72, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<6400x24>>, >
shape: #ttnn.shape<1x1x6400x24>
tensor<[1,1,6400,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6400 + d1 * 80 + d2, d3), memory_config: (6400, 24, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<6400x72>>, >
shape: #ttnn.shape<1x1x6400x72>
tensor<[1,1,6400,72,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6400 + d1 * 80 + d2, d3), memory_config: (6400, 72, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1600x72>>, >
shape: #ttnn.shape<1x1x1600x72>
tensor<[1,1,1600,72,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 40 + d2, d3), memory_config: (1600, 72, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x512>>, >
shape: #ttnn.shape<1x1x784x512>
tensor<[1,1,784,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x334>>, >
shape: #ttnn.shape<1x1x196x334>
tensor<[1,1,196,334,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 334, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x768>>, >
shape: #ttnn.shape<1x1x1x768>
tensor<[1,1,1,768,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x768x1x1>
tensor<[1,768,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 + d2, d3), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<257x27>>, >
shape: #ttnn.shape<1x1x257x27>
tensor<[1,1,257,27,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 257 + d1 + d2, d3), memory_config: (257, 27, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<257x768>>, >
shape: #ttnn.shape<1x1x257x768>
tensor<[1,1,257,768,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 257 + d1 + d2, d3), memory_config: (257, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1500x768>>, >
shape: #ttnn.shape<1x1x1500x768>
tensor<[1,1,1500,768,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1500 + d1 + d2, d3), memory_config: (1500, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x47>>, >
shape: #ttnn.shape<1x768x1500>
tensor<[1,768,1500,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 47, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x256>>, >
shape: #ttnn.shape<1x1x1024x256>
tensor<[1,1,1024,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x224>>, >
shape: #ttnn.shape<1x1x49x224>
tensor<[1,1,49,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 224, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8x3072>>, >
shape: #ttnn.shape<1x1x8x3072>
tensor<[1,1,8,3072,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (8, 3072, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x8>
tensor<[1,3072,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8x768>>, >
shape: #ttnn.shape<1x1x8x768>
tensor<[1,1,8,768,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (8, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x768x8>
tensor<[1,768,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8x768>>, >
shape: #ttnn.shape<1x1x8x768>
tensor<[1,1,8,768,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (8, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x768x8>
tensor<[1,768,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x1024>>, >
shape: #ttnn.shape<1x1x49x1024>
tensor<[1,1,49,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x16>>, >
shape: #ttnn.shape<1x1x784x16>
tensor<[1,1,784,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x34>>, >
shape: #ttnn.shape<1x1x784x34>
tensor<[1,1,784,34,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 34, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x24>>, >
shape: #ttnn.shape<1x1x3136x24>
tensor<[1,1,3136,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x272>>, >
shape: #ttnn.shape<1x1x49x272>
tensor<[1,1,49,272,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 272, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x480>>, >
shape: #ttnn.shape<1x1x100x480>
tensor<[1,1,100,480,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 480, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x100>>, >
shape: #ttnn.shape<1x1x196x100>
tensor<[1,1,196,100,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 100, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x112>>, >
shape: #ttnn.shape<1x1x196x112>
tensor<[1,1,196,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 112, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x184>>, >
shape: #ttnn.shape<1x1x196x184>
tensor<[1,1,196,184,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 184, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x200>>, >
shape: #ttnn.shape<1x1x196x200>
tensor<[1,1,196,200,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 200, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x240>>, >
shape: #ttnn.shape<1x1x196x240>
tensor<[1,1,196,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x480>>, >
shape: #ttnn.shape<1x1x196x480>
tensor<[1,1,196,480,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 480, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x80>>, >
shape: #ttnn.shape<1x1x196x80>
tensor<[1,1,196,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x92>>, >
shape: #ttnn.shape<1x1x196x92>
tensor<[1,1,196,92,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 92, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<225x480>>, >
shape: #ttnn.shape<1x1x225x480>
tensor<[1,1,225,480,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 225 + d1 * 15 + d2, d3), memory_config: (225, 480, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x184>>, >
shape: #ttnn.shape<1x1x400x184>
tensor<[1,1,400,184,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 184, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x200>>, >
shape: #ttnn.shape<1x1x400x200>
tensor<[1,1,400,200,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 200, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<400x480>>, >
shape: #ttnn.shape<1x1x400x480>
tensor<[1,1,400,480,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 400 + d1 * 20 + d2, d3), memory_config: (400, 480, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3000x768>>, >
shape: #ttnn.shape<1x1x3000x768>
tensor<[1,1,3000,768,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3000 + d1 + d2, d3), memory_config: (3000, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x94>>, >
shape: #ttnn.shape<1x768x3000>
tensor<[1,768,3000,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 94, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x184>>, >
shape: #ttnn.shape<1x1x49x184>
tensor<[1,1,49,184,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 184, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x200>>, >
shape: #ttnn.shape<1x1x49x200>
tensor<[1,1,49,200,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 200, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x480>>, >
shape: #ttnn.shape<1x1x49x480>
tensor<[1,1,49,480,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 480, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x80>>, >
shape: #ttnn.shape<1x1x49x80>
tensor<[1,1,49,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x232>>, >
shape: #ttnn.shape<1x1x100x232>
tensor<[1,1,100,232,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 232, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x136>>, >
shape: #ttnn.shape<1x1x361x136>
tensor<[1,1,361,136,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 136, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x816>>, >
shape: #ttnn.shape<1x1x361x816>
tensor<[1,1,361,816,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 816, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x816>>, >
shape: #ttnn.shape<1x1x100x816>
tensor<[1,1,100,816,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 10 + d2, d3), memory_config: (100, 816, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x160>>, >
shape: #ttnn.shape<1x1x49x160>
tensor<[1,1,49,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x192>>, >
shape: #ttnn.shape<1x1x49x192>
tensor<[1,1,49,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x256>>, >
shape: #ttnn.shape<1x1x49x256>
tensor<[1,1,49,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x32>>, >
shape: #ttnn.shape<1x1x49x32>
tensor<[1,1,49,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x384>>, >
shape: #ttnn.shape<1x1x49x384>
tensor<[1,1,49,384,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 384, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x48>>, >
shape: #ttnn.shape<1x1x49x48>
tensor<[1,1,49,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 48, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<289x528>>, >
shape: #ttnn.shape<1x1x289x528>
tensor<[1,1,289,528,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (289, 528, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x1x196x256>
tensor<[1,1,196,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x8>>, >
shape: #ttnn.shape<1x1x12544x8>
tensor<[1,1,12544,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (12544, 8, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x232>>, >
shape: #ttnn.shape<1x1x1x232>
tensor<[1,1,1,232,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 232, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x232x1x1>
tensor<[1,232,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 232 + d1 + d2, d3), memory_config: (8, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x92>>, >
shape: #ttnn.shape<1x1x196x92>
tensor<[1,1,196,92,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 92, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x28>>, >
shape: #ttnn.shape<1x1x784x28>
tensor<[1,1,784,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 28, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x272>>, >
shape: #ttnn.shape<1x1x144x272>
tensor<[1,1,144,272,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (144, 272, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1280>>, >
shape: #ttnn.shape<1x1x1x1280>
tensor<[1,1,1,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x1280x1x1>
tensor<[1,1280,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 + d2, d3), memory_config: (40, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x240>>, >
shape: #ttnn.shape<1x1x1x240>
tensor<[1,1,1,240,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 240, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x240x1x1>
tensor<[1,240,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 240 + d1 + d2, d3), memory_config: (8, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<576x160>>, >
shape: #ttnn.shape<1x1x576x160>
tensor<[1,1,576,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (576, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<576x960>>, >
shape: #ttnn.shape<1x1x576x960>
tensor<[1,1,576,960,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (576, 960, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x960>>, >
shape: #ttnn.shape<1x1x144x960>
tensor<[1,1,144,960,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (144, 960, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x640>>, >
shape: #ttnn.shape<1x1x1024x640>
tensor<[1,1,1024,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x640>>, >
shape: #ttnn.shape<1x1x1024x640>
tensor<[1,1,1024,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 640, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x960>>, >
shape: #ttnn.shape<1x1x9x960>
tensor<[1,1,9,960,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 * 3 + d2, d3), memory_config: (9, 960, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x960>>, >
shape: #ttnn.shape<1x1x9x960>
tensor<[1,1,9,960,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 * 3 + d2, d3), memory_config: (9, 960, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x320>>, >
shape: #ttnn.shape<1x1x4096x320>
tensor<[1,1,4096,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<1x320x64x64>
tensor<[1,320,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 64 + d2, d3), memory_config: (640, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x320>>, >
shape: #ttnn.shape<1x1x4096x320>
tensor<[1,1,4096,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<1x320x64x64>
tensor<[1,320,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 64 + d2, d3), memory_config: (640, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x160>>, >
shape: #ttnn.shape<1x1x49x160>
tensor<[1,1,49,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x320>>, >
shape: #ttnn.shape<1x1x49x320>
tensor<[1,1,49,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 320, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x80>>, >
shape: #ttnn.shape<1x1x49x80>
tensor<[1,1,49,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x960>>, >
shape: #ttnn.shape<1x1x49x960>
tensor<[1,1,49,960,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 960, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x960>>, >
shape: #ttnn.shape<1x1x49x960>
tensor<[1,1,49,960,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 960, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x96>>, >
shape: #ttnn.shape<1x1x3136x96>
tensor<[1,1,3136,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x96>>, >
shape: #ttnn.shape<1x1x3136x96>
tensor<[1,1,3136,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3600x96>>, >
shape: #ttnn.shape<1x1x3600x96>
tensor<[1,1,3600,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 60 + d2, d3), memory_config: (3600, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4225x96>>, >
shape: #ttnn.shape<1x1x4225x96>
tensor<[1,1,4225,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4225 + d1 * 65 + d2, d3), memory_config: (4225, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x208>>, >
shape: #ttnn.shape<1x1x196x208>
tensor<[1,1,196,208,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 208, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x576>>, >
shape: #ttnn.shape<1x1x196x576>
tensor<[1,1,196,576,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 576, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<361x576>>, >
shape: #ttnn.shape<1x1x361x576>
tensor<[1,1,361,576,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 361 + d1 * 19 + d2, d3), memory_config: (361, 576, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1225x96>>, >
shape: #ttnn.shape<1x1x1225x96>
tensor<[1,1,1225,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1225 + d1 * 35 + d2, d3), memory_config: (1225, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x128>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x24>>, >
shape: #ttnn.shape<1x1x3136x24>
tensor<[1,1,3136,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (3136, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3600x24>>, >
shape: #ttnn.shape<1x1x3600x24>
tensor<[1,1,3600,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 60 + d2, d3), memory_config: (3600, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4225x24>>, >
shape: #ttnn.shape<1x1x4225x24>
tensor<[1,1,4225,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4225 + d1 * 65 + d2, d3), memory_config: (4225, 24, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x20>>, >
shape: #ttnn.shape<1x1x784x20>
tensor<[1,1,784,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 20, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x128>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x128>>, >
shape: #ttnn.shape<1x1x49x128>
tensor<[1,1,49,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x13x128>
tensor<[1,13,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x128>
tensor<[1,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x32x128>
tensor<[1,32,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x1>
tensor<[1,1,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10x1x1>
tensor<[1,10,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10x1x1>
tensor<[1,10,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11x1x1>
tensor<[1,11,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1x1>
tensor<[1,12,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13x1x1>
tensor<[1,13,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14x1x1>
tensor<[1,14,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15x1x1>
tensor<[1,15,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x1>
tensor<[1,16,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x17x1x1>
tensor<[1,17,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x18x1x1>
tensor<[1,18,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x19x1x1>
tensor<[1,19,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x20x1x1>
tensor<[1,20,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x21x1x1>
tensor<[1,21,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x22x1x1>
tensor<[1,22,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x23x1x1>
tensor<[1,23,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24x1x1>
tensor<[1,24,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25x1x1>
tensor<[1,25,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x26x1x1>
tensor<[1,26,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 26 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27x1x1>
tensor<[1,27,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x28x1x1>
tensor<[1,28,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x29x1x1>
tensor<[1,29,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 29 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x5x1x1>
tensor<[1,5,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1x1>
tensor<[1,6,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7x1x1>
tensor<[1,7,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x1>
tensor<[1,8,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9x1x1>
tensor<[1,9,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<45x1x1x1>
tensor<[45,1,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (2, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<5x1x1x1>
tensor<[5,1,1,1,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x1280x16x16>
tensor<[1,1280,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 16 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<74x7>>, >
shape: #ttnn.shape<1x12x197x197>
tensor<[1,12,197,197,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2364 + d1 * 197 + d2, d3), memory_config: (74, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<76x7>>, >
shape: #ttnn.shape<1x12x201x201>
tensor<[1,12,201,201,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2412 + d1 * 201 + d2, d3), memory_config: (76, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x12x8x8>
tensor<[1,12,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 96 + d1 * 8 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<99x7>>, >
shape: #ttnn.shape<1x16x197x197>
tensor<[1,16,197,197,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3152 + d1 * 197 + d2, d3), memory_config: (99, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x10>
tensor<[1,16,1,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x11>
tensor<[1,16,1,11,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x12>
tensor<[1,16,1,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x13>
tensor<[1,16,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x14>
tensor<[1,16,1,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x15>
tensor<[1,16,1,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x16>
tensor<[1,16,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x17>
tensor<[1,16,1,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x18>
tensor<[1,16,1,18,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x19>
tensor<[1,16,1,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x20>
tensor<[1,16,1,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x21>
tensor<[1,16,1,21,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x22>
tensor<[1,16,1,22,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x23>
tensor<[1,16,1,23,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x24>
tensor<[1,16,1,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x6>
tensor<[1,16,1,6,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x7>
tensor<[1,16,1,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x8>
tensor<[1,16,1,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1x9>
tensor<[1,16,1,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x16x5x5>
tensor<[1,16,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 5 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<1x1x16384x256>
tensor<[1,1,16384,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 16384 + d2, d3), memory_config: (512, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x10>>, >
shape: #ttnn.shape<1x1x19200x300>
tensor<[1,1,19200,300,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 19200 + d2, d3), memory_config: (600, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<1x256x1280>
tensor<[1,256,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x8>>, >
shape: #ttnn.shape<1x2x4096x256>
tensor<[1,2,4096,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 4096 + d2, d3), memory_config: (256, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x10>>, >
shape: #ttnn.shape<1x2x4800x300>
tensor<[1,2,4800,300,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 4800 + d2, d3), memory_config: (300, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<1x320x64x64>
tensor<[1,320,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 64 + d2, d3), memory_config: (640, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x10>>, >
shape: #ttnn.shape<1x4096x320>
tensor<[1,4096,320,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1571>>, >
shape: #ttnn.shape<1x50257>
tensor<[1,50257,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1571, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x512>
tensor<[1,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<160x8>>, >
shape: #ttnn.shape<1x5x1024x256>
tensor<[1,5,1024,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5120 + d1 * 1024 + d2, d3), memory_config: (160, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<188x10>>, >
shape: #ttnn.shape<1x5x1200x300>
tensor<[1,5,1200,300,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6000 + d1 * 1200 + d2, d3), memory_config: (188, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x40>>, >
shape: #ttnn.shape<1x64x1280>
tensor<[1,64,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<1x8x2048x256>
tensor<[1,8,2048,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 2048 + d2, d3), memory_config: (512, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x64>>, >
shape: #ttnn.shape<1x8x256x2048>
tensor<[1,8,256,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 256 + d2, d3), memory_config: (64, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x8>>, >
shape: #ttnn.shape<1x8x256x256>
tensor<[1,8,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 256 + d2, d3), memory_config: (64, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<75x10>>, >
shape: #ttnn.shape<1x8x300x300>
tensor<[1,8,300,300,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2400 + d1 * 300 + d2, d3), memory_config: (75, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<2x512>
tensor<[2,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x10>>, >
shape: #ttnn.shape<3x320x320>
tensor<[3,320,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 320 + d1, d2), memory_config: (30, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1x1536>
tensor<[1,1,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x12x1536>
tensor<[1,12,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x112>>, >
shape: #ttnn.shape<1x13x3584>
tensor<[1,13,3584,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 112, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<2x1x128>
tensor<[2,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x6x1024>
tensor<[1,6,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x32x1536>
tensor<[1,32,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<1x2048x768>
tensor<[1,2048,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<1x256x1024>
tensor<[1,256,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x128>
tensor<[1,12,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x14x128>
tensor<[1,14,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x193x768>
tensor<[1,193,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 193 + d1, d2), memory_config: (7, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x8x768>
tensor<[1,8,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x128>
tensor<[1,12,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x14x128>
tensor<[1,14,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<1x256x1024>
tensor<[1,256,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x16x768>
tensor<[1,16,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x25x768>
tensor<[1,25,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x8x768>
tensor<[1,8,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x8x768>
tensor<[1,8,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x32x4096>
tensor<[1,32,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x10x1024>
tensor<[1,10,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x512>
tensor<[1,1,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x10x512>
tensor<[1,10,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x15x512>
tensor<[1,15,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<2x13x768>
tensor<[2,13,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<2x7x512>
tensor<[2,7,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x45x768>
tensor<[1,45,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x7x768>
tensor<[1,7,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x512>
tensor<[1,1,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x6x512>
tensor<[1,6,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x10x1536>
tensor<[1,10,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x11x1536>
tensor<[1,11,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x12x1536>
tensor<[1,12,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x13x1536>
tensor<[1,13,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x14x1536>
tensor<[1,14,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x15x1536>
tensor<[1,15,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x6x1536>
tensor<[1,6,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x7x1536>
tensor<[1,7,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x8x1536>
tensor<[1,8,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x9x1536>
tensor<[1,9,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x5x1024>
tensor<[1,5,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x142>>, >
shape: #ttnn.shape<1x7x4544>
tensor<[1,7,4544,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 142, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11>
tensor<[1,11,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13>
tensor<[1,13,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14>
tensor<[1,14,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15>
tensor<[1,15,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16>
tensor<[1,16,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x17>
tensor<[1,17,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x18>
tensor<[1,18,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x19>
tensor<[1,19,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x20>
tensor<[1,20,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x21>
tensor<[1,21,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x22>
tensor<[1,22,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x23>
tensor<[1,23,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24>
tensor<[1,24,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25>
tensor<[1,25,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x26>
tensor<[1,26,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27>
tensor<[1,27,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x28>
tensor<[1,28,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x29>
tensor<[1,29,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6>
tensor<[1,6,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7>
tensor<[1,7,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8>
tensor<[1,8,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9>
tensor<[1,9,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x12>
tensor<[12,12,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<13x13>
tensor<[13,13,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<32x32>
tensor<[32,32,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<5x5>
tensor<[5,5,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x6>
tensor<[6,6,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11>
tensor<[1,11,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13>
tensor<[1,13,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14>
tensor<[1,14,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15>
tensor<[1,15,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16>
tensor<[1,16,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x17>
tensor<[1,17,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x18>
tensor<[1,18,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x19>
tensor<[1,19,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x12x12>
tensor<[1,1,12,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 * 12 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x13x13>
tensor<[1,1,13,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13 + d1 * 13 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x10>
tensor<[1,1,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x11>
tensor<[1,1,1,11,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x12>
tensor<[1,1,1,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x13>
tensor<[1,1,1,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x14>
tensor<[1,1,1,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x15>
tensor<[1,1,1,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x16>
tensor<[1,1,1,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x17>
tensor<[1,1,1,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x18>
tensor<[1,1,1,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x19>
tensor<[1,1,1,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x20>
tensor<[1,1,1,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x21>
tensor<[1,1,1,21,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x22>
tensor<[1,1,1,22,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x23>
tensor<[1,1,1,23,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x24>
tensor<[1,1,1,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x25>
tensor<[1,1,1,25,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x26>
tensor<[1,1,1,26,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x27>
tensor<[1,1,1,27,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x28>
tensor<[1,1,1,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x29>
tensor<[1,1,1,29,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x46>
tensor<[1,1,1,46,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x47>
tensor<[1,1,1,47,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x48>
tensor<[1,1,1,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x49>
tensor<[1,1,1,49,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x50>
tensor<[1,1,1,50,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x51>
tensor<[1,1,1,51,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x52>
tensor<[1,1,1,52,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x53>
tensor<[1,1,1,53,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x54>
tensor<[1,1,1,54,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x55>
tensor<[1,1,1,55,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x56>
tensor<[1,1,1,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x57>
tensor<[1,1,1,57,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x58>
tensor<[1,1,1,58,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x59>
tensor<[1,1,1,59,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x60>
tensor<[1,1,1,60,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x61>
tensor<[1,1,1,61,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x62>
tensor<[1,1,1,62,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x63>
tensor<[1,1,1,63,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x64>
tensor<[1,1,1,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x65>
tensor<[1,1,1,65,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x66>
tensor<[1,1,1,66,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x67>
tensor<[1,1,1,67,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x68>
tensor<[1,1,1,68,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x69>
tensor<[1,1,1,69,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x6>
tensor<[1,1,1,6,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x70>
tensor<[1,1,1,70,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x71>
tensor<[1,1,1,71,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x72>
tensor<[1,1,1,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x73>
tensor<[1,1,1,73,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x74>
tensor<[1,1,1,74,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x75>
tensor<[1,1,1,75,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x76>
tensor<[1,1,1,76,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x77>
tensor<[1,1,1,77,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x78>
tensor<[1,1,1,78,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x79>
tensor<[1,1,1,79,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x7>
tensor<[1,1,1,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x80>
tensor<[1,1,1,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x81>
tensor<[1,1,1,81,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x82>
tensor<[1,1,1,82,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x83>
tensor<[1,1,1,83,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x84>
tensor<[1,1,1,84,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x85>
tensor<[1,1,1,85,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x86>
tensor<[1,1,1,86,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x87>
tensor<[1,1,1,87,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x88>
tensor<[1,1,1,88,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x89>
tensor<[1,1,1,89,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x8>
tensor<[1,1,1,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x90>
tensor<[1,1,1,90,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x91>
tensor<[1,1,1,91,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x92>
tensor<[1,1,1,92,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x93>
tensor<[1,1,1,93,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x94>
tensor<[1,1,1,94,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x95>
tensor<[1,1,1,95,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x96>
tensor<[1,1,1,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x1x97>
tensor<[1,1,1,97,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x1x98>
tensor<[1,1,1,98,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x1x99>
tensor<[1,1,1,99,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x9>
tensor<[1,1,1,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x32x32>
tensor<[1,1,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 * 32 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x1x45x45>
tensor<[1,1,45,45,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 45 + d1 * 45 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x5x5>
tensor<[1,1,5,5,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5 + d1 * 5 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x7x7>
tensor<[1,1,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x20>
tensor<[1,20,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x21>
tensor<[1,21,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x22>
tensor<[1,22,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x23>
tensor<[1,23,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24>
tensor<[1,24,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25>
tensor<[1,25,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x26>
tensor<[1,26,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27>
tensor<[1,27,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x28>
tensor<[1,28,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x29>
tensor<[1,29,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x5>
tensor<[1,5,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6>
tensor<[1,6,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7>
tensor<[1,7,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7>
tensor<[1,7,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8>
tensor<[1,8,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9>
tensor<[1,9,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x2>>, >
shape: #ttnn.shape<4x49x49>
tensor<[4,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (7, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x2>>, >
shape: #ttnn.shape<64x49x49>
tensor<[64,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (98, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1>
tensor<[8,1,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x2>
tensor<[8,2,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x3072x10x16>
tensor<[1,3072,10,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 10 + d2, d3), memory_config: (960, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1056x1>>, >
shape: #ttnn.shape<1x3072x11x16>
tensor<[1,3072,11,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 33792 + d1 * 11 + d2, d3), memory_config: (1056, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1152x1>>, >
shape: #ttnn.shape<1x3072x12x16>
tensor<[1,3072,12,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 36864 + d1 * 12 + d2, d3), memory_config: (1152, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1248x1>>, >
shape: #ttnn.shape<1x3072x13x16>
tensor<[1,3072,13,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 39936 + d1 * 13 + d2, d3), memory_config: (1248, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1344x1>>, >
shape: #ttnn.shape<1x3072x14x16>
tensor<[1,3072,14,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 43008 + d1 * 14 + d2, d3), memory_config: (1344, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x1>>, >
shape: #ttnn.shape<1x3072x15x16>
tensor<[1,3072,15,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 15 + d2, d3), memory_config: (1440, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<576x1>>, >
shape: #ttnn.shape<1x3072x6x16>
tensor<[1,3072,6,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18432 + d1 * 6 + d2, d3), memory_config: (576, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<672x1>>, >
shape: #ttnn.shape<1x3072x7x16>
tensor<[1,3072,7,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21504 + d1 * 7 + d2, d3), memory_config: (672, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<768x1>>, >
shape: #ttnn.shape<1x3072x8x16>
tensor<[1,3072,8,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24576 + d1 * 8 + d2, d3), memory_config: (768, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<864x1>>, >
shape: #ttnn.shape<1x3072x9x16>
tensor<[1,3072,9,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27648 + d1 * 9 + d2, d3), memory_config: (864, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<3072x16>
tensor<[3072,16,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (96, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x10x10>
tensor<[1,1,10,10,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10 + d1 * 10 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x12x12>
tensor<[1,1,12,12,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 * 12 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x14x14>
tensor<[1,1,14,14,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14 + d1 * 14 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x16x16>
tensor<[1,1,16,16,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 * 16 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x16x2>
tensor<[1,1,1,16,2,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 16 + d1 * 16 + d2 * 16 + d3, d4), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x1x256x256>
tensor<[1,1,256,256,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 256 + d2, d3), memory_config: (8, 8, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x25x25>
tensor<[1,1,25,25,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25 + d1 * 25 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x6x6>
tensor<[1,1,6,6,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 * 6 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x7x7>
tensor<[1,1,7,7,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x9x9>
tensor<[1,1,9,9,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 * 9 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<142x1>>, >
shape: #ttnn.shape<1x71x64x7>
tensor<[1,71,64,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4544 + d1 * 64 + d2, d3), memory_config: (142, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x2>>, >
shape: #ttnn.shape<1x71x7x64>
tensor<[1,71,7,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 497 + d1 * 7 + d2, d3), memory_config: (16, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x4>>, >
shape: #ttnn.shape<1x2x6x12x128>
tensor<[1,2,6,12,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 144 + d1 * 72 + d2 * 12 + d3, d4), memory_config: (5, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x4>>, >
shape: #ttnn.shape<1x2x6x13x128>
tensor<[1,2,6,13,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 156 + d1 * 78 + d2 * 13 + d3, d4), memory_config: (5, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x4>>, >
shape: #ttnn.shape<1x2x6x14x128>
tensor<[1,2,6,14,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 168 + d1 * 84 + d2 * 14 + d3, d4), memory_config: (6, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x4>>, >
shape: #ttnn.shape<1x2x6x15x128>
tensor<[1,2,6,15,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 180 + d1 * 90 + d2 * 15 + d3, d4), memory_config: (6, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x4>>, >
shape: #ttnn.shape<1x2x6x16x128>
tensor<[1,2,6,16,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 192 + d1 * 96 + d2 * 16 + d3, d4), memory_config: (6, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x4>>, >
shape: #ttnn.shape<1x2x6x17x128>
tensor<[1,2,6,17,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 204 + d1 * 102 + d2 * 17 + d3, d4), memory_config: (7, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x4>>, >
shape: #ttnn.shape<1x2x6x18x128>
tensor<[1,2,6,18,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 216 + d1 * 108 + d2 * 18 + d3, d4), memory_config: (7, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x4>>, >
shape: #ttnn.shape<1x2x6x19x128>
tensor<[1,2,6,19,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 228 + d1 * 114 + d2 * 19 + d3, d4), memory_config: (8, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x4>>, >
shape: #ttnn.shape<1x2x6x20x128>
tensor<[1,2,6,20,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 240 + d1 * 120 + d2 * 20 + d3, d4), memory_config: (8, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x4>>, >
shape: #ttnn.shape<1x2x6x21x128>
tensor<[1,2,6,21,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 252 + d1 * 126 + d2 * 21 + d3, d4), memory_config: (8, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x4>>, >
shape: #ttnn.shape<1x2x6x22x128>
tensor<[1,2,6,22,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 264 + d1 * 132 + d2 * 22 + d3, d4), memory_config: (9, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x4>>, >
shape: #ttnn.shape<1x2x6x23x128>
tensor<[1,2,6,23,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 276 + d1 * 138 + d2 * 23 + d3, d4), memory_config: (9, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x4>>, >
shape: #ttnn.shape<1x2x6x24x128>
tensor<[1,2,6,24,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 288 + d1 * 144 + d2 * 24 + d3, d4), memory_config: (9, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x4>>, >
shape: #ttnn.shape<1x2x6x25x128>
tensor<[1,2,6,25,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 300 + d1 * 150 + d2 * 25 + d3, d4), memory_config: (10, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x4>>, >
shape: #ttnn.shape<1x2x6x26x128>
tensor<[1,2,6,26,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 312 + d1 * 156 + d2 * 26 + d3, d4), memory_config: (10, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x4>>, >
shape: #ttnn.shape<1x2x6x27x128>
tensor<[1,2,6,27,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 324 + d1 * 162 + d2 * 27 + d3, d4), memory_config: (11, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x4>>, >
shape: #ttnn.shape<1x2x6x28x128>
tensor<[1,2,6,28,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 336 + d1 * 168 + d2 * 28 + d3, d4), memory_config: (11, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x4>>, >
shape: #ttnn.shape<1x2x6x29x128>
tensor<[1,2,6,29,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 348 + d1 * 174 + d2 * 29 + d3, d4), memory_config: (11, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x4>>, >
shape: #ttnn.shape<1x4x7x13x128>
tensor<[1,4,7,13,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 364 + d1 * 91 + d2 * 13 + d3, d4), memory_config: (12, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x5x1x16x2>
tensor<[1,5,1,16,2,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 80 + d1 * 16 + d2 * 16 + d3, d4), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x4>>, >
shape: #ttnn.shape<1x8x3x32x128>
tensor<[1,8,3,32,128,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 768 + d1 * 96 + d2 * 32 + d3, d4), memory_config: (24, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x1x7x7>
tensor<[2,1,7,7,ui32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<13x13>
tensor<[13,13,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11>
tensor<[1,11,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13>
tensor<[1,13,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14>
tensor<[1,14,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15>
tensor<[1,15,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16>
tensor<[1,16,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x17>
tensor<[1,17,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x18>
tensor<[1,18,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x19>
tensor<[1,19,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x20>
tensor<[1,20,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x21>
tensor<[1,21,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x22>
tensor<[1,22,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x23>
tensor<[1,23,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24>
tensor<[1,24,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25>
tensor<[1,25,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x26>
tensor<[1,26,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27>
tensor<[1,27,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x28>
tensor<[1,28,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x29>
tensor<[1,29,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6>
tensor<[1,6,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7>
tensor<[1,7,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8>
tensor<[1,8,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9>
tensor<[1,9,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<32x32>
tensor<[32,32,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<5x5>
tensor<[5,5,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x6>
tensor<[6,6,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x96>>, >
shape: #ttnn.shape<1x1024x3072>
tensor<[1,1024,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x16>>, >
shape: #ttnn.shape<1x1024x512>
tensor<[1,1024,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x16>>, >
shape: #ttnn.shape<1x1024x512>
tensor<[1,1024,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x192>>, >
shape: #ttnn.shape<1x1024x6144>
tensor<[1,1024,6144,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 192, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1x1024x640>
tensor<[1,1024,640,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1x1024x640>
tensor<[1,1024,640,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 20, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x10x3072>
tensor<[1,10,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x10x3072>
tensor<[1,10,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x40>>, >
shape: #ttnn.shape<1x1200x1280>
tensor<[1,1200,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (38, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x40>>, >
shape: #ttnn.shape<1x1200x1280>
tensor<[1,1200,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (38, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x160>>, >
shape: #ttnn.shape<1x1370x5120>
tensor<[1,1370,5120,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (43, 160, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x160>>, >
shape: #ttnn.shape<1x1370x5120>
tensor<[1,1370,5120,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (43, 160, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x24>>, >
shape: #ttnn.shape<1x1445x768>
tensor<[1,1445,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (46, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x24>>, >
shape: #ttnn.shape<1x1445x768>
tensor<[1,1445,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (46, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x64>>, >
shape: #ttnn.shape<1x14x14x2048>
tensor<[1,14,14,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x64>>, >
shape: #ttnn.shape<1x14x14x2048>
tensor<[1,14,14,2048,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x96>>, >
shape: #ttnn.shape<1x1500x3072>
tensor<[1,1500,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (47, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x96>>, >
shape: #ttnn.shape<1x1500x3072>
tensor<[1,1500,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (47, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1536>
tensor<[1,1536,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1536>
tensor<[1,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<1x16384x128>
tensor<[1,16384,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<1x16384x128>
tensor<[1,16384,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x48>>, >
shape: #ttnn.shape<1x16384x1536>
tensor<[1,16384,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x24>>, >
shape: #ttnn.shape<1x16384x768>
tensor<[1,16384,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x16x3072>
tensor<[1,16,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x16x3072>
tensor<[1,16,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x8>>, >
shape: #ttnn.shape<1x19200x256>
tensor<[1,19200,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x8>>, >
shape: #ttnn.shape<1x19200x256>
tensor<[1,19200,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<1x196x3072>
tensor<[1,196,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (7, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<1x196x3072>
tensor<[1,196,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (7, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<1x197x3072>
tensor<[1,197,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<1x197x3072>
tensor<[1,197,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x128>>, >
shape: #ttnn.shape<1x197x4096>
tensor<[1,197,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x128>>, >
shape: #ttnn.shape<1x197x4096>
tensor<[1,197,4096,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<1x201x3072>
tensor<[1,201,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (7, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<1x201x3072>
tensor<[1,201,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (7, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<1x2048x768>
tensor<[1,2048,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<1x2048x768>
tensor<[1,2048,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<1x256x1024>
tensor<[1,256,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<1x256x1024>
tensor<[1,256,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<1x256x1280>
tensor<[1,256,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<1x256x1280>
tensor<[1,256,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x256x256>
tensor<[1,256,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x256x256>
tensor<[1,256,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x128>>, >
shape: #ttnn.shape<1x256x4096>
tensor<[1,256,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x128>>, >
shape: #ttnn.shape<1x256x4096>
tensor<[1,256,4096,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x160>>, >
shape: #ttnn.shape<1x256x5120>
tensor<[1,256,5120,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 160, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x160>>, >
shape: #ttnn.shape<1x256x5120>
tensor<[1,256,5120,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 160, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x192>>, >
shape: #ttnn.shape<1x256x6144>
tensor<[1,256,6144,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 192, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x96>>, >
shape: #ttnn.shape<1x257x3072>
tensor<[1,257,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (9, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x96>>, >
shape: #ttnn.shape<1x257x3072>
tensor<[1,257,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (9, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x25x3072>
tensor<[1,25,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x25x3072>
tensor<[1,25,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x32>>, >
shape: #ttnn.shape<1x28x28x1024>
tensor<[1,28,28,1024,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x32>>, >
shape: #ttnn.shape<1x28x28x1024>
tensor<[1,28,28,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x64>>, >
shape: #ttnn.shape<1x300x2048>
tensor<[1,300,2048,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x64>>, >
shape: #ttnn.shape<1x300x2048>
tensor<[1,300,2048,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x8>
tensor<[1,3072,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x8>
tensor<[1,3072,8,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x40>>, >
shape: #ttnn.shape<1x4096x1280>
tensor<[1,4096,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x40>>, >
shape: #ttnn.shape<1x4096x1280>
tensor<[1,4096,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x48>>, >
shape: #ttnn.shape<1x4096x1536>
tensor<[1,4096,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x8>>, >
shape: #ttnn.shape<1x4096x256>
tensor<[1,4096,256,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x8>>, >
shape: #ttnn.shape<1x4096x256>
tensor<[1,4096,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x96>>, >
shape: #ttnn.shape<1x4096x3072>
tensor<[1,4096,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x16>>, >
shape: #ttnn.shape<1x4800x512>
tensor<[1,4800,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (150, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x16>>, >
shape: #ttnn.shape<1x4800x512>
tensor<[1,4800,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (150, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x16>>, >
shape: #ttnn.shape<1x56x56x512>
tensor<[1,56,56,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x16>>, >
shape: #ttnn.shape<1x56x56x512>
tensor<[1,56,56,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x160>>, >
shape: #ttnn.shape<1x64x5120>
tensor<[1,64,5120,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 160, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x160>>, >
shape: #ttnn.shape<1x64x5120>
tensor<[1,64,5120,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 160, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x24>>, >
shape: #ttnn.shape<1x65536x768>
tensor<[1,65536,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 65536 + d1, d2), memory_config: (2048, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x47>>, >
shape: #ttnn.shape<1x768x1500>
tensor<[1,768,1500,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 47, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x47>>, >
shape: #ttnn.shape<1x768x1500>
tensor<[1,768,1500,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 47, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x94>>, >
shape: #ttnn.shape<1x768x3000>
tensor<[1,768,3000,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 94, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x94>>, >
shape: #ttnn.shape<1x768x3000>
tensor<[1,768,3000,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 94, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x12>>, >
shape: #ttnn.shape<1x768x384>
tensor<[1,768,384,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 12, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x12>>, >
shape: #ttnn.shape<1x768x384>
tensor<[1,768,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 768 + d1, d2), memory_config: (24, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x568>>, >
shape: #ttnn.shape<1x7x18176>
tensor<[1,7,18176,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 568, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x568>>, >
shape: #ttnn.shape<1x7x18176>
tensor<[1,7,18176,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 568, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x128>>, >
shape: #ttnn.shape<1x7x7x4096>
tensor<[1,7,7,4096,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x128>>, >
shape: #ttnn.shape<1x7x7x4096>
tensor<[1,7,7,4096,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<4x1x4096>
tensor<[4,1,4096,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11>
tensor<[1,11,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11>
tensor<[1,11,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x12>
tensor<[12,12,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x12>
tensor<[12,12,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<13x13>
tensor<[13,13,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<13x13>
tensor<[13,13,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13>
tensor<[1,13,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13>
tensor<[1,13,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14>
tensor<[1,14,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14>
tensor<[1,14,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15>
tensor<[1,15,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15>
tensor<[1,15,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16>
tensor<[1,16,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16>
tensor<[1,16,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x17>
tensor<[1,17,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x17>
tensor<[1,17,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x18>
tensor<[1,18,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x18>
tensor<[1,18,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x19>
tensor<[1,19,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x19>
tensor<[1,19,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x20>
tensor<[1,20,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x20>
tensor<[1,20,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x21>
tensor<[1,21,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x21>
tensor<[1,21,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x22>
tensor<[1,22,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x22>
tensor<[1,22,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x23>
tensor<[1,23,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x23>
tensor<[1,23,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24>
tensor<[1,24,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24>
tensor<[1,24,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25>
tensor<[1,25,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25>
tensor<[1,25,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x26>
tensor<[1,26,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x26>
tensor<[1,26,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27>
tensor<[1,27,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27>
tensor<[1,27,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x28>
tensor<[1,28,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x28>
tensor<[1,28,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x29>
tensor<[1,29,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x29>
tensor<[1,29,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<32x32>
tensor<[32,32,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<32x32>
tensor<[32,32,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<45x45>
tensor<[45,45,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<45x45>
tensor<[45,45,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x46>
tensor<[1,46,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x46>
tensor<[1,46,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x47>
tensor<[1,47,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x47>
tensor<[1,47,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x48>
tensor<[1,48,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x48>
tensor<[1,48,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x49>
tensor<[1,49,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x49>
tensor<[1,49,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x50>
tensor<[1,50,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x50>
tensor<[1,50,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x51>
tensor<[1,51,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x51>
tensor<[1,51,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x52>
tensor<[1,52,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x52>
tensor<[1,52,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x53>
tensor<[1,53,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x53>
tensor<[1,53,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x54>
tensor<[1,54,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x54>
tensor<[1,54,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x55>
tensor<[1,55,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x55>
tensor<[1,55,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x56>
tensor<[1,56,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x56>
tensor<[1,56,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x57>
tensor<[1,57,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x57>
tensor<[1,57,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x58>
tensor<[1,58,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x58>
tensor<[1,58,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x59>
tensor<[1,59,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x59>
tensor<[1,59,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<5x5>
tensor<[5,5,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<5x5>
tensor<[5,5,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x60>
tensor<[1,60,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x60>
tensor<[1,60,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x61>
tensor<[1,61,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x61>
tensor<[1,61,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x62>
tensor<[1,62,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x62>
tensor<[1,62,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x63>
tensor<[1,63,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x63>
tensor<[1,63,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x64>
tensor<[1,64,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x64>
tensor<[1,64,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x65>
tensor<[1,65,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x65>
tensor<[1,65,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x66>
tensor<[1,66,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x66>
tensor<[1,66,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x67>
tensor<[1,67,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x67>
tensor<[1,67,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x68>
tensor<[1,68,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x68>
tensor<[1,68,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x69>
tensor<[1,69,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x69>
tensor<[1,69,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6>
tensor<[1,6,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6>
tensor<[1,6,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x70>
tensor<[1,70,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x70>
tensor<[1,70,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x71>
tensor<[1,71,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x71>
tensor<[1,71,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x72>
tensor<[1,72,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x72>
tensor<[1,72,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x73>
tensor<[1,73,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x73>
tensor<[1,73,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x74>
tensor<[1,74,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x74>
tensor<[1,74,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x75>
tensor<[1,75,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x75>
tensor<[1,75,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x76>
tensor<[1,76,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x76>
tensor<[1,76,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x77>
tensor<[1,77,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x77>
tensor<[1,77,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x78>
tensor<[1,78,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x78>
tensor<[1,78,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x79>
tensor<[1,79,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x79>
tensor<[1,79,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7>
tensor<[1,7,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7>
tensor<[1,7,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x80>
tensor<[1,80,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x80>
tensor<[1,80,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x81>
tensor<[1,81,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x81>
tensor<[1,81,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x82>
tensor<[1,82,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x82>
tensor<[1,82,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x83>
tensor<[1,83,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x83>
tensor<[1,83,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x84>
tensor<[1,84,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x84>
tensor<[1,84,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x85>
tensor<[1,85,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x85>
tensor<[1,85,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x86>
tensor<[1,86,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x86>
tensor<[1,86,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x87>
tensor<[1,87,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x87>
tensor<[1,87,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x88>
tensor<[1,88,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x88>
tensor<[1,88,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x89>
tensor<[1,89,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x89>
tensor<[1,89,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8>
tensor<[1,8,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8>
tensor<[1,8,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x90>
tensor<[1,90,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x90>
tensor<[1,90,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x91>
tensor<[1,91,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x91>
tensor<[1,91,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x92>
tensor<[1,92,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x92>
tensor<[1,92,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x93>
tensor<[1,93,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x93>
tensor<[1,93,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x94>
tensor<[1,94,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x94>
tensor<[1,94,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x95>
tensor<[1,95,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x95>
tensor<[1,95,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x96>
tensor<[1,96,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x96>
tensor<[1,96,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x97>
tensor<[1,97,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x97>
tensor<[1,97,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x98>
tensor<[1,98,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x98>
tensor<[1,98,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x99>
tensor<[1,99,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x99>
tensor<[1,99,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9>
tensor<[1,9,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9>
tensor<[1,9,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x120x1x1>
tensor<[1,120,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 + d2, d3), memory_config: (4, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1x1>
tensor<[1,256,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 + d2, d3), memory_config: (8, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<15x1>>, >
shape: #ttnn.shape<1x480x1x1>
tensor<[1,480,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 480 + d1 + d2, d3), memory_config: (15, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x512x1x1>
tensor<[1,512,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 + d2, d3), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x672x1x1>
tensor<[1,672,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 + d2, d3), memory_config: (21, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x72x1x1>
tensor<[1,72,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 72 + d1 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x768x1x1>
tensor<[1,768,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 + d2, d3), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x960x1x1>
tensor<[1,960,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 + d2, d3), memory_config: (30, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x4>>, >
shape: #ttnn.shape<1x16x112x112>
tensor<[1,16,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 112 + d2, d3), memory_config: (56, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x5>>, >
shape: #ttnn.shape<1x16x160x160>
tensor<[1,16,160,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 160 + d2, d3), memory_config: (80, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<81x1>>, >
shape: #ttnn.shape<1x184x14x14>
tensor<[1,184,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2576 + d1 * 14 + d2, d3), memory_config: (81, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<115x1>>, >
shape: #ttnn.shape<1x184x20x20>
tensor<[1,184,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3680 + d1 * 20 + d2, d3), memory_config: (115, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<88x1>>, >
shape: #ttnn.shape<1x200x14x14>
tensor<[1,200,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2800 + d1 * 14 + d2, d3), memory_config: (88, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<125x1>>, >
shape: #ttnn.shape<1x200x20x20>
tensor<[1,200,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4000 + d1 * 20 + d2, d3), memory_config: (125, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x240x14x14>
tensor<[1,240,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 14 + d2, d3), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x240x20x20>
tensor<[1,240,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 20 + d2, d3), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x240x28x28>
tensor<[1,240,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 28 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x2>>, >
shape: #ttnn.shape<1x240x40x40>
tensor<[1,240,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 40 + d2, d3), memory_config: (300, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x480x10x10>
tensor<[1,480,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 10 + d2, d3), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x480x14x14>
tensor<[1,480,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 14 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x1>>, >
shape: #ttnn.shape<1x480x20x20>
tensor<[1,480,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 20 + d2, d3), memory_config: (300, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x672x10x10>
tensor<[1,672,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 10 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x672x14x14>
tensor<[1,672,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 14 + d2, d3), memory_config: (294, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x672x20x20>
tensor<[1,672,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 20 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x672x7x7>
tensor<[1,672,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 7 + d2, d3), memory_config: (147, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x960x7x7>
tensor<[1,960,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 7 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x1152x7x7>
tensor<[1,1152,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 7 + d2, d3), memory_config: (252, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<288x1>>, >
shape: #ttnn.shape<1x1152x8x8>
tensor<[1,1152,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9216 + d1 * 8 + d2, d3), memory_config: (288, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<51x1>>, >
shape: #ttnn.shape<1x116x14x14>
tensor<[1,116,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1624 + d1 * 14 + d2, d3), memory_config: (51, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<351x1>>, >
shape: #ttnn.shape<1x1248x9x9>
tensor<[1,1248,9,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11232 + d1 * 9 + d2, d3), memory_config: (351, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<400x1>>, >
shape: #ttnn.shape<1x1280x10x10>
tensor<[1,1280,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12800 + d1 * 10 + d2, d3), memory_config: (400, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<480x1>>, >
shape: #ttnn.shape<1x1280x12x12>
tensor<[1,1280,12,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15360 + d1 * 12 + d2, d3), memory_config: (480, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x1280x7x7>
tensor<[1,1280,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 7 + d2, d3), memory_config: (280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x1>>, >
shape: #ttnn.shape<1x1280x9x9>
tensor<[1,1280,9,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 9 + d2, d3), memory_config: (360, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x128x1x1>
tensor<[1,128,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 128 + d1 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x128x28x28>
tensor<[1,128,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 28 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x128x2x2>
tensor<[1,128,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 2 + d2, d3), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x1>>, >
shape: #ttnn.shape<1x128x3x3>
tensor<[1,128,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 384 + d1 * 3 + d2, d3), memory_config: (12, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x2>>, >
shape: #ttnn.shape<1x128x56x56>
tensor<[1,128,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 56 + d2, d3), memory_config: (224, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x1>>, >
shape: #ttnn.shape<1x128x5x5>
tensor<[1,128,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 5 + d2, d3), memory_config: (20, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<118x1>>, >
shape: #ttnn.shape<1x134x28x28>
tensor<[1,134,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3752 + d1 * 28 + d2, d3), memory_config: (118, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<435x1>>, >
shape: #ttnn.shape<1x1392x10x10>
tensor<[1,1392,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13920 + d1 * 10 + d2, d3), memory_config: (435, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<675x5>>, >
shape: #ttnn.shape<1x144x150x150>
tensor<[1,144,150,150,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21600 + d1 * 150 + d2, d3), memory_config: (675, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<855x6>>, >
shape: #ttnn.shape<1x144x190x190>
tensor<[1,144,190,190,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27360 + d1 * 190 + d2, d3), memory_config: (855, 6, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x144x28x28>
tensor<[1,144,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 28 + d2, d3), memory_config: (126, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<135x1>>, >
shape: #ttnn.shape<1x144x30x30>
tensor<[1,144,30,30,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4320 + d1 * 30 + d2, d3), memory_config: (135, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<149x2>>, >
shape: #ttnn.shape<1x144x33x33>
tensor<[1,144,33,33,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4752 + d1 * 33 + d2, d3), memory_config: (149, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x2>>, >
shape: #ttnn.shape<1x144x56x56>
tensor<[1,144,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 56 + d2, d3), memory_config: (252, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<270x2>>, >
shape: #ttnn.shape<1x144x60x60>
tensor<[1,144,60,60,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8640 + d1 * 60 + d2, d3), memory_config: (270, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<293x3>>, >
shape: #ttnn.shape<1x144x65x65>
tensor<[1,144,65,65,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9360 + d1 * 65 + d2, d3), memory_config: (293, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<338x3>>, >
shape: #ttnn.shape<1x144x75x75>
tensor<[1,144,75,75,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10800 + d1 * 75 + d2, d3), memory_config: (338, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<428x3>>, >
shape: #ttnn.shape<1x144x95x95>
tensor<[1,144,95,95,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13680 + d1 * 95 + d2, d3), memory_config: (428, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x2>>, >
shape: #ttnn.shape<1x14x56x56>
tensor<[1,14,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 56 + d2, d3), memory_config: (25, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x160x7x7>
tensor<[1,160,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 7 + d2, d3), memory_config: (35, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<612x1>>, >
shape: #ttnn.shape<1x1632x12x12>
tensor<[1,1632,12,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19584 + d1 * 12 + d2, d3), memory_config: (612, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x168x28x28>
tensor<[1,168,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 28 + d2, d3), memory_config: (147, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<14x1>>, >
shape: #ttnn.shape<1x16x28x28>
tensor<[1,16,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 448 + d1 * 28 + d2, d3), memory_config: (14, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x192x14x14>
tensor<[1,192,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 14 + d2, d3), memory_config: (84, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x192x28x28>
tensor<[1,192,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 28 + d2, d3), memory_config: (168, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<228x2>>, >
shape: #ttnn.shape<1x192x38x38>
tensor<[1,192,38,38,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7296 + d1 * 38 + d2, d3), memory_config: (228, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<288x2>>, >
shape: #ttnn.shape<1x192x48x48>
tensor<[1,192,48,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9216 + d1 * 48 + d2, d3), memory_config: (288, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<450x3>>, >
shape: #ttnn.shape<1x192x75x75>
tensor<[1,192,75,75,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 75 + d2, d3), memory_config: (450, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<570x3>>, >
shape: #ttnn.shape<1x192x95x95>
tensor<[1,192,95,95,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18240 + d1 * 95 + d2, d3), memory_config: (570, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<86x1>>, >
shape: #ttnn.shape<1x196x14x14>
tensor<[1,196,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2744 + d1 * 14 + d2, d3), memory_config: (86, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x20x28x28>
tensor<[1,20,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 560 + d1 * 28 + d2, d3), memory_config: (18, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x240x14x14>
tensor<[1,240,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 14 + d2, d3), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<113x1>>, >
shape: #ttnn.shape<1x240x15x15>
tensor<[1,240,15,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 15 + d2, d3), memory_config: (113, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x240x28x28>
tensor<[1,240,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 28 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<225x1>>, >
shape: #ttnn.shape<1x240x30x30>
tensor<[1,240,30,30,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7200 + d1 * 30 + d2, d3), memory_config: (225, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x2>>, >
shape: #ttnn.shape<1x24x56x56>
tensor<[1,24,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 56 + d2, d3), memory_config: (42, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x256x10x10>
tensor<[1,256,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 10 + d2, d3), memory_config: (80, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x256x14x14>
tensor<[1,256,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 14 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x256x28x28>
tensor<[1,256,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 28 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x256x2x2>
tensor<[1,256,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 2 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x256x3x3>
tensor<[1,256,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 3 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x256x5x5>
tensor<[1,256,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 5 + d2, d3), memory_config: (40, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x1>>, >
shape: #ttnn.shape<1x272x7x7>
tensor<[1,272,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1904 + d1 * 7 + d2, d3), memory_config: (60, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<153x1>>, >
shape: #ttnn.shape<1x288x17x17>
tensor<[1,288,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4896 + d1 * 17 + d2, d3), memory_config: (153, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<171x1>>, >
shape: #ttnn.shape<1x288x19x19>
tensor<[1,288,19,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5472 + d1 * 19 + d2, d3), memory_config: (171, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<297x2>>, >
shape: #ttnn.shape<1x288x33x33>
tensor<[1,288,33,33,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9504 + d1 * 33 + d2, d3), memory_config: (297, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<342x2>>, >
shape: #ttnn.shape<1x288x38x38>
tensor<[1,288,38,38,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10944 + d1 * 38 + d2, d3), memory_config: (342, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x28x28x28>
tensor<[1,28,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x320x28x28>
tensor<[1,320,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 28 + d2, d3), memory_config: (280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x4>>, >
shape: #ttnn.shape<1x32x112x112>
tensor<[1,32,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 112 + d2, d3), memory_config: (112, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x4>>, >
shape: #ttnn.shape<1x32x120x120>
tensor<[1,32,120,120,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 120 + d2, d3), memory_config: (120, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<130x5>>, >
shape: #ttnn.shape<1x32x130x130>
tensor<[1,32,130,130,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4160 + d1 * 130 + d2, d3), memory_config: (130, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x5>>, >
shape: #ttnn.shape<1x32x150x150>
tensor<[1,32,150,150,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 150 + d2, d3), memory_config: (150, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<190x6>>, >
shape: #ttnn.shape<1x32x190x190>
tensor<[1,32,190,190,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6080 + d1 * 190 + d2, d3), memory_config: (190, 6, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x334x14x14>
tensor<[1,334,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4676 + d1 * 14 + d2, d3), memory_config: (147, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x336x24x24>
tensor<[1,336,24,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 24 + d2, d3), memory_config: (252, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x2>>, >
shape: #ttnn.shape<1x336x48x48>
tensor<[1,336,48,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 48 + d2, d3), memory_config: (504, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x34x28x28>
tensor<[1,34,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 952 + d1 * 28 + d2, d3), memory_config: (30, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x384x14x14>
tensor<[1,384,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 14 + d2, d3), memory_config: (168, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x40x14x14>
tensor<[1,40,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 560 + d1 * 14 + d2, d3), memory_config: (18, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x2>>, >
shape: #ttnn.shape<1x40x56x56>
tensor<[1,40,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 56 + d2, d3), memory_config: (70, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x462x7x7>
tensor<[1,462,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3234 + d1 * 7 + d2, d3), memory_config: (102, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<1x46x28x28>
tensor<[1,46,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1288 + d1 * 28 + d2, d3), memory_config: (41, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x480x10x10>
tensor<[1,480,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 10 + d2, d3), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x480x14x14>
tensor<[1,480,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 14 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<225x1>>, >
shape: #ttnn.shape<1x480x15x15>
tensor<[1,480,15,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7200 + d1 * 15 + d2, d3), memory_config: (225, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x512x5x5>
tensor<[1,512,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 5 + d2, d3), memory_config: (80, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x512x7x7>
tensor<[1,512,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 7 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<281x1>>, >
shape: #ttnn.shape<1x528x17x17>
tensor<[1,528,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8976 + d1 * 17 + d2, d3), memory_config: (281, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x576x14x14>
tensor<[1,576,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 14 + d2, d3), memory_config: (252, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<342x1>>, >
shape: #ttnn.shape<1x576x19x19>
tensor<[1,576,19,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10944 + d1 * 19 + d2, d3), memory_config: (342, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x576x7x7>
tensor<[1,576,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 7 + d2, d3), memory_config: (126, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<51x1>>, >
shape: #ttnn.shape<1x58x28x28>
tensor<[1,58,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1624 + d1 * 28 + d2, d3), memory_config: (51, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x640x14x14>
tensor<[1,640,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 14 + d2, d3), memory_config: (280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x4>>, >
shape: #ttnn.shape<1x64x112x112>
tensor<[1,64,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 112 + d2, d3), memory_config: (224, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x64x1x1>
tensor<[1,64,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 + d2, d3), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x64x2x2>
tensor<[1,64,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 128 + d1 * 2 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x2>>, >
shape: #ttnn.shape<1x64x56x56>
tensor<[1,64,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 56 + d2, d3), memory_config: (112, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x672x14x14>
tensor<[1,672,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 14 + d2, d3), memory_config: (294, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<315x1>>, >
shape: #ttnn.shape<1x672x15x15>
tensor<[1,672,15,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10080 + d1 * 15 + d2, d3), memory_config: (315, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x672x20x20>
tensor<[1,672,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 20 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x1>>, >
shape: #ttnn.shape<1x672x24x24>
tensor<[1,672,24,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 24 + d2, d3), memory_config: (504, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x672x7x7>
tensor<[1,672,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 7 + d2, d3), memory_config: (147, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x672x8x8>
tensor<[1,672,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 8 + d2, d3), memory_config: (168, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x68x14x14>
tensor<[1,68,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 952 + d1 * 14 + d2, d3), memory_config: (30, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<119x2>>, >
shape: #ttnn.shape<1x68x56x56>
tensor<[1,68,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3808 + d1 * 56 + d2, d3), memory_config: (119, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<383x1>>, >
shape: #ttnn.shape<1x720x17x17>
tensor<[1,720,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12240 + d1 * 17 + d2, d3), memory_config: (383, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<203x1>>, >
shape: #ttnn.shape<1x720x9x9>
tensor<[1,720,9,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6480 + d1 * 9 + d2, d3), memory_config: (203, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<69x1>>, >
shape: #ttnn.shape<1x78x28x28>
tensor<[1,78,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2184 + d1 * 28 + d2, d3), memory_config: (69, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<255x1>>, >
shape: #ttnn.shape<1x816x10x10>
tensor<[1,816,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8160 + d1 * 10 + d2, d3), memory_config: (255, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<485x1>>, >
shape: #ttnn.shape<1x816x19x19>
tensor<[1,816,19,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15504 + d1 * 19 + d2, d3), memory_config: (485, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x1>>, >
shape: #ttnn.shape<1x960x12x12>
tensor<[1,960,12,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 12 + d2, d3), memory_config: (360, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x1>>, >
shape: #ttnn.shape<1x960x24x24>
tensor<[1,960,24,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 24 + d2, d3), memory_config: (720, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x960x7x7>
tensor<[1,960,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 7 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x4>>, >
shape: #ttnn.shape<1x96x112x112>
tensor<[1,96,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 112 + d2, d3), memory_config: (336, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x4>>, >
shape: #ttnn.shape<1x96x120x120>
tensor<[1,96,120,120,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 120 + d2, d3), memory_config: (360, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<390x5>>, >
shape: #ttnn.shape<1x96x130x130>
tensor<[1,96,130,130,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12480 + d1 * 130 + d2, d3), memory_config: (390, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x2>>, >
shape: #ttnn.shape<1x96x56x56>
tensor<[1,96,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 56 + d2, d3), memory_config: (168, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<180x2>>, >
shape: #ttnn.shape<1x96x60x60>
tensor<[1,96,60,60,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5760 + d1 * 60 + d2, d3), memory_config: (180, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<195x3>>, >
shape: #ttnn.shape<1x96x65x65>
tensor<[1,96,65,65,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6240 + d1 * 65 + d2, d3), memory_config: (195, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<86x1>>, >
shape: #ttnn.shape<1x98x28x28>
tensor<[1,98,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2744 + d1 * 28 + d2, d3), memory_config: (86, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x32>
tensor<[1,1,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x5x32>
tensor<[1,5,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<1x128x128x128>
tensor<[1,128,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (512, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x128x32x32>
tensor<[1,128,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 32 + d2, d3), memory_config: (128, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x2>>, >
shape: #ttnn.shape<1x128x64x64>
tensor<[1,128,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 64 + d2, d3), memory_config: (256, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x256x16x16>
tensor<[1,256,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 16 + d2, d3), memory_config: (128, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x256x32x32>
tensor<[1,256,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 32 + d2, d3), memory_config: (256, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x2>>, >
shape: #ttnn.shape<1x256x64x64>
tensor<[1,256,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 64 + d2, d3), memory_config: (512, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x8>>, >
shape: #ttnn.shape<1x32x256x256>
tensor<[1,32,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 256 + d2, d3), memory_config: (256, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x16>>, >
shape: #ttnn.shape<1x32x512x512>
tensor<[1,32,512,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 512 + d2, d3), memory_config: (512, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x512x16x16>
tensor<[1,512,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 16 + d2, d3), memory_config: (256, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x512x32x32>
tensor<[1,512,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 32 + d2, d3), memory_config: (512, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x4>>, >
shape: #ttnn.shape<1x64x128x128>
tensor<[1,64,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 128 + d2, d3), memory_config: (256, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<1x64x256x256>
tensor<[1,64,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 256 + d2, d3), memory_config: (512, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x6>
tensor<[6,6,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x6>
tensor<[6,6,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<8x2048>
tensor<[8,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<8x2048>
tensor<[8,2048,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<1x1x3136x128>
tensor<[1,1,3136,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x4>>, >
shape: #ttnn.shape<1x1x196x128>
tensor<[1,1,196,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x4>>, >
shape: #ttnn.shape<1x1x784x128>
tensor<[1,1,784,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x4>>, >
shape: #ttnn.shape<1x1x1024x128>
tensor<[1,1,1024,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (32, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x1x196x16>
tensor<[1,1,196,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<39x6>>, >
shape: #ttnn.shape<1x1x1225x192>
tensor<[1,1,1225,192,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1225 + d1 * 35 + d2, d3), memory_config: (39, 6, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x8>>, >
shape: #ttnn.shape<1x1x49x256>
tensor<[1,1,49,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x1x256x256>
tensor<[1,1,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (8, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<1x1x784x256>
tensor<[1,1,784,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x10>>, >
shape: #ttnn.shape<1x1x196x320>
tensor<[1,1,196,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x1>>, >
shape: #ttnn.shape<1x1x3136x32>
tensor<[1,1,3136,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x1x16384x32>
tensor<[1,1,16384,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (512, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x12>>, >
shape: #ttnn.shape<1x1x289x384>
tensor<[1,1,289,384,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 289 + d1 * 17 + d2, d3), memory_config: (10, 12, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x1x49x4>
tensor<[1,1,49,4,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x16>>, >
shape: #ttnn.shape<1x1x49x512>
tensor<[1,1,49,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<1x1x196x512>
tensor<[1,1,196,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x20>>, >
shape: #ttnn.shape<1x1x49x640>
tensor<[1,1,49,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x2>>, >
shape: #ttnn.shape<1x1x3136x64>
tensor<[1,1,3136,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x1x4096x64>
tensor<[1,1,4096,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (128, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<167x2>>, >
shape: #ttnn.shape<1x1x5329x64>
tensor<[1,1,5329,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5329 + d1 * 73 + d2, d3), memory_config: (167, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x2>>, >
shape: #ttnn.shape<1x1x12544x64>
tensor<[1,1,12544,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 112 + d2, d3), memory_config: (392, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x1x144x64>
tensor<[1,1,144,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (5, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1800x2>>, >
shape: #ttnn.shape<1x1x57600x64>
tensor<[1,1,57600,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 57600 + d1 * 320 + d2, d3), memory_config: (1800, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x2>>, >
shape: #ttnn.shape<1x1x784x64>
tensor<[1,1,784,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x26>>, >
shape: #ttnn.shape<1x1x49x832>
tensor<[1,1,49,832,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 26, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x1024x1x1>
tensor<[1,1024,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 + d2, d3), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10x1>
tensor<[1,10,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10x1>
tensor<[1,10,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10x1>
tensor<[1,10,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10x1>
tensor<[1,10,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11x1>
tensor<[1,11,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x120x1x1>
tensor<[1,120,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 + d2, d3), memory_config: (4, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x120x1x1>
tensor<[1,120,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 + d2, d3), memory_config: (4, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x120x1x1>
tensor<[1,120,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x1280x1x1>
tensor<[1,1280,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 + d2, d3), memory_config: (40, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x1280x1x1>
tensor<[1,1280,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 + d2, d3), memory_config: (40, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x1280x1x1>
tensor<[1,1280,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 + d2, d3), memory_config: (40, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x1280x1x1>
tensor<[1,1280,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 + d2, d3), memory_config: (40, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x1280x1x1>
tensor<[1,1280,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 + d2, d3), memory_config: (40, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1>
tensor<[1,12,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<1x1392x1x1>
tensor<[1,1392,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1392 + d1 + d2, d3), memory_config: (44, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13x1>
tensor<[1,13,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13x1>
tensor<[1,13,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14x1>
tensor<[1,14,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x1536x1x1>
tensor<[1,1536,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 + d2, d3), memory_config: (48, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15x1>
tensor<[1,15,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15x1>
tensor<[1,15,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x1>>, >
shape: #ttnn.shape<1x1920x1x1>
tensor<[1,1920,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 + d2, d3), memory_config: (60, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1024>
tensor<[1,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x768>
tensor<[1,768,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1>
tensor<[1,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1>
tensor<[1,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1>
tensor<[1,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1>
tensor<[1,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x2048x1x1>
tensor<[1,2048,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 + d2, d3), memory_config: (64, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x2048x1x1>
tensor<[1,2048,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 + d2, d3), memory_config: (64, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x232x1x1>
tensor<[1,232,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 232 + d1 + d2, d3), memory_config: (8, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<79x1>>, >
shape: #ttnn.shape<1x2520x1x1>
tensor<[1,2520,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2520 + d1 + d2, d3), memory_config: (79, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1x1>
tensor<[1,256,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 + d2, d3), memory_config: (8, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1>
tensor<[1,32,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1>
tensor<[1,32,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<116x1>>, >
shape: #ttnn.shape<1x3712x1x1>
tensor<[1,3712,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3712 + d1 + d2, d3), memory_config: (116, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<15x1>>, >
shape: #ttnn.shape<1x480x1x1>
tensor<[1,480,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 480 + d1 + d2, d3), memory_config: (15, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<15x1>>, >
shape: #ttnn.shape<1x480x1x1>
tensor<[1,480,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 480 + d1 + d2, d3), memory_config: (15, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<15x1>>, >
shape: #ttnn.shape<1x480x1x1>
tensor<[1,480,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 480 + d1 + d2, d3), memory_config: (15, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<15x1>>, >
shape: #ttnn.shape<1x480x1x1>
tensor<[1,480,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 480 + d1 + d2, d3), memory_config: (15, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x512>
tensor<[1,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x512x1x1>
tensor<[1,512,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 + d2, d3), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x512x1x1>
tensor<[1,512,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 + d2, d3), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x672x1x1>
tensor<[1,672,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 + d2, d3), memory_config: (21, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x672x1x1>
tensor<[1,672,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 + d2, d3), memory_config: (21, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x672x1x1>
tensor<[1,672,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 + d2, d3), memory_config: (21, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x672x1x1>
tensor<[1,672,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 + d2, d3), memory_config: (21, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x672x1x1>
tensor<[1,672,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 + d2, d3), memory_config: (21, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x672x1x1>
tensor<[1,672,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 + d2, d3), memory_config: (21, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<22x1>>, >
shape: #ttnn.shape<1x696x1x1>
tensor<[1,696,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 696 + d1 + d2, d3), memory_config: (22, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1>
tensor<[1,6,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x72x1x1>
tensor<[1,72,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 72 + d1 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x72x1x1>
tensor<[1,72,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 72 + d1 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x72x1x1>
tensor<[1,72,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 72 + d1 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x768x1x1>
tensor<[1,768,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 + d2, d3), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7x1>
tensor<[1,7,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1>
tensor<[1,8,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x960x1x1>
tensor<[1,960,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 + d2, d3), memory_config: (30, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x960x1x1>
tensor<[1,960,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 + d2, d3), memory_config: (30, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9x1>
tensor<[1,9,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x13x1>
tensor<[2,13,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<100x4>
tensor<[100,4,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x3>>, >
shape: #ttnn.shape<100x92>
tensor<[100,92,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x8>>, >
shape: #ttnn.shape<100x256>
tensor<[100,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x64>>, >
shape: #ttnn.shape<100x2048>
tensor<[100,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x8>>, >
shape: #ttnn.shape<100x256>
tensor<[100,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x192>>, >
shape: #ttnn.shape<1024x6144>
tensor<[1024,6144,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 192, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x24>>, >
shape: #ttnn.shape<1024x768>
tensor<[1024,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x5>>, >
shape: #ttnn.shape<1024x160>
tensor<[1024,160,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x8>>, >
shape: #ttnn.shape<1024x256>
tensor<[1024,256,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1024x640>
tensor<[1024,640,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 20, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1024x640>
tensor<[1024,640,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 20, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x48>>, >
shape: #ttnn.shape<1024x1536>
tensor<[1024,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x24>>, >
shape: #ttnn.shape<1024x768>
tensor<[1024,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x48>>, >
shape: #ttnn.shape<1024x1536>
tensor<[1024,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x5>>, >
shape: #ttnn.shape<1024x160>
tensor<[1024,160,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x160>>, >
shape: #ttnn.shape<1024x5120>
tensor<[1024,5120,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 160, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1024x640>
tensor<[1024,640,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x96>>, >
shape: #ttnn.shape<1024x3072>
tensor<[1024,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<10x1024>
tensor<[10,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<10x4096>
tensor<[10,4096,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1572>>, >
shape: #ttnn.shape<10x50280>
tensor<[10,50280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1572, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<10x6144>
tensor<[10,6144,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<10x512>
tensor<[10,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<10x768>
tensor<[10,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<10x1024>
tensor<[10,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<10x2048>
tensor<[10,2048,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<10x512>
tensor<[10,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x7813>>, >
shape: #ttnn.shape<10x250002>
tensor<[10,250002,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 7813, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<10x3072>
tensor<[10,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<10x768>
tensor<[10,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<10x3072>
tensor<[10,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1572>>, >
shape: #ttnn.shape<11x50280>
tensor<[11,50280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1572, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<11x6144>
tensor<[11,6144,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<11x3072>
tensor<[11,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x10>>, >
shape: #ttnn.shape<1200x320>
tensor<[1200,320,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (38, 10, 'tile<32x32, bf16>', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x40>>, >
shape: #ttnn.shape<1200x1280>
tensor<[1200,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (38, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x10>>, >
shape: #ttnn.shape<1200x320>
tensor<[1200,320,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (38, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x48>>, >
shape: #ttnn.shape<1296x1536>
tensor<[1296,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (41, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x144>>, >
shape: #ttnn.shape<1296x4608>
tensor<[1296,4608,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (41, 144, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x72>>, >
shape: #ttnn.shape<1296x2304>
tensor<[1296,2304,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (41, 72, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x24>>, >
shape: #ttnn.shape<1296x768>
tensor<[1296,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (41, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<12x768>
tensor<[12,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<12x1536>
tensor<[12,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x8>>, >
shape: #ttnn.shape<12x256>
tensor<[12,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1572>>, >
shape: #ttnn.shape<12x50280>
tensor<[12,50280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1572, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<12x6144>
tensor<[12,6144,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x280>>, >
shape: #ttnn.shape<12x8960>
tensor<[12,8960,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 280, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<12x768>
tensor<[12,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x2>
tensor<[12,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<12x3072>
tensor<[12,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<12x768>
tensor<[12,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<12x1536>
tensor<[12,1536,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<12x3072>
tensor<[12,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x40>>, >
shape: #ttnn.shape<1370x1280>
tensor<[1370,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (43, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x120>>, >
shape: #ttnn.shape<1370x3840>
tensor<[1370,3840,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (43, 120, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x160>>, >
shape: #ttnn.shape<1370x5120>
tensor<[1370,5120,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (43, 160, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x40>>, >
shape: #ttnn.shape<1370x1280>
tensor<[1370,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (43, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1572>>, >
shape: #ttnn.shape<13x50280>
tensor<[13,50280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1572, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<13x6144>
tensor<[13,6144,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x112>>, >
shape: #ttnn.shape<13x3584>
tensor<[13,3584,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 112, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x592>>, >
shape: #ttnn.shape<13x18944>
tensor<[13,18944,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 592, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<13x2>
tensor<[13,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x112>>, >
shape: #ttnn.shape<13x3584>
tensor<[13,3584,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 112, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<13x512>
tensor<[13,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<13x3072>
tensor<[13,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x6>>, >
shape: #ttnn.shape<1445x192>
tensor<[1445,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (46, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x24>>, >
shape: #ttnn.shape<1445x768>
tensor<[1445,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (46, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x6>>, >
shape: #ttnn.shape<1445x192>
tensor<[1445,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (46, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<14x768>
tensor<[14,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1572>>, >
shape: #ttnn.shape<14x50280>
tensor<[14,50280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1572, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<14x6144>
tensor<[14,6144,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<14x512>
tensor<[14,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<14x768>
tensor<[14,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<14x2048>
tensor<[14,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<14x512>
tensor<[14,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<14x2>
tensor<[14,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<14x3072>
tensor<[14,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<14x768>
tensor<[14,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<14x3072>
tensor<[14,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x24>>, >
shape: #ttnn.shape<1500x768>
tensor<[1500,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (47, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x96>>, >
shape: #ttnn.shape<1500x3072>
tensor<[1500,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (47, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x24>>, >
shape: #ttnn.shape<1500x768>
tensor<[1500,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (47, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<15x512>
tensor<[15,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1572>>, >
shape: #ttnn.shape<15x50280>
tensor<[15,50280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1572, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<15x6144>
tensor<[15,6144,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<15x512>
tensor<[15,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<15x1024>
tensor<[15,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x12>>, >
shape: #ttnn.shape<15x384>
tensor<[15,384,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 12, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<15x3072>
tensor<[15,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<16384x32>
tensor<[16384,32,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x12>>, >
shape: #ttnn.shape<16384x384>
tensor<[16384,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x24>>, >
shape: #ttnn.shape<16384x768>
tensor<[16384,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<16384x128>
tensor<[16384,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<16384x256>
tensor<[16384,256,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<16384x32>
tensor<[16384,32,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x48>>, >
shape: #ttnn.shape<16384x1536>
tensor<[16384,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x6>>, >
shape: #ttnn.shape<16384x192>
tensor<[16384,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x12>>, >
shape: #ttnn.shape<16384x384>
tensor<[16384,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<16x768>
tensor<[16,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<16x3072>
tensor<[16,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<16x768>
tensor<[16,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x6>>, >
shape: #ttnn.shape<17424x192>
tensor<[17424,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (545, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x18>>, >
shape: #ttnn.shape<17424x576>
tensor<[17424,576,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (545, 18, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x36>>, >
shape: #ttnn.shape<17424x1152>
tensor<[17424,1152,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (545, 36, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x12>>, >
shape: #ttnn.shape<17424x384>
tensor<[17424,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (545, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x2>>, >
shape: #ttnn.shape<19200x64>
tensor<[19200,64,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (600, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x8>>, >
shape: #ttnn.shape<19200x256>
tensor<[19200,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (600, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x2>>, >
shape: #ttnn.shape<19200x64>
tensor<[19200,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (600, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<196x512>
tensor<[196,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<196x512>
tensor<[196,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<196x768>
tensor<[196,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x48>>, >
shape: #ttnn.shape<196x1536>
tensor<[196,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x64>>, >
shape: #ttnn.shape<196x2048>
tensor<[196,2048,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<196x512>
tensor<[196,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<196x3072>
tensor<[196,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<197x1024>
tensor<[197,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x128>>, >
shape: #ttnn.shape<197x4096>
tensor<[197,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<197x768>
tensor<[197,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<197x1024>
tensor<[197,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<197x3072>
tensor<[197,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<197x768>
tensor<[197,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1000>
tensor<[1,1000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1024>
tensor<[1,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x3072>
tensor<[1,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1004>>, >
shape: #ttnn.shape<1x32128>
tensor<[1,32128,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1004, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x4096>
tensor<[1,4096,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x512>
tensor<[1,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1600>>, >
shape: #ttnn.shape<1x51200>
tensor<[1,51200,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1600, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1000>
tensor<[1,1000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x64>
tensor<[1,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x25>>, >
shape: #ttnn.shape<1x784>
tensor<[1,784,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 25, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x3>
tensor<[1,3,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x64>
tensor<[1,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1000>
tensor<[1,1000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4748>>, >
shape: #ttnn.shape<1x151936>
tensor<[1,151936,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4748, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1536>
tensor<[1,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x8>>, >
shape: #ttnn.shape<1x256>
tensor<[1,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x98>>, >
shape: #ttnn.shape<1x3129>
tensor<[1,3129,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 98, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x280>>, >
shape: #ttnn.shape<1x8960>
tensor<[1,8960,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 280, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1000>
tensor<[1,1000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1000>
tensor<[1,1000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x512>
tensor<[1,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x4096>
tensor<[1,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1000>
tensor<[1,1000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x768>
tensor<[1,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1000>
tensor<[1,1000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x512>
tensor<[1,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1000>
tensor<[1,1000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1024>
tensor<[1,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x4096>
tensor<[1,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1000>
tensor<[1,1000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1024>
tensor<[1,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<1x2048>
tensor<[1,2048,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1004>>, >
shape: #ttnn.shape<1x32128>
tensor<[1,32128,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1004, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x12>>, >
shape: #ttnn.shape<1x384>
tensor<[1,384,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 12, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1571>>, >
shape: #ttnn.shape<1x50272>
tensor<[1,50272,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1571, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x512>
tensor<[1,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x128>
tensor<[1,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1000>
tensor<[1,1000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1536>
tensor<[1,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x2>
tensor<[1,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x683>>, >
shape: #ttnn.shape<1x21843>
tensor<[1,21843,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 683, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x3>
tensor<[1,3,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x3072>
tensor<[1,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1004>>, >
shape: #ttnn.shape<1x32128>
tensor<[1,32128,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1004, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1571>>, >
shape: #ttnn.shape<1x50257>
tensor<[1,50257,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1571, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x512>
tensor<[1,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x768>
tensor<[1,768,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x128>
tensor<[1,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1536>
tensor<[1,1536,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x128>
tensor<[1,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x40>>, >
shape: #ttnn.shape<1x1280>
tensor<[1,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<201x768>
tensor<[201,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<201x3072>
tensor<[201,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<201x768>
tensor<[201,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x40>>, >
shape: #ttnn.shape<2048x1280>
tensor<[2048,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (64, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x8>>, >
shape: #ttnn.shape<2048x256>
tensor<[2048,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (64, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x9>>, >
shape: #ttnn.shape<2048x262>
tensor<[2048,262,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (64, 9, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<2048x768>
tensor<[2048,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (64, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<256x1024>
tensor<[256,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<256x2>
tensor<[256,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<256x256>
tensor<[256,256,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x128>>, >
shape: #ttnn.shape<256x4096>
tensor<[256,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x320>>, >
shape: #ttnn.shape<256x10240>
tensor<[256,10240,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 320, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<256x1280>
tensor<[256,1280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<256x256>
tensor<[256,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x24>>, >
shape: #ttnn.shape<256x768>
tensor<[256,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x192>>, >
shape: #ttnn.shape<256x6144>
tensor<[256,6144,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 192, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x5>>, >
shape: #ttnn.shape<256x160>
tensor<[256,160,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<256x1024>
tensor<[256,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<256x256>
tensor<[256,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<256x512>
tensor<[256,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x48>>, >
shape: #ttnn.shape<256x1536>
tensor<[256,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<256x32>
tensor<[256,32,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<256x1024>
tensor<[256,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<256x1280>
tensor<[256,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<256x256>
tensor<[256,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x48>>, >
shape: #ttnn.shape<256x1536>
tensor<[256,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<256x64>
tensor<[256,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<256x512>
tensor<[256,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x24>>, >
shape: #ttnn.shape<257x768>
tensor<[257,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (9, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x72>>, >
shape: #ttnn.shape<257x2304>
tensor<[257,2304,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (9, 72, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x96>>, >
shape: #ttnn.shape<257x3072>
tensor<[257,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (9, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x24>>, >
shape: #ttnn.shape<257x768>
tensor<[257,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (9, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<25x768>
tensor<[25,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<25x2>
tensor<[25,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<25x3072>
tensor<[25,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<25x768>
tensor<[25,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<26x768>
tensor<[26,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<26x3072>
tensor<[26,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<26x768>
tensor<[26,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x954>>, >
shape: #ttnn.shape<27x30522>
tensor<[27,30522,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 954, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<27x38>
tensor<[27,38,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1571>>, >
shape: #ttnn.shape<27x50257>
tensor<[27,50257,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1571, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x1>
tensor<[2,1,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<2x512>
tensor<[2,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x4>>, >
shape: #ttnn.shape<300x128>
tensor<[300,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x16>>, >
shape: #ttnn.shape<300x512>
tensor<[300,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x10>>, >
shape: #ttnn.shape<300x320>
tensor<[300,320,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x64>>, >
shape: #ttnn.shape<300x2048>
tensor<[300,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x16>>, >
shape: #ttnn.shape<300x512>
tensor<[300,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<300x64>
tensor<[300,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<3136x128>
tensor<[3136,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (98, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x12>>, >
shape: #ttnn.shape<3136x384>
tensor<[3136,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (98, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x16>>, >
shape: #ttnn.shape<3136x512>
tensor<[3136,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (98, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<3136x128>
tensor<[3136,128,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (98, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<32x4096>
tensor<[32,4096,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<32x1536>
tensor<[32,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x7840>>, >
shape: #ttnn.shape<32x250880>
tensor<[32,250880,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 7840, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x144>>, >
shape: #ttnn.shape<32x4608>
tensor<[32,4608,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 144, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<32x6144>
tensor<[32,6144,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<32x1024>
tensor<[32,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4008>>, >
shape: #ttnn.shape<32x128256>
tensor<[32,128256,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4008, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<32x3072>
tensor<[32,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<32x8192>
tensor<[32,8192,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 256, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x344>>, >
shape: #ttnn.shape<32x11008>
tensor<[32,11008,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 344, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1000>>, >
shape: #ttnn.shape<32x32000>
tensor<[32,32000,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1000, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<32x4096>
tensor<[32,4096,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<32x1536>
tensor<[32,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<32x3072>
tensor<[32,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x10>>, >
shape: #ttnn.shape<4096x320>
tensor<[4096,320,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x12>>, >
shape: #ttnn.shape<4096x384>
tensor<[4096,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x24>>, >
shape: #ttnn.shape<4096x768>
tensor<[4096,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<4096x64>
tensor<[4096,64,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x24>>, >
shape: #ttnn.shape<4096x768>
tensor<[4096,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x80>>, >
shape: #ttnn.shape<4096x2560>
tensor<[4096,2560,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 80, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x10>>, >
shape: #ttnn.shape<4096x320>
tensor<[4096,320,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x48>>, >
shape: #ttnn.shape<4096x1536>
tensor<[4096,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x8>>, >
shape: #ttnn.shape<4096x256>
tensor<[4096,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<4096x64>
tensor<[4096,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x96>>, >
shape: #ttnn.shape<4096x3072>
tensor<[4096,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x12>>, >
shape: #ttnn.shape<4096x384>
tensor<[4096,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<45x768>
tensor<[45,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<45x3072>
tensor<[45,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1571>>, >
shape: #ttnn.shape<45x50257>
tensor<[45,50257,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 1571, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<45x768>
tensor<[45,768,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x4>>, >
shape: #ttnn.shape<4800x128>
tensor<[4800,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (150, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x16>>, >
shape: #ttnn.shape<4800x512>
tensor<[4800,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (150, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x4>>, >
shape: #ttnn.shape<4800x128>
tensor<[4800,128,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (150, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<49x1024>
tensor<[49,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<49x3072>
tensor<[49,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x128>>, >
shape: #ttnn.shape<49x4096>
tensor<[49,4096,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<49x1024>
tensor<[49,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<49x1024>
tensor<[49,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<4x1024>
tensor<[4,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<4x2048>
tensor<[4,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<4x4096>
tensor<[4,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<4x1024>
tensor<[4,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<50x768>
tensor<[50,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<50x3072>
tensor<[50,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<50x768>
tensor<[50,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x36>>, >
shape: #ttnn.shape<5184x1152>
tensor<[5184,1152,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (162, 36, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x12>>, >
shape: #ttnn.shape<5184x384>
tensor<[5184,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (162, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x72>>, >
shape: #ttnn.shape<5184x2304>
tensor<[5184,2304,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (162, 72, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x24>>, >
shape: #ttnn.shape<5184x768>
tensor<[5184,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (162, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<52x1024>
tensor<[52,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<52x1024>
tensor<[52,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x48>>, >
shape: #ttnn.shape<576x1536>
tensor<[576,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (18, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x144>>, >
shape: #ttnn.shape<576x4608>
tensor<[576,4608,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (18, 144, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<5x1024>
tensor<[5,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<5x3072>
tensor<[5,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<5x4096>
tensor<[5,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1600>>, >
shape: #ttnn.shape<5x51200>
tensor<[5,51200,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1600, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<5x1024>
tensor<[5,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x8>>, >
shape: #ttnn.shape<600x256>
tensor<[600,256,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (19, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x1>>, >
shape: #ttnn.shape<600x4>
tensor<[600,4,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (19, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x3>>, >
shape: #ttnn.shape<600x92>
tensor<[600,92,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (19, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x320>>, >
shape: #ttnn.shape<64x10240>
tensor<[64,10240,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 320, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x40>>, >
shape: #ttnn.shape<64x1280>
tensor<[64,1280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x40>>, >
shape: #ttnn.shape<64x1280>
tensor<[64,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x24>>, >
shape: #ttnn.shape<65536x768>
tensor<[65536,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2048, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x6>>, >
shape: #ttnn.shape<65536x192>
tensor<[65536,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2048, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2178x6>>, >
shape: #ttnn.shape<69696x192>
tensor<[69696,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2178, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2178x18>>, >
shape: #ttnn.shape<69696x576>
tensor<[69696,576,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2178, 18, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<6x1024>
tensor<[6,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<6x4096>
tensor<[6,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<6x512>
tensor<[6,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1572>>, >
shape: #ttnn.shape<6x50280>
tensor<[6,50280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1572, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<6x6144>
tensor<[6,6144,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<6x1024>
tensor<[6,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<6x1024>
tensor<[6,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1571>>, >
shape: #ttnn.shape<6x50272>
tensor<[6,50272,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1571, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<6x3072>
tensor<[6,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x12>>, >
shape: #ttnn.shape<768x384>
tensor<[768,384,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (24, 12, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x7>>, >
shape: #ttnn.shape<768x196>
tensor<[768,196,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (24, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<784x256>
tensor<[784,256,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (25, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x32>>, >
shape: #ttnn.shape<784x1024>
tensor<[784,1024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (25, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<784x256>
tensor<[784,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (25, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x24>>, >
shape: #ttnn.shape<784x768>
tensor<[784,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (25, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<784x256>
tensor<[784,256,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (25, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1572>>, >
shape: #ttnn.shape<7x50280>
tensor<[7,50280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1572, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<7x6144>
tensor<[7,6144,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x142>>, >
shape: #ttnn.shape<7x4544>
tensor<[7,4544,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 142, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<7x768>
tensor<[7,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x568>>, >
shape: #ttnn.shape<7x18176>
tensor<[7,18176,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 568, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x142>>, >
shape: #ttnn.shape<7x4544>
tensor<[7,4544,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 142, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x146>>, >
shape: #ttnn.shape<7x4672>
tensor<[7,4672,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 146, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2032>>, >
shape: #ttnn.shape<7x65024>
tensor<[7,65024,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2032, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<7x2>
tensor<[7,2,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x72>>, >
shape: #ttnn.shape<7x2304>
tensor<[7,2304,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 72, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<7x3072>
tensor<[7,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<7x768>
tensor<[7,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<7x3072>
tensor<[7,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1572>>, >
shape: #ttnn.shape<8x50280>
tensor<[8,50280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1572, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<8x6144>
tensor<[8,6144,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<8x3072>
tensor<[8,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x8>>, >
shape: #ttnn.shape<920x256>
tensor<[920,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (29, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x64>>, >
shape: #ttnn.shape<920x2048>
tensor<[920,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (29, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x8>>, >
shape: #ttnn.shape<920x256>
tensor<[920,256,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (29, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<9x1024>
tensor<[9,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<9x128>
tensor<[9,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<9x4096>
tensor<[9,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<9x1024>
tensor<[9,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<9x2048>
tensor<[9,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x938>>, >
shape: #ttnn.shape<9x30000>
tensor<[9,30000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 938, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<9x4096>
tensor<[9,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<9x768>
tensor<[9,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1572>>, >
shape: #ttnn.shape<9x50280>
tensor<[9,50280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1572, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<9x6144>
tensor<[9,6144,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<9x4096>
tensor<[9,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<9x128>
tensor<[9,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<9x2048>
tensor<[9,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<9x8192>
tensor<[9,8192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 256, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<9x768>
tensor<[9,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<9x1024>
tensor<[9,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<9x128>
tensor<[9,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x512>>, >
shape: #ttnn.shape<9x16384>
tensor<[9,16384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 512, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<9x4096>
tensor<[9,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<9x128>
tensor<[9,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x40>>, >
shape: #ttnn.shape<9x1280>
tensor<[9,1280,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<9x3072>
tensor<[9,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x10>>, >
shape: #ttnn.shape<9x320>
tensor<[9,320,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x20>>, >
shape: #ttnn.shape<9x640>
tensor<[9,640,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<9x768>
tensor<[9,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<9x2048>
tensor<[9,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<9x3072>
tensor<[9,3072,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x12>
tensor<[1,12,128,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x13>
tensor<[1,12,128,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x14>
tensor<[1,12,128,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x15>
tensor<[1,12,128,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x16>
tensor<[1,12,128,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x17>
tensor<[1,12,128,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x18>
tensor<[1,12,128,18,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x19>
tensor<[1,12,128,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x20>
tensor<[1,12,128,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x21>
tensor<[1,12,128,21,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x22>
tensor<[1,12,128,22,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x23>
tensor<[1,12,128,23,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x24>
tensor<[1,12,128,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x25>
tensor<[1,12,128,25,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x26>
tensor<[1,12,128,26,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x27>
tensor<[1,12,128,27,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x28>
tensor<[1,12,128,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x12x128x29>
tensor<[1,12,128,29,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 128 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x4>>, >
shape: #ttnn.shape<1x12x12x128>
tensor<[1,12,12,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (5, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x12x12x64>
tensor<[1,12,12,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (5, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x2>>, >
shape: #ttnn.shape<1x12x14x64>
tensor<[1,12,14,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 168 + d1 * 14 + d2, d3), memory_config: (6, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<563x2>>, >
shape: #ttnn.shape<1x12x1500x64>
tensor<[1,12,1500,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18000 + d1 * 1500 + d2, d3), memory_config: (563, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x2>>, >
shape: #ttnn.shape<1x12x16x64>
tensor<[1,12,16,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 192 + d1 * 16 + d2, d3), memory_config: (6, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<74x2>>, >
shape: #ttnn.shape<1x12x197x64>
tensor<[1,12,197,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2364 + d1 * 197 + d2, d3), memory_config: (74, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x128>
tensor<[1,12,1,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<1x12x25x64>
tensor<[1,12,25,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 300 + d1 * 25 + d2, d3), memory_config: (10, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x2>>, >
shape: #ttnn.shape<1x12x50x64>
tensor<[1,12,50,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 600 + d1 * 50 + d2, d3), memory_config: (19, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x12x64x10>
tensor<[1,12,64,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 64 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x12x64x12>
tensor<[1,12,64,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 64 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x12x64x14>
tensor<[1,12,64,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 64 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x47>>, >
shape: #ttnn.shape<1x12x64x1500>
tensor<[1,12,64,1500,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 64 + d2, d3), memory_config: (24, 47, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x12x64x16>
tensor<[1,12,64,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 64 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x7>>, >
shape: #ttnn.shape<1x12x64x197>
tensor<[1,12,64,197,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 64 + d2, d3), memory_config: (24, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x12x64x25>
tensor<[1,12,64,25,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 64 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x2>>, >
shape: #ttnn.shape<1x12x64x50>
tensor<[1,12,64,50,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 64 + d2, d3), memory_config: (24, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x12x64x7>
tensor<[1,12,64,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 64 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x12x64x9>
tensor<[1,12,64,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 64 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x12x7x64>
tensor<[1,12,7,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 84 + d1 * 7 + d2, d3), memory_config: (3, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<1x12x9x64>
tensor<[1,12,9,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 108 + d1 * 9 + d2, d3), memory_config: (4, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x16x128x9>
tensor<[1,16,128,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 128 + d2, d3), memory_config: (64, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<685x3>>, >
shape: #ttnn.shape<1x16x1370x80>
tensor<[1,16,1370,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21920 + d1 * 1370 + d2, d3), memory_config: (685, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x16x1x64>
tensor<[1,16,1,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x16x256x64>
tensor<[1,16,256,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 256 + d2, d3), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x10>
tensor<[1,16,64,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x11>
tensor<[1,16,64,11,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x12>
tensor<[1,16,64,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x13>
tensor<[1,16,64,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x14>
tensor<[1,16,64,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x15>
tensor<[1,16,64,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x16>
tensor<[1,16,64,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x17>
tensor<[1,16,64,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x18>
tensor<[1,16,64,18,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x19>
tensor<[1,16,64,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x20>
tensor<[1,16,64,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x21>
tensor<[1,16,64,21,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x22>
tensor<[1,16,64,22,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x23>
tensor<[1,16,64,23,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x24>
tensor<[1,16,64,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x8>>, >
shape: #ttnn.shape<1x16x64x256>
tensor<[1,16,64,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x25>
tensor<[1,16,64,25,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x26>
tensor<[1,16,64,26,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x27>
tensor<[1,16,64,27,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x28>
tensor<[1,16,64,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x29>
tensor<[1,16,64,29,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x6>
tensor<[1,16,64,6,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x7>
tensor<[1,16,64,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x8>
tensor<[1,16,64,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x9>
tensor<[1,16,64,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x16x64x9>
tensor<[1,16,64,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x2>>, >
shape: #ttnn.shape<1x16x6x64>
tensor<[1,16,6,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 96 + d1 * 6 + d2, d3), memory_config: (3, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x43>>, >
shape: #ttnn.shape<1x16x80x1370>
tensor<[1,16,80,1370,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 80 + d2, d3), memory_config: (40, 43, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x4>>, >
shape: #ttnn.shape<1x16x9x128>
tensor<[1,16,9,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 9 + d2, d3), memory_config: (5, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x2>>, >
shape: #ttnn.shape<1x16x9x64>
tensor<[1,16,9,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 9 + d2, d3), memory_config: (5, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x1x64x7>
tensor<[1,1,64,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 * 64 + d2, d3), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x24x128x32>
tensor<[1,24,128,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 * 128 + d2, d3), memory_config: (96, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x4>>, >
shape: #ttnn.shape<1x24x32x128>
tensor<[1,24,32,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 32 + d2, d3), memory_config: (24, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x28x128x13>
tensor<[1,28,128,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 128 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x4>>, >
shape: #ttnn.shape<1x28x13x128>
tensor<[1,28,13,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 364 + d1 * 13 + d2, d3), memory_config: (12, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x32x128x32>
tensor<[1,32,128,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 128 + d2, d3), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x4>>, >
shape: #ttnn.shape<1x32x32x128>
tensor<[1,32,32,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (32, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<136x2>>, >
shape: #ttnn.shape<1x3x1445x64>
tensor<[1,3,1445,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4335 + d1 * 1445 + d2, d3), memory_config: (136, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x46>>, >
shape: #ttnn.shape<1x3x64x1445>
tensor<[1,3,64,1445,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 192 + d1 * 64 + d2, d3), memory_config: (6, 46, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x64x64x9>
tensor<[1,64,64,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x2>>, >
shape: #ttnn.shape<1x64x9x64>
tensor<[1,64,9,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 9 + d2, d3), memory_config: (18, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x2>>, >
shape: #ttnn.shape<1x71x7x64>
tensor<[1,71,7,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 497 + d1 * 7 + d2, d3), memory_config: (16, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x3>>, >
shape: #ttnn.shape<1x8x1024x80>
tensor<[1,8,1024,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 1024 + d2, d3), memory_config: (256, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x8>>, >
shape: #ttnn.shape<1x8x160x256>
tensor<[1,8,160,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 160 + d2, d3), memory_config: (40, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x2>>, >
shape: #ttnn.shape<1x8x160x64>
tensor<[1,8,160,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 160 + d2, d3), memory_config: (40, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x8x160x9>
tensor<[1,8,160,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 160 + d2, d3), memory_config: (40, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x5>>, >
shape: #ttnn.shape<1x8x256x160>
tensor<[1,8,256,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 256 + d2, d3), memory_config: (64, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x2>>, >
shape: #ttnn.shape<1x8x4096x40>
tensor<[1,8,4096,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32768 + d1 * 4096 + d2, d3), memory_config: (1024, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x128>>, >
shape: #ttnn.shape<1x8x40x4096>
tensor<[1,8,40,4096,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 320 + d1 * 40 + d2, d3), memory_config: (10, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<1x8x40x9>
tensor<[1,8,40,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 320 + d1 * 40 + d2, d3), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x5>>, >
shape: #ttnn.shape<1x8x64x160>
tensor<[1,8,64,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 64 + d2, d3), memory_config: (16, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x32>>, >
shape: #ttnn.shape<1x8x80x1024>
tensor<[1,8,80,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 80 + d2, d3), memory_config: (20, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x1>>, >
shape: #ttnn.shape<1x8x80x9>
tensor<[1,8,80,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 80 + d2, d3), memory_config: (20, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<2x8x64x7>
tensor<[2,8,64,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 64 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x2>>, >
shape: #ttnn.shape<2x8x7x64>
tensor<[2,8,7,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 56 + d1 * 7 + d2, d3), memory_config: (4, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<4x16x1x64>
tensor<[4,16,1,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 + d2, d3), memory_config: (2, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<4x16x64x1>
tensor<[4,16,64,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 64 + d2, d3), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x8>>, >
shape: #ttnn.shape<100x1x256>
tensor<[100,1,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (4, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x8>>, >
shape: #ttnn.shape<100x1x256>
tensor<[100,1,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (4, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x64>>, >
shape: #ttnn.shape<100x2048>
tensor<[100,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x8>>, >
shape: #ttnn.shape<100x256>
tensor<[100,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<100x4>
tensor<[100,4,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x3>>, >
shape: #ttnn.shape<100x92>
tensor<[100,92,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1024>
tensor<[1024,f32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x10x1024>
tensor<[1,10,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x197x1024>
tensor<[1,197,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x48>>, >
shape: #ttnn.shape<1024x1536>
tensor<[1024,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x5>>, >
shape: #ttnn.shape<1024x160>
tensor<[1024,160,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x96>>, >
shape: #ttnn.shape<1024x3072>
tensor<[1024,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x160>>, >
shape: #ttnn.shape<1024x5120>
tensor<[1024,5120,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 160, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x192>>, >
shape: #ttnn.shape<1024x6144>
tensor<[1024,6144,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 192, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1024x640>
tensor<[1024,640,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 20, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x24>>, >
shape: #ttnn.shape<1024x768>
tensor<[1024,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (32, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x7813>>, >
shape: #ttnn.shape<10x250002>
tensor<[10,250002,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 7813, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<10x3072>
tensor<[10,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<10x768>
tensor<[10,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x40>>, >
shape: #ttnn.shape<1200x1280>
tensor<[1200,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (38, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x10>>, >
shape: #ttnn.shape<1200x320>
tensor<[1200,320,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (38, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6534x1>>, >
shape: #ttnn.shape<121x12x144x32>
tensor<[121,12,144,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1728 + d1 * 144 + d2, d3), memory_config: (6534, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3267x1>>, >
shape: #ttnn.shape<121x6x144x32>
tensor<[121,6,144,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 864 + d1 * 144 + d2, d3), memory_config: (3267, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x48>>, >
shape: #ttnn.shape<1296x1536>
tensor<[1296,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (41, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x72>>, >
shape: #ttnn.shape<1296x2304>
tensor<[1296,2304,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (41, 72, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x144>>, >
shape: #ttnn.shape<1296x4608>
tensor<[1296,4608,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (41, 144, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x24>>, >
shape: #ttnn.shape<1296x768>
tensor<[1296,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (41, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x12>
tensor<[12,12,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<12x1536>
tensor<[12,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x8>>, >
shape: #ttnn.shape<12x256>
tensor<[12,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12x2>
tensor<[12,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<12x3072>
tensor<[12,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<12x768>
tensor<[12,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x40>>, >
shape: #ttnn.shape<1370x1280>
tensor<[1370,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (43, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x120>>, >
shape: #ttnn.shape<1370x3840>
tensor<[1370,3840,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (43, 120, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x160>>, >
shape: #ttnn.shape<1370x5120>
tensor<[1370,5120,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (43, 160, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<13x13>
tensor<[13,13,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<13x2>
tensor<[13,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x112>>, >
shape: #ttnn.shape<13x3584>
tensor<[13,3584,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 112, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<13x512>
tensor<[13,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x6>>, >
shape: #ttnn.shape<1445x192>
tensor<[1445,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (46, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x24>>, >
shape: #ttnn.shape<1445x768>
tensor<[1445,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (46, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<14x2048>
tensor<[14,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<14x2>
tensor<[14,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<14x3072>
tensor<[14,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<14x512>
tensor<[14,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<14x768>
tensor<[14,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x96>>, >
shape: #ttnn.shape<1500x3072>
tensor<[1500,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (47, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x24>>, >
shape: #ttnn.shape<1500x768>
tensor<[1500,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (47, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1536>
tensor<[1536,f32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x10x1536>
tensor<[1,10,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x11x1536>
tensor<[1,11,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x12x1536>
tensor<[1,12,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x13x1536>
tensor<[1,13,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x14x1536>
tensor<[1,14,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x15x1536>
tensor<[1,15,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1x1536>
tensor<[1,1,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x6x1536>
tensor<[1,6,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x7x1536>
tensor<[1,7,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x8x1536>
tensor<[1,8,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x9x1536>
tensor<[1,9,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 48, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<16384x128>
tensor<[16384,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x48>>, >
shape: #ttnn.shape<16384x1536>
tensor<[16384,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x6>>, >
shape: #ttnn.shape<16384x192>
tensor<[16384,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<16384x32>
tensor<[16384,32,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x12>>, >
shape: #ttnn.shape<16384x384>
tensor<[16384,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x24>>, >
shape: #ttnn.shape<16384x768>
tensor<[16384,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (512, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x32>
tensor<[1,16,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<16x3072>
tensor<[16,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<16x768>
tensor<[16,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<16x8x49x32>
tensor<[16,8,49,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 392 + d1 * 49 + d2, d3), memory_config: (196, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x36>>, >
shape: #ttnn.shape<17424x1152>
tensor<[17424,1152,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (545, 36, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x6>>, >
shape: #ttnn.shape<17424x192>
tensor<[17424,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (545, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x12>>, >
shape: #ttnn.shape<17424x384>
tensor<[17424,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (545, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x18>>, >
shape: #ttnn.shape<17424x576>
tensor<[17424,576,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (545, 18, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x8>>, >
shape: #ttnn.shape<19200x256>
tensor<[19200,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (600, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x2>>, >
shape: #ttnn.shape<19200x64>
tensor<[19200,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (600, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x48>>, >
shape: #ttnn.shape<196x1536>
tensor<[196,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<196x3072>
tensor<[196,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<196x512>
tensor<[196,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<196x768>
tensor<[196,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<197x1024>
tensor<[197,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<197x3072>
tensor<[197,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x128>>, >
shape: #ttnn.shape<197x4096>
tensor<[197,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<197x768>
tensor<[197,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1>
tensor<[1,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1000>
tensor<[1,1000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<1x100x14x14>
tensor<[1,100,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1400 + d1 * 14 + d2, d3), memory_config: (44, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1024>
tensor<[1,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1024>
tensor<[1,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1024>
tensor<[1,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1024x10x10>
tensor<[1,1024,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 10 + d2, d3), memory_config: (320, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x1024x14x14>
tensor<[1,1024,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 14 + d2, d3), memory_config: (448, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x48>>, >
shape: #ttnn.shape<1x1024x1536>
tensor<[1,1024,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x48>>, >
shape: #ttnn.shape<1x1024x1536>
tensor<[1,1024,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x5>>, >
shape: #ttnn.shape<1x1024x160>
tensor<[1,1024,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x5>>, >
shape: #ttnn.shape<1x1024x160>
tensor<[1,1024,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x1024x16x16>
tensor<[1,1024,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 16 + d2, d3), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<608x1>>, >
shape: #ttnn.shape<1x1024x19x19>
tensor<[1,1024,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19456 + d1 * 19 + d2, d3), memory_config: (608, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x80>>, >
shape: #ttnn.shape<1x1024x2560>
tensor<[1,1024,2560,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 80, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x1>>, >
shape: #ttnn.shape<1x1024x28x28>
tensor<[1,1024,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 28 + d2, d3), memory_config: (896, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x96>>, >
shape: #ttnn.shape<1x1024x3072>
tensor<[1,1024,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x96>>, >
shape: #ttnn.shape<1x1024x3072>
tensor<[1,1024,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x3>>, >
shape: #ttnn.shape<1x1024x45x80>
tensor<[1,1024,45,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 45 + d2, d3), memory_config: (1440, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1x1024x640>
tensor<[1,1024,640,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 20, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1x1024x640>
tensor<[1,1024,640,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 20, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x24>>, >
shape: #ttnn.shape<1x1024x768>
tensor<[1,1024,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x24>>, >
shape: #ttnn.shape<1x1024x768>
tensor<[1,1024,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x1024x7x7>
tensor<[1,1024,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 7 + d2, d3), memory_config: (224, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x1024x7x7>
tensor<[1,1024,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 7 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<462x1>>, >
shape: #ttnn.shape<1x1056x14x14>
tensor<[1,1056,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14784 + d1 * 14 + d2, d3), memory_config: (462, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<231x1>>, >
shape: #ttnn.shape<1x1056x7x7>
tensor<[1,1056,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7392 + d1 * 7 + d2, d3), memory_config: (231, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<476x1>>, >
shape: #ttnn.shape<1x1088x14x14>
tensor<[1,1088,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15232 + d1 * 14 + d2, d3), memory_config: (476, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<238x1>>, >
shape: #ttnn.shape<1x1088x7x7>
tensor<[1,1088,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7616 + d1 * 7 + d2, d3), memory_config: (238, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x10x1024>
tensor<[1,10,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x10x1536>
tensor<[1,10,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x10x512>
tensor<[1,10,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<490x1>>, >
shape: #ttnn.shape<1x1120x14x14>
tensor<[1,1120,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15680 + d1 * 14 + d2, d3), memory_config: (490, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<245x1>>, >
shape: #ttnn.shape<1x1120x7x7>
tensor<[1,1120,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7840 + d1 * 7 + d2, d3), memory_config: (245, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x1>>, >
shape: #ttnn.shape<1x112x14x14>
tensor<[1,112,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 14 + d2, d3), memory_config: (49, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<53x1>>, >
shape: #ttnn.shape<1x112x15x15>
tensor<[1,112,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1680 + d1 * 15 + d2, d3), memory_config: (53, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x1>>, >
shape: #ttnn.shape<1x112x20x20>
tensor<[1,112,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 20 + d2, d3), memory_config: (70, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x112x24x24>
tensor<[1,112,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 24 + d2, d3), memory_config: (84, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x112x7x7>
tensor<[1,112,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 7 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x1>>, >
shape: #ttnn.shape<1x1152x14x14>
tensor<[1,1152,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 14 + d2, d3), memory_config: (504, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x1152x7x7>
tensor<[1,1152,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 7 + d2, d3), memory_config: (252, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<288x1>>, >
shape: #ttnn.shape<1x1152x8x8>
tensor<[1,1152,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9216 + d1 * 8 + d2, d3), memory_config: (288, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<51x1>>, >
shape: #ttnn.shape<1x116x14x14>
tensor<[1,116,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1624 + d1 * 14 + d2, d3), memory_config: (51, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<518x1>>, >
shape: #ttnn.shape<1x1184x14x14>
tensor<[1,1184,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16576 + d1 * 14 + d2, d3), memory_config: (518, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<259x1>>, >
shape: #ttnn.shape<1x1184x7x7>
tensor<[1,1184,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8288 + d1 * 7 + d2, d3), memory_config: (259, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11>
tensor<[1,11,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x11x1536>
tensor<[1,11,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x10>>, >
shape: #ttnn.shape<1x1200x320>
tensor<[1,1200,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (38, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x10>>, >
shape: #ttnn.shape<1x1200x320>
tensor<[1,1200,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (38, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<53x1>>, >
shape: #ttnn.shape<1x120x14x14>
tensor<[1,120,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1680 + d1 * 14 + d2, d3), memory_config: (53, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x120x17x17>
tensor<[1,120,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2040 + d1 * 17 + d2, d3), memory_config: (64, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x120x28x28>
tensor<[1,120,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 28 + d2, d3), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x2>>, >
shape: #ttnn.shape<1x120x40x40>
tensor<[1,120,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 40 + d2, d3), memory_config: (150, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x120x28x28>
tensor<[1,120,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 28 + d2, d3), memory_config: (105, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x120x28x28>
tensor<[1,120,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 28 + d2, d3), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x120x28x28>
tensor<[1,120,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 28 + d2, d3), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x2>>, >
shape: #ttnn.shape<1x120x40x40>
tensor<[1,120,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 40 + d2, d3), memory_config: (150, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<532x1>>, >
shape: #ttnn.shape<1x1216x14x14>
tensor<[1,1216,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17024 + d1 * 14 + d2, d3), memory_config: (532, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<266x1>>, >
shape: #ttnn.shape<1x1216x7x7>
tensor<[1,1216,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8512 + d1 * 7 + d2, d3), memory_config: (266, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<546x1>>, >
shape: #ttnn.shape<1x1248x14x14>
tensor<[1,1248,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17472 + d1 * 14 + d2, d3), memory_config: (546, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<273x1>>, >
shape: #ttnn.shape<1x1248x7x7>
tensor<[1,1248,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8736 + d1 * 7 + d2, d3), memory_config: (273, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<351x1>>, >
shape: #ttnn.shape<1x1248x9x9>
tensor<[1,1248,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11232 + d1 * 9 + d2, d3), memory_config: (351, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x40>>, >
shape: #ttnn.shape<1x1280>
tensor<[1,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<400x1>>, >
shape: #ttnn.shape<1x1280x10x10>
tensor<[1,1280,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12800 + d1 * 10 + d2, d3), memory_config: (400, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<480x1>>, >
shape: #ttnn.shape<1x1280x12x12>
tensor<[1,1280,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15360 + d1 * 12 + d2, d3), memory_config: (480, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<560x1>>, >
shape: #ttnn.shape<1x1280x14x14>
tensor<[1,1280,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17920 + d1 * 14 + d2, d3), memory_config: (560, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x1280x16x16>
tensor<[1,1280,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 16 + d2, d3), memory_config: (640, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x1>>, >
shape: #ttnn.shape<1x1280x32x32>
tensor<[1,1280,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 32 + d2, d3), memory_config: (1280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x1280x7x7>
tensor<[1,1280,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 7 + d2, d3), memory_config: (280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x1>>, >
shape: #ttnn.shape<1x1280x9x9>
tensor<[1,1280,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 9 + d2, d3), memory_config: (360, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x128>
tensor<[1,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x4>>, >
shape: #ttnn.shape<1x128x112x112>
tensor<[1,128,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 112 + d2, d3), memory_config: (448, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<1x128x128x128>
tensor<[1,128,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (512, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x1>>, >
shape: #ttnn.shape<1x128x14x14>
tensor<[1,128,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 14 + d2, d3), memory_config: (56, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x5>>, >
shape: #ttnn.shape<1x128x150x150>
tensor<[1,128,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 150 + d2, d3), memory_config: (600, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<68x1>>, >
shape: #ttnn.shape<1x128x17x17>
tensor<[1,128,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2176 + d1 * 17 + d2, d3), memory_config: (68, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x10>>, >
shape: #ttnn.shape<1x128x180x320>
tensor<[1,128,180,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 180 + d2, d3), memory_config: (720, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x128x1x1>
tensor<[1,128,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 128 + d1 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x128x28x28>
tensor<[1,128,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 28 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x128x2x2>
tensor<[1,128,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 2 + d2, d3), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x128x32x32>
tensor<[1,128,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 32 + d2, d3), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x1>>, >
shape: #ttnn.shape<1x128x3x3>
tensor<[1,128,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 384 + d1 * 3 + d2, d3), memory_config: (12, 1, 'tile<32x32, f32>', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x2>>, >
shape: #ttnn.shape<1x128x56x56>
tensor<[1,128,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 56 + d2, d3), memory_config: (224, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x1>>, >
shape: #ttnn.shape<1x128x5x5>
tensor<[1,128,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 5 + d2, d3), memory_config: (20, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x2>>, >
shape: #ttnn.shape<1x128x64x64>
tensor<[1,128,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 64 + d2, d3), memory_config: (256, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x3>>, >
shape: #ttnn.shape<1x128x75x75>
tensor<[1,128,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 75 + d2, d3), memory_config: (300, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x128x7x7>
tensor<[1,128,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 7 + d2, d3), memory_config: (28, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x5>>, >
shape: #ttnn.shape<1x128x90x160>
tensor<[1,128,90,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 90 + d2, d3), memory_config: (360, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x128>
tensor<[1,12,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x128>
tensor<[1,12,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x128>
tensor<[1,12,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<5x4>>, >
shape: #ttnn.shape<1x12x12x128>
tensor<[1,12,12,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (5, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x12x1536>
tensor<[1,12,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x128>
tensor<[1,12,1,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<97x9>>, >
shape: #ttnn.shape<1x12x257x257>
tensor<[1,12,257,257,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3084 + d1 * 257 + d2, d3), memory_config: (97, 9, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x12x3072>
tensor<[1,12,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x12x3072>
tensor<[1,12,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x12x3072>
tensor<[1,12,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x12x3072>
tensor<[1,12,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x2>>, >
shape: #ttnn.shape<1x12x56x56>
tensor<[1,12,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 * 56 + d2, d3), memory_config: (21, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x12x768>
tensor<[1,12,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x12x768>
tensor<[1,12,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x280>>, >
shape: #ttnn.shape<1x12x8960>
tensor<[1,12,8960,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 280, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<574x1>>, >
shape: #ttnn.shape<1x1312x14x14>
tensor<[1,1312,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18368 + d1 * 14 + d2, d3), memory_config: (574, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<287x1>>, >
shape: #ttnn.shape<1x1312x7x7>
tensor<[1,1312,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9184 + d1 * 7 + d2, d3), memory_config: (287, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x1>>, >
shape: #ttnn.shape<1x1344x14x14>
tensor<[1,1344,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 14 + d2, d3), memory_config: (588, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1176x1>>, >
shape: #ttnn.shape<1x1344x28x28>
tensor<[1,1344,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 37632 + d1 * 28 + d2, d3), memory_config: (1176, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x1344x7x7>
tensor<[1,1344,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 7 + d2, d3), memory_config: (294, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<118x1>>, >
shape: #ttnn.shape<1x134x28x28>
tensor<[1,134,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3752 + d1 * 28 + d2, d3), memory_config: (118, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<81x1>>, >
shape: #ttnn.shape<1x136x19x19>
tensor<[1,136,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2584 + d1 * 19 + d2, d3), memory_config: (81, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x40>>, >
shape: #ttnn.shape<1x1370x1280>
tensor<[1,1370,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (43, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x40>>, >
shape: #ttnn.shape<1x1370x1280>
tensor<[1,1370,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (43, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<602x1>>, >
shape: #ttnn.shape<1x1376x14x14>
tensor<[1,1376,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19264 + d1 * 14 + d2, d3), memory_config: (602, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<301x1>>, >
shape: #ttnn.shape<1x1376x7x7>
tensor<[1,1376,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9632 + d1 * 7 + d2, d3), memory_config: (301, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<435x1>>, >
shape: #ttnn.shape<1x1392x10x10>
tensor<[1,1392,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13920 + d1 * 10 + d2, d3), memory_config: (435, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<609x1>>, >
shape: #ttnn.shape<1x1392x14x14>
tensor<[1,1392,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19488 + d1 * 14 + d2, d3), memory_config: (609, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<609x1>>, >
shape: #ttnn.shape<1x1392x14x14>
tensor<[1,1392,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19488 + d1 * 14 + d2, d3), memory_config: (609, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1218x1>>, >
shape: #ttnn.shape<1x1392x28x28>
tensor<[1,1392,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 38976 + d1 * 28 + d2, d3), memory_config: (1218, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13>
tensor<[1,13,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x13x128>
tensor<[1,13,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x13x1536>
tensor<[1,13,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x592>>, >
shape: #ttnn.shape<1x13x18944>
tensor<[1,13,18944,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 592, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x112>>, >
shape: #ttnn.shape<1x13x3584>
tensor<[1,13,3584,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 112, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<616x1>>, >
shape: #ttnn.shape<1x1408x14x14>
tensor<[1,1408,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19712 + d1 * 14 + d2, d3), memory_config: (616, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x1408x7x7>
tensor<[1,1408,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 7 + d2, d3), memory_config: (308, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<630x1>>, >
shape: #ttnn.shape<1x1440x14x14>
tensor<[1,1440,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20160 + d1 * 14 + d2, d3), memory_config: (630, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<315x1>>, >
shape: #ttnn.shape<1x1440x7x7>
tensor<[1,1440,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10080 + d1 * 7 + d2, d3), memory_config: (315, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x6>>, >
shape: #ttnn.shape<1x1445x192>
tensor<[1,1445,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (46, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x6>>, >
shape: #ttnn.shape<1x1445x192>
tensor<[1,1445,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (46, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x1>>, >
shape: #ttnn.shape<1x144x14x14>
tensor<[1,144,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 14 + d2, d3), memory_config: (63, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<675x5>>, >
shape: #ttnn.shape<1x144x150x150>
tensor<[1,144,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21600 + d1 * 150 + d2, d3), memory_config: (675, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<855x6>>, >
shape: #ttnn.shape<1x144x190x190>
tensor<[1,144,190,190,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27360 + d1 * 190 + d2, d3), memory_config: (855, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x144x28x28>
tensor<[1,144,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 28 + d2, d3), memory_config: (126, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<135x1>>, >
shape: #ttnn.shape<1x144x30x30>
tensor<[1,144,30,30,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4320 + d1 * 30 + d2, d3), memory_config: (135, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<149x2>>, >
shape: #ttnn.shape<1x144x33x33>
tensor<[1,144,33,33,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4752 + d1 * 33 + d2, d3), memory_config: (149, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x2>>, >
shape: #ttnn.shape<1x144x56x56>
tensor<[1,144,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 56 + d2, d3), memory_config: (252, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<270x2>>, >
shape: #ttnn.shape<1x144x60x60>
tensor<[1,144,60,60,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8640 + d1 * 60 + d2, d3), memory_config: (270, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<293x3>>, >
shape: #ttnn.shape<1x144x65x65>
tensor<[1,144,65,65,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9360 + d1 * 65 + d2, d3), memory_config: (293, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<338x3>>, >
shape: #ttnn.shape<1x144x75x75>
tensor<[1,144,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10800 + d1 * 75 + d2, d3), memory_config: (338, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x144x7x7>
tensor<[1,144,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 7 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<428x3>>, >
shape: #ttnn.shape<1x144x95x95>
tensor<[1,144,95,95,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13680 + d1 * 95 + d2, d3), memory_config: (428, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<644x1>>, >
shape: #ttnn.shape<1x1472x14x14>
tensor<[1,1472,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20608 + d1 * 14 + d2, d3), memory_config: (644, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<322x1>>, >
shape: #ttnn.shape<1x1472x7x7>
tensor<[1,1472,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10304 + d1 * 7 + d2, d3), memory_config: (322, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14>
tensor<[1,14,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x14x128>
tensor<[1,14,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x14x128>
tensor<[1,14,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x14x14x1024>
tensor<[1,14,14,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x14x14x1024>
tensor<[1,14,14,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<1x14x14x512>
tensor<[1,14,14,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<1x14x14x512>
tensor<[1,14,14,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x14x1536>
tensor<[1,14,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x14x3072>
tensor<[1,14,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x14x3072>
tensor<[1,14,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x14x3072>
tensor<[1,14,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x14x3072>
tensor<[1,14,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x2>>, >
shape: #ttnn.shape<1x14x56x56>
tensor<[1,14,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 56 + d2, d3), memory_config: (25, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x14x768>
tensor<[1,14,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x14x768>
tensor<[1,14,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x24>>, >
shape: #ttnn.shape<1x1500x768>
tensor<[1,1500,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (47, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x24>>, >
shape: #ttnn.shape<1x1500x768>
tensor<[1,1500,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (47, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<658x1>>, >
shape: #ttnn.shape<1x1504x14x14>
tensor<[1,1504,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21056 + d1 * 14 + d2, d3), memory_config: (658, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<329x1>>, >
shape: #ttnn.shape<1x1504x7x7>
tensor<[1,1504,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10528 + d1 * 7 + d2, d3), memory_config: (329, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1536>
tensor<[1,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1536>
tensor<[1,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1536>
tensor<[1,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<480x1>>, >
shape: #ttnn.shape<1x1536x10x10>
tensor<[1,1536,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15360 + d1 * 10 + d2, d3), memory_config: (480, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<672x1>>, >
shape: #ttnn.shape<1x1536x14x14>
tensor<[1,1536,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21504 + d1 * 14 + d2, d3), memory_config: (672, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x1536x7x7>
tensor<[1,1536,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 7 + d2, d3), memory_config: (336, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<686x1>>, >
shape: #ttnn.shape<1x1568x14x14>
tensor<[1,1568,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21952 + d1 * 14 + d2, d3), memory_config: (686, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<343x1>>, >
shape: #ttnn.shape<1x1568x7x7>
tensor<[1,1568,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10976 + d1 * 7 + d2, d3), memory_config: (343, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15>
tensor<[1,15,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x15x1024>
tensor<[1,15,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x15x1024>
tensor<[1,15,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x15x1024>
tensor<[1,15,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x15x1024>
tensor<[1,15,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x15x1536>
tensor<[1,15,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x15x512>
tensor<[1,15,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<700x1>>, >
shape: #ttnn.shape<1x1600x14x14>
tensor<[1,1600,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22400 + d1 * 14 + d2, d3), memory_config: (700, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<350x1>>, >
shape: #ttnn.shape<1x1600x7x7>
tensor<[1,1600,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11200 + d1 * 7 + d2, d3), memory_config: (350, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x1>>, >
shape: #ttnn.shape<1x160x14x14>
tensor<[1,160,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 14 + d2, d3), memory_config: (70, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x1>>, >
shape: #ttnn.shape<1x160x24x24>
tensor<[1,160,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 24 + d2, d3), memory_config: (120, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<140x1>>, >
shape: #ttnn.shape<1x160x28x28>
tensor<[1,160,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4480 + d1 * 28 + d2, d3), memory_config: (140, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x2>>, >
shape: #ttnn.shape<1x160x56x56>
tensor<[1,160,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 56 + d2, d3), memory_config: (280, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x160x7x7>
tensor<[1,160,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 7 + d2, d3), memory_config: (35, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<612x1>>, >
shape: #ttnn.shape<1x1632x12x12>
tensor<[1,1632,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19584 + d1 * 12 + d2, d3), memory_config: (612, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<714x1>>, >
shape: #ttnn.shape<1x1632x14x14>
tensor<[1,1632,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22848 + d1 * 14 + d2, d3), memory_config: (714, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<357x1>>, >
shape: #ttnn.shape<1x1632x7x7>
tensor<[1,1632,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11424 + d1 * 7 + d2, d3), memory_config: (357, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x6>>, >
shape: #ttnn.shape<1x16384x192>
tensor<[1,16384,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x6>>, >
shape: #ttnn.shape<1x16384x192>
tensor<[1,16384,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x32>
tensor<[1,16384,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x32>
tensor<[1,16384,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x12>>, >
shape: #ttnn.shape<1x16384x384>
tensor<[1,16384,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x12>>, >
shape: #ttnn.shape<1x16384x384>
tensor<[1,16384,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x24>>, >
shape: #ttnn.shape<1x16384x768>
tensor<[1,16384,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x24>>, >
shape: #ttnn.shape<1x16384x768>
tensor<[1,16384,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<728x1>>, >
shape: #ttnn.shape<1x1664x14x14>
tensor<[1,1664,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23296 + d1 * 14 + d2, d3), memory_config: (728, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x1664x7x7>
tensor<[1,1664,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 7 + d2, d3), memory_config: (364, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x168x28x28>
tensor<[1,168,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 28 + d2, d3), memory_config: (147, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<742x1>>, >
shape: #ttnn.shape<1x1696x14x14>
tensor<[1,1696,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23744 + d1 * 14 + d2, d3), memory_config: (742, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<371x1>>, >
shape: #ttnn.shape<1x1696x7x7>
tensor<[1,1696,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11872 + d1 * 7 + d2, d3), memory_config: (371, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16>
tensor<[1,16,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x4>>, >
shape: #ttnn.shape<1x16x112x112>
tensor<[1,16,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 112 + d2, d3), memory_config: (56, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x4>>, >
shape: #ttnn.shape<1x16x120x120>
tensor<[1,16,120,120,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 120 + d2, d3), memory_config: (60, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<65x5>>, >
shape: #ttnn.shape<1x16x130x130>
tensor<[1,16,130,130,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2080 + d1 * 130 + d2, d3), memory_config: (65, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x16x14x14>
tensor<[1,16,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 14 + d2, d3), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x5>>, >
shape: #ttnn.shape<1x16x160x160>
tensor<[1,16,160,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 160 + d2, d3), memory_config: (80, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x7>>, >
shape: #ttnn.shape<1x16x224x224>
tensor<[1,16,224,224,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 224 + d2, d3), memory_config: (112, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<14x1>>, >
shape: #ttnn.shape<1x16x28x28>
tensor<[1,16,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 448 + d1 * 28 + d2, d3), memory_config: (14, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x2>>, >
shape: #ttnn.shape<1x16x56x56>
tensor<[1,16,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 56 + d2, d3), memory_config: (28, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x16x768>
tensor<[1,16,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x16x768>
tensor<[1,16,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<756x1>>, >
shape: #ttnn.shape<1x1728x14x14>
tensor<[1,1728,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24192 + d1 * 14 + d2, d3), memory_config: (756, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<378x1>>, >
shape: #ttnn.shape<1x1728x7x7>
tensor<[1,1728,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12096 + d1 * 7 + d2, d3), memory_config: (378, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<770x1>>, >
shape: #ttnn.shape<1x1760x14x14>
tensor<[1,1760,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24640 + d1 * 14 + d2, d3), memory_config: (770, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<385x1>>, >
shape: #ttnn.shape<1x1760x7x7>
tensor<[1,1760,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12320 + d1 * 7 + d2, d3), memory_config: (385, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<784x1>>, >
shape: #ttnn.shape<1x1792x14x14>
tensor<[1,1792,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25088 + d1 * 14 + d2, d3), memory_config: (784, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x1792x7x7>
tensor<[1,1792,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 7 + d2, d3), memory_config: (392, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x17>
tensor<[1,17,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<399x1>>, >
shape: #ttnn.shape<1x1824x7x7>
tensor<[1,1824,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12768 + d1 * 7 + d2, d3), memory_config: (399, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<81x1>>, >
shape: #ttnn.shape<1x184x14x14>
tensor<[1,184,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2576 + d1 * 14 + d2, d3), memory_config: (81, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<81x1>>, >
shape: #ttnn.shape<1x184x14x14>
tensor<[1,184,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2576 + d1 * 14 + d2, d3), memory_config: (81, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<115x1>>, >
shape: #ttnn.shape<1x184x20x20>
tensor<[1,184,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3680 + d1 * 20 + d2, d3), memory_config: (115, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<1x184x7x7>
tensor<[1,184,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1288 + d1 * 7 + d2, d3), memory_config: (41, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x1>>, >
shape: #ttnn.shape<1x1856x7x7>
tensor<[1,1856,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 7 + d2, d3), memory_config: (406, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<413x1>>, >
shape: #ttnn.shape<1x1888x7x7>
tensor<[1,1888,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13216 + d1 * 7 + d2, d3), memory_config: (413, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x18>
tensor<[1,18,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x18x14x14>
tensor<[1,18,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 252 + d1 * 14 + d2, d3), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x18x28x28>
tensor<[1,18,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 504 + d1 * 28 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x2>>, >
shape: #ttnn.shape<1x18x56x56>
tensor<[1,18,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 56 + d2, d3), memory_config: (32, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x18x7x7>
tensor<[1,18,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 126 + d1 * 7 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x2>>, >
shape: #ttnn.shape<1x19200x64>
tensor<[1,19200,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x2>>, >
shape: #ttnn.shape<1x19200x64>
tensor<[1,19200,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x1920x16x16>
tensor<[1,1920,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 16 + d2, d3), memory_config: (960, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1920x1>>, >
shape: #ttnn.shape<1x1920x32x32>
tensor<[1,1920,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 61440 + d1 * 32 + d2, d3), memory_config: (1920, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x1920x7x7>
tensor<[1,1920,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 7 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x192x14x14>
tensor<[1,192,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 14 + d2, d3), memory_config: (84, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x192x17x17>
tensor<[1,192,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3264 + d1 * 17 + d2, d3), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x192x28x28>
tensor<[1,192,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 28 + d2, d3), memory_config: (168, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x2>>, >
shape: #ttnn.shape<1x192x35x35>
tensor<[1,192,35,35,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 35 + d2, d3), memory_config: (210, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<228x2>>, >
shape: #ttnn.shape<1x192x38x38>
tensor<[1,192,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7296 + d1 * 38 + d2, d3), memory_config: (228, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<288x2>>, >
shape: #ttnn.shape<1x192x48x48>
tensor<[1,192,48,48,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9216 + d1 * 48 + d2, d3), memory_config: (288, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x2>>, >
shape: #ttnn.shape<1x192x56x56>
tensor<[1,192,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 56 + d2, d3), memory_config: (336, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<450x3>>, >
shape: #ttnn.shape<1x192x75x75>
tensor<[1,192,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 75 + d2, d3), memory_config: (450, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x1>>, >
shape: #ttnn.shape<1x192x7x7>
tensor<[1,192,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 7 + d2, d3), memory_config: (42, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x192x8x8>
tensor<[1,192,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 8 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<570x3>>, >
shape: #ttnn.shape<1x192x95x95>
tensor<[1,192,95,95,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18240 + d1 * 95 + d2, d3), memory_config: (570, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<86x1>>, >
shape: #ttnn.shape<1x196x14x14>
tensor<[1,196,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2744 + d1 * 14 + d2, d3), memory_config: (86, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x196x768>
tensor<[1,196,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x196x768>
tensor<[1,196,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x197x1024>
tensor<[1,197,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x197x1024>
tensor<[1,197,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x197x768>
tensor<[1,197,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x197x768>
tensor<[1,197,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x19>
tensor<[1,19,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x128>
tensor<[1,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1x1536>
tensor<[1,1,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x16x32>
tensor<[1,1,16,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 * 16 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x10>
tensor<[1,1,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x15>
tensor<[1,1,1,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x7>>, >
shape: #ttnn.shape<1x1x1x201>
tensor<[1,1,1,201,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<1x1x1x2048>
tensor<[1,1,1,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x8>
tensor<[1,1,1,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x7>>, >
shape: #ttnn.shape<1x1x224x224>
tensor<[1,1,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 224 + d2, d3), memory_config: (7, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x7>>, >
shape: #ttnn.shape<1x1x224x224>
tensor<[1,1,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 224 + d2, d3), memory_config: (7, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x7>>, >
shape: #ttnn.shape<1x1x224x224>
tensor<[1,1,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 224 + d2, d3), memory_config: (7, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x1x3072>
tensor<[1,1,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x1x3072>
tensor<[1,1,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x1x3072>
tensor<[1,1,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x1x3072>
tensor<[1,1,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x1x4096>
tensor<[1,1,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x1x4096>
tensor<[1,1,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x1x4096>
tensor<[1,1,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x1x4096>
tensor<[1,1,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<15x20>>, >
shape: #ttnn.shape<1x1x480x640>
tensor<[1,1,480,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 480 + d1 * 480 + d2, d3), memory_config: (15, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x512>
tensor<[1,1,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x512>
tensor<[1,1,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x7x64>
tensor<[1,1,7,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x280>>, >
shape: #ttnn.shape<1x1x8960>
tensor<[1,1,8960,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 280, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<88x1>>, >
shape: #ttnn.shape<1x200x14x14>
tensor<[1,200,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2800 + d1 * 14 + d2, d3), memory_config: (88, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<88x1>>, >
shape: #ttnn.shape<1x200x14x14>
tensor<[1,200,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2800 + d1 * 14 + d2, d3), memory_config: (88, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<125x1>>, >
shape: #ttnn.shape<1x200x20x20>
tensor<[1,200,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4000 + d1 * 20 + d2, d3), memory_config: (125, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<1x200x7x7>
tensor<[1,200,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1400 + d1 * 7 + d2, d3), memory_config: (44, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x201x768>
tensor<[1,201,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x201x768>
tensor<[1,201,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x2048x10x10>
tensor<[1,2048,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 10 + d2, d3), memory_config: (640, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x1>>, >
shape: #ttnn.shape<1x2048x14x14>
tensor<[1,2048,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 14 + d2, d3), memory_config: (896, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1472x2>>, >
shape: #ttnn.shape<1x2048x23x40>
tensor<[1,2048,23,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 47104 + d1 * 23 + d2, d3), memory_config: (1472, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<1x2048x768>
tensor<[1,2048,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<1x2048x768>
tensor<[1,2048,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x2048x7x7>
tensor<[1,2048,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 7 + d2, d3), memory_config: (448, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<91x1>>, >
shape: #ttnn.shape<1x208x14x14>
tensor<[1,208,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2912 + d1 * 14 + d2, d3), memory_config: (91, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<59x1>>, >
shape: #ttnn.shape<1x208x9x9>
tensor<[1,208,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1872 + d1 * 9 + d2, d3), memory_config: (59, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x20>
tensor<[1,20,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x20x28x28>
tensor<[1,20,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 560 + d1 * 28 + d2, d3), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x683>>, >
shape: #ttnn.shape<1x21843>
tensor<[1,21843,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 683, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x21>
tensor<[1,21,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x1>>, >
shape: #ttnn.shape<1x224x14x14>
tensor<[1,224,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 14 + d2, d3), memory_config: (98, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<119x1>>, >
shape: #ttnn.shape<1x224x17x17>
tensor<[1,224,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3808 + d1 * 17 + d2, d3), memory_config: (119, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x224x28x28>
tensor<[1,224,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 28 + d2, d3), memory_config: (196, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<245x2>>, >
shape: #ttnn.shape<1x224x35x35>
tensor<[1,224,35,35,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7840 + d1 * 35 + d2, d3), memory_config: (245, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x2>>, >
shape: #ttnn.shape<1x224x56x56>
tensor<[1,224,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 56 + d2, d3), memory_config: (392, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x1>>, >
shape: #ttnn.shape<1x224x7x7>
tensor<[1,224,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 7 + d2, d3), memory_config: (49, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x22>
tensor<[1,22,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<73x1>>, >
shape: #ttnn.shape<1x232x10x10>
tensor<[1,232,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2320 + d1 * 10 + d2, d3), memory_config: (73, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<812x4>>, >
shape: #ttnn.shape<1x232x112x112>
tensor<[1,232,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25984 + d1 * 112 + d2, d3), memory_config: (812, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x2>>, >
shape: #ttnn.shape<1x232x56x56>
tensor<[1,232,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 56 + d2, d3), memory_config: (406, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x2>>, >
shape: #ttnn.shape<1x232x56x56>
tensor<[1,232,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 56 + d2, d3), memory_config: (406, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x23>
tensor<[1,23,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x240x14x14>
tensor<[1,240,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 14 + d2, d3), memory_config: (105, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<113x1>>, >
shape: #ttnn.shape<1x240x15x15>
tensor<[1,240,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 15 + d2, d3), memory_config: (113, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x240x20x20>
tensor<[1,240,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 20 + d2, d3), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x240x28x28>
tensor<[1,240,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 28 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x240x28x28>
tensor<[1,240,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 28 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<225x1>>, >
shape: #ttnn.shape<1x240x30x30>
tensor<[1,240,30,30,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7200 + d1 * 30 + d2, d3), memory_config: (225, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x2>>, >
shape: #ttnn.shape<1x240x40x40>
tensor<[1,240,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 40 + d2, d3), memory_config: (300, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24>
tensor<[1,24,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x4>>, >
shape: #ttnn.shape<1x24x112x112>
tensor<[1,24,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 112 + d2, d3), memory_config: (84, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x1>>, >
shape: #ttnn.shape<1x24x14x14>
tensor<[1,24,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 336 + d1 * 14 + d2, d3), memory_config: (11, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<113x5>>, >
shape: #ttnn.shape<1x24x150x150>
tensor<[1,24,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 150 + d2, d3), memory_config: (113, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<143x6>>, >
shape: #ttnn.shape<1x24x190x190>
tensor<[1,24,190,190,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4560 + d1 * 190 + d2, d3), memory_config: (143, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x24x28x28>
tensor<[1,24,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 * 28 + d2, d3), memory_config: (21, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x4>>, >
shape: #ttnn.shape<1x24x32x128>
tensor<[1,24,32,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 32 + d2, d3), memory_config: (24, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x2>>, >
shape: #ttnn.shape<1x24x56x56>
tensor<[1,24,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 56 + d2, d3), memory_config: (42, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<45x2>>, >
shape: #ttnn.shape<1x24x60x60>
tensor<[1,24,60,60,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1440 + d1 * 60 + d2, d3), memory_config: (45, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x3>>, >
shape: #ttnn.shape<1x24x65x65>
tensor<[1,24,65,65,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1560 + d1 * 65 + d2, d3), memory_config: (49, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x3>>, >
shape: #ttnn.shape<1x24x80x80>
tensor<[1,24,80,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 80 + d2, d3), memory_config: (60, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1103x1>>, >
shape: #ttnn.shape<1x2520x14x14>
tensor<[1,2520,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 35280 + d1 * 14 + d2, d3), memory_config: (1103, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<552x1>>, >
shape: #ttnn.shape<1x2520x7x7>
tensor<[1,2520,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17640 + d1 * 7 + d2, d3), memory_config: (552, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x1>>, >
shape: #ttnn.shape<1x2560x16x16>
tensor<[1,2560,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 16 + d2, d3), memory_config: (1280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x2560x8x8>
tensor<[1,2560,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 8 + d2, d3), memory_config: (640, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x8>>, >
shape: #ttnn.shape<1x256>
tensor<[1,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<1x256x1024>
tensor<[1,256,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<1x256x1024>
tensor<[1,256,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x256x10x10>
tensor<[1,256,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 10 + d2, d3), memory_config: (80, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<1x256x1280>
tensor<[1,256,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<1x256x1280>
tensor<[1,256,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x4>>, >
shape: #ttnn.shape<1x256x128x128>
tensor<[1,256,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32768 + d1 * 128 + d2, d3), memory_config: (1024, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x256x14x14>
tensor<[1,256,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 14 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x48>>, >
shape: #ttnn.shape<1x256x1536>
tensor<[1,256,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x48>>, >
shape: #ttnn.shape<1x256x1536>
tensor<[1,256,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x5>>, >
shape: #ttnn.shape<1x256x160>
tensor<[1,256,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x5>>, >
shape: #ttnn.shape<1x256x160>
tensor<[1,256,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x256x16x16>
tensor<[1,256,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 16 + d2, d3), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<136x1>>, >
shape: #ttnn.shape<1x256x17x17>
tensor<[1,256,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4352 + d1 * 17 + d2, d3), memory_config: (136, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x10>>, >
shape: #ttnn.shape<1x256x180x320>
tensor<[1,256,180,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 180 + d2, d3), memory_config: (1440, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x256x256>
tensor<[1,256,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x256x256>
tensor<[1,256,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x256x28x28>
tensor<[1,256,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 28 + d2, d3), memory_config: (224, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x256x2x2>
tensor<[1,256,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 2 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x96>>, >
shape: #ttnn.shape<1x256x3072>
tensor<[1,256,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x96>>, >
shape: #ttnn.shape<1x256x3072>
tensor<[1,256,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x32>
tensor<[1,256,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x32>
tensor<[1,256,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x256x32x32>
tensor<[1,256,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 32 + d2, d3), memory_config: (256, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<304x2>>, >
shape: #ttnn.shape<1x256x38x38>
tensor<[1,256,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9728 + d1 * 38 + d2, d3), memory_config: (304, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x256x3x3>
tensor<[1,256,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 3 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x3>>, >
shape: #ttnn.shape<1x256x45x80>
tensor<[1,256,45,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 45 + d2, d3), memory_config: (360, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x160>>, >
shape: #ttnn.shape<1x256x5120>
tensor<[1,256,5120,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 160, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<1x256x512>
tensor<[1,256,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<1x256x512>
tensor<[1,256,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x2>>, >
shape: #ttnn.shape<1x256x56x56>
tensor<[1,256,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 56 + d2, d3), memory_config: (448, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x2>>, >
shape: #ttnn.shape<1x256x56x56>
tensor<[1,256,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 56 + d2, d3), memory_config: (448, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x256x5x5>
tensor<[1,256,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 5 + d2, d3), memory_config: (40, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x256x64>
tensor<[1,256,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x256x64>
tensor<[1,256,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x2>>, >
shape: #ttnn.shape<1x256x64x64>
tensor<[1,256,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 64 + d2, d3), memory_config: (512, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x3>>, >
shape: #ttnn.shape<1x256x75x75>
tensor<[1,256,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 75 + d2, d3), memory_config: (600, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x1>>, >
shape: #ttnn.shape<1x256x7x7>
tensor<[1,256,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 7 + d2, d3), memory_config: (56, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x256x8x8>
tensor<[1,256,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 8 + d2, d3), memory_config: (64, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x5>>, >
shape: #ttnn.shape<1x256x90x160>
tensor<[1,256,90,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 90 + d2, d3), memory_config: (720, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x24>>, >
shape: #ttnn.shape<1x257x768>
tensor<[1,257,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (9, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x24>>, >
shape: #ttnn.shape<1x257x768>
tensor<[1,257,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (9, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25>
tensor<[1,25,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x25x768>
tensor<[1,25,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x25x768>
tensor<[1,25,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x26>
tensor<[1,26,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x272x12x12>
tensor<[1,272,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3264 + d1 * 12 + d2, d3), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x1>>, >
shape: #ttnn.shape<1x272x7x7>
tensor<[1,272,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1904 + d1 * 7 + d2, d3), memory_config: (60, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27>
tensor<[1,27,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x27x768>
tensor<[1,27,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 27 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x27x768>
tensor<[1,27,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 27 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x288x14x14>
tensor<[1,288,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 14 + d2, d3), memory_config: (126, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<153x1>>, >
shape: #ttnn.shape<1x288x17x17>
tensor<[1,288,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4896 + d1 * 17 + d2, d3), memory_config: (153, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<171x1>>, >
shape: #ttnn.shape<1x288x19x19>
tensor<[1,288,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5472 + d1 * 19 + d2, d3), memory_config: (171, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x288x28x28>
tensor<[1,288,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 28 + d2, d3), memory_config: (252, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<297x2>>, >
shape: #ttnn.shape<1x288x33x33>
tensor<[1,288,33,33,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9504 + d1 * 33 + d2, d3), memory_config: (297, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<342x2>>, >
shape: #ttnn.shape<1x288x38x38>
tensor<[1,288,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10944 + d1 * 38 + d2, d3), memory_config: (342, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x28>
tensor<[1,28,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x4>>, >
shape: #ttnn.shape<1x28x13x128>
tensor<[1,28,13,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 364 + d1 * 13 + d2, d3), memory_config: (12, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<1x28x28x256>
tensor<[1,28,28,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<1x28x28x256>
tensor<[1,28,28,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x28x28x28>
tensor<[1,28,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x16>>, >
shape: #ttnn.shape<1x28x28x512>
tensor<[1,28,28,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x16>>, >
shape: #ttnn.shape<1x28x28x512>
tensor<[1,28,28,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x29>
tensor<[1,29,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x2>
tensor<[1,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x2x12x128>
tensor<[1,2,12,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 * 12 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x2x1x128>
tensor<[1,2,1,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x4>>, >
shape: #ttnn.shape<1x300x128>
tensor<[1,300,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x4>>, >
shape: #ttnn.shape<1x300x128>
tensor<[1,300,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x10>>, >
shape: #ttnn.shape<1x300x320>
tensor<[1,300,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x10>>, >
shape: #ttnn.shape<1x300x320>
tensor<[1,300,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x16>>, >
shape: #ttnn.shape<1x300x512>
tensor<[1,300,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x16>>, >
shape: #ttnn.shape<1x300x512>
tensor<[1,300,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<1x300x64>
tensor<[1,300,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<1x300x64>
tensor<[1,300,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x3072>
tensor<[1,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x10>
tensor<[1,3072,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x10>
tensor<[1,3072,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x10>
tensor<[1,3072,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x3072x10x16>
tensor<[1,3072,10,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 10 + d2, d3), memory_config: (960, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x3072x10x16>
tensor<[1,3072,10,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 10 + d2, d3), memory_config: (960, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x11>
tensor<[1,3072,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x11>
tensor<[1,3072,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x11>
tensor<[1,3072,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1056x1>>, >
shape: #ttnn.shape<1x3072x11x16>
tensor<[1,3072,11,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 33792 + d1 * 11 + d2, d3), memory_config: (1056, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1056x1>>, >
shape: #ttnn.shape<1x3072x11x16>
tensor<[1,3072,11,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 33792 + d1 * 11 + d2, d3), memory_config: (1056, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x12>
tensor<[1,3072,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x12>
tensor<[1,3072,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x12>
tensor<[1,3072,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1152x1>>, >
shape: #ttnn.shape<1x3072x12x16>
tensor<[1,3072,12,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 36864 + d1 * 12 + d2, d3), memory_config: (1152, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1152x1>>, >
shape: #ttnn.shape<1x3072x12x16>
tensor<[1,3072,12,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 36864 + d1 * 12 + d2, d3), memory_config: (1152, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x13>
tensor<[1,3072,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x13>
tensor<[1,3072,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x13>
tensor<[1,3072,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1248x1>>, >
shape: #ttnn.shape<1x3072x13x16>
tensor<[1,3072,13,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 39936 + d1 * 13 + d2, d3), memory_config: (1248, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1248x1>>, >
shape: #ttnn.shape<1x3072x13x16>
tensor<[1,3072,13,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 39936 + d1 * 13 + d2, d3), memory_config: (1248, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x14>
tensor<[1,3072,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x14>
tensor<[1,3072,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x14>
tensor<[1,3072,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1344x1>>, >
shape: #ttnn.shape<1x3072x14x16>
tensor<[1,3072,14,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 43008 + d1 * 14 + d2, d3), memory_config: (1344, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1344x1>>, >
shape: #ttnn.shape<1x3072x14x16>
tensor<[1,3072,14,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 43008 + d1 * 14 + d2, d3), memory_config: (1344, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x15>
tensor<[1,3072,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x15>
tensor<[1,3072,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x15>
tensor<[1,3072,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x1>>, >
shape: #ttnn.shape<1x3072x15x16>
tensor<[1,3072,15,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 15 + d2, d3), memory_config: (1440, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x1>>, >
shape: #ttnn.shape<1x3072x15x16>
tensor<[1,3072,15,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 15 + d2, d3), memory_config: (1440, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x16>
tensor<[1,3072,16,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x3072x10x16>
tensor<[1,3072,10,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 10 + d2, d3), memory_config: (960, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1056x1>>, >
shape: #ttnn.shape<1x3072x11x16>
tensor<[1,3072,11,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 33792 + d1 * 11 + d2, d3), memory_config: (1056, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1152x1>>, >
shape: #ttnn.shape<1x3072x12x16>
tensor<[1,3072,12,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 36864 + d1 * 12 + d2, d3), memory_config: (1152, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1248x1>>, >
shape: #ttnn.shape<1x3072x13x16>
tensor<[1,3072,13,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 39936 + d1 * 13 + d2, d3), memory_config: (1248, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1344x1>>, >
shape: #ttnn.shape<1x3072x14x16>
tensor<[1,3072,14,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 43008 + d1 * 14 + d2, d3), memory_config: (1344, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x1>>, >
shape: #ttnn.shape<1x3072x15x16>
tensor<[1,3072,15,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 15 + d2, d3), memory_config: (1440, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<576x1>>, >
shape: #ttnn.shape<1x3072x6x16>
tensor<[1,3072,6,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18432 + d1 * 6 + d2, d3), memory_config: (576, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<672x1>>, >
shape: #ttnn.shape<1x3072x7x16>
tensor<[1,3072,7,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21504 + d1 * 7 + d2, d3), memory_config: (672, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<768x1>>, >
shape: #ttnn.shape<1x3072x8x16>
tensor<[1,3072,8,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24576 + d1 * 8 + d2, d3), memory_config: (768, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<864x1>>, >
shape: #ttnn.shape<1x3072x9x16>
tensor<[1,3072,9,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27648 + d1 * 9 + d2, d3), memory_config: (864, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x6>
tensor<[1,3072,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x6>
tensor<[1,3072,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x6>
tensor<[1,3072,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<576x1>>, >
shape: #ttnn.shape<1x3072x6x16>
tensor<[1,3072,6,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18432 + d1 * 6 + d2, d3), memory_config: (576, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<576x1>>, >
shape: #ttnn.shape<1x3072x6x16>
tensor<[1,3072,6,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18432 + d1 * 6 + d2, d3), memory_config: (576, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x7>
tensor<[1,3072,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x7>
tensor<[1,3072,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x7>
tensor<[1,3072,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<672x1>>, >
shape: #ttnn.shape<1x3072x7x16>
tensor<[1,3072,7,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21504 + d1 * 7 + d2, d3), memory_config: (672, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<672x1>>, >
shape: #ttnn.shape<1x3072x7x16>
tensor<[1,3072,7,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21504 + d1 * 7 + d2, d3), memory_config: (672, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x8>
tensor<[1,3072,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x8>
tensor<[1,3072,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x8>
tensor<[1,3072,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<768x1>>, >
shape: #ttnn.shape<1x3072x8x16>
tensor<[1,3072,8,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24576 + d1 * 8 + d2, d3), memory_config: (768, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<768x1>>, >
shape: #ttnn.shape<1x3072x8x16>
tensor<[1,3072,8,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24576 + d1 * 8 + d2, d3), memory_config: (768, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x9>
tensor<[1,3072,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x9>
tensor<[1,3072,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x9>
tensor<[1,3072,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<864x1>>, >
shape: #ttnn.shape<1x3072x9x16>
tensor<[1,3072,9,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27648 + d1 * 9 + d2, d3), memory_config: (864, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<864x1>>, >
shape: #ttnn.shape<1x3072x9x16>
tensor<[1,3072,9,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27648 + d1 * 9 + d2, d3), memory_config: (864, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x98>>, >
shape: #ttnn.shape<1x3129>
tensor<[1,3129,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 98, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<140x1>>, >
shape: #ttnn.shape<1x320x14x14>
tensor<[1,320,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4480 + d1 * 14 + d2, d3), memory_config: (140, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<170x1>>, >
shape: #ttnn.shape<1x320x17x17>
tensor<[1,320,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5440 + d1 * 17 + d2, d3), memory_config: (170, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x320x28x28>
tensor<[1,320,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 28 + d2, d3), memory_config: (280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x320x32x32>
tensor<[1,320,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 32 + d2, d3), memory_config: (320, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<1x320x64x64>
tensor<[1,320,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 64 + d2, d3), memory_config: (640, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x1>>, >
shape: #ttnn.shape<1x320x7x7>
tensor<[1,320,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 7 + d2, d3), memory_config: (70, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x320x8x8>
tensor<[1,320,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 8 + d2, d3), memory_config: (80, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32>
tensor<[1,32,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x32>>, >
shape: #ttnn.shape<1x32x10x1024>
tensor<[1,32,10,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 320 + d1 * 10 + d2, d3), memory_config: (10, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x128>>, >
shape: #ttnn.shape<1x32x10x4096>
tensor<[1,32,10,4096,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 320 + d1 * 10 + d2, d3), memory_config: (10, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x344>>, >
shape: #ttnn.shape<1x32x11008>
tensor<[1,32,11008,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 344, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x4>>, >
shape: #ttnn.shape<1x32x112x112>
tensor<[1,32,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 112 + d2, d3), memory_config: (112, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x4>>, >
shape: #ttnn.shape<1x32x120x120>
tensor<[1,32,120,120,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 120 + d2, d3), memory_config: (120, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x5>>, >
shape: #ttnn.shape<1x32x120x160>
tensor<[1,32,120,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 120 + d2, d3), memory_config: (120, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x32x128>
tensor<[1,32,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x4>>, >
shape: #ttnn.shape<1x32x128x128>
tensor<[1,32,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 128 + d2, d3), memory_config: (128, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<130x5>>, >
shape: #ttnn.shape<1x32x130x130>
tensor<[1,32,130,130,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4160 + d1 * 130 + d2, d3), memory_config: (130, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x5>>, >
shape: #ttnn.shape<1x32x147x147>
tensor<[1,32,147,147,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 147 + d2, d3), memory_config: (147, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<149x5>>, >
shape: #ttnn.shape<1x32x149x149>
tensor<[1,32,149,149,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4768 + d1 * 149 + d2, d3), memory_config: (149, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<14x1>>, >
shape: #ttnn.shape<1x32x14x14>
tensor<[1,32,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 448 + d1 * 14 + d2, d3), memory_config: (14, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x5>>, >
shape: #ttnn.shape<1x32x150x150>
tensor<[1,32,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 150 + d2, d3), memory_config: (150, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x32x1536>
tensor<[1,32,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x32x1536>
tensor<[1,32,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<190x6>>, >
shape: #ttnn.shape<1x32x190x190>
tensor<[1,32,190,190,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6080 + d1 * 190 + d2, d3), memory_config: (190, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x32>>, >
shape: #ttnn.shape<1x32x20x1024>
tensor<[1,32,20,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 20 + d2, d3), memory_config: (20, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x8>>, >
shape: #ttnn.shape<1x32x20x256>
tensor<[1,32,20,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 20 + d2, d3), memory_config: (20, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x128>>, >
shape: #ttnn.shape<1x32x20x4096>
tensor<[1,32,20,4096,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 20 + d2, d3), memory_config: (20, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x8>>, >
shape: #ttnn.shape<1x32x256x256>
tensor<[1,32,256,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 256 + d2, d3), memory_config: (256, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x32x28x28>
tensor<[1,32,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 28 + d2, d3), memory_config: (28, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x32x3072>
tensor<[1,32,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x32>>, >
shape: #ttnn.shape<1x32x30x1024>
tensor<[1,32,30,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 30 + d2, d3), memory_config: (30, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x128>>, >
shape: #ttnn.shape<1x32x30x4096>
tensor<[1,32,30,4096,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 30 + d2, d3), memory_config: (30, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x2>>, >
shape: #ttnn.shape<1x32x30x40>
tensor<[1,32,30,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 30 + d2, d3), memory_config: (30, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x4>>, >
shape: #ttnn.shape<1x32x32x128>
tensor<[1,32,32,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (32, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x32x4096>
tensor<[1,32,4096,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x32>>, >
shape: #ttnn.shape<1x32x40x1024>
tensor<[1,32,40,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 40 + d2, d3), memory_config: (40, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x8>>, >
shape: #ttnn.shape<1x32x40x256>
tensor<[1,32,40,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 40 + d2, d3), memory_config: (40, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x2>>, >
shape: #ttnn.shape<1x32x40x64>
tensor<[1,32,40,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 40 + d2, d3), memory_config: (40, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x1>>, >
shape: #ttnn.shape<1x32x49x32>
tensor<[1,32,49,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 49 + d2, d3), memory_config: (49, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x16>>, >
shape: #ttnn.shape<1x32x512x512>
tensor<[1,32,512,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 512 + d2, d3), memory_config: (512, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x2>>, >
shape: #ttnn.shape<1x32x56x56>
tensor<[1,32,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 56 + d2, d3), memory_config: (56, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x32>>, >
shape: #ttnn.shape<1x32x60x1024>
tensor<[1,32,60,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 60 + d2, d3), memory_config: (60, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x8>>, >
shape: #ttnn.shape<1x32x60x256>
tensor<[1,32,60,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 60 + d2, d3), memory_config: (60, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x3>>, >
shape: #ttnn.shape<1x32x60x80>
tensor<[1,32,60,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 60 + d2, d3), memory_config: (60, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<1x32x6144>
tensor<[1,32,6144,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<1x32x6144>
tensor<[1,32,6144,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<1x32x6144>
tensor<[1,32,6144,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<1x32x6144>
tensor<[1,32,6144,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<75x3>>, >
shape: #ttnn.shape<1x32x75x75>
tensor<[1,32,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2400 + d1 * 75 + d2, d3), memory_config: (75, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x32x7x7>
tensor<[1,32,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 7 + d2, d3), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x8>>, >
shape: #ttnn.shape<1x32x80x256>
tensor<[1,32,80,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 80 + d2, d3), memory_config: (80, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x2>>, >
shape: #ttnn.shape<1x32x80x64>
tensor<[1,32,80,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 80 + d2, d3), memory_config: (80, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<1x32x8192>
tensor<[1,32,8192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 256, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<95x3>>, >
shape: #ttnn.shape<1x32x95x95>
tensor<[1,32,95,95,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3040 + d1 * 95 + d2, d3), memory_config: (95, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x334x14x14>
tensor<[1,334,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4676 + d1 * 14 + d2, d3), memory_config: (147, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1176x4>>, >
shape: #ttnn.shape<1x336x112x112>
tensor<[1,336,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 37632 + d1 * 112 + d2, d3), memory_config: (1176, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x336x14x14>
tensor<[1,336,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 14 + d2, d3), memory_config: (147, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x336x24x24>
tensor<[1,336,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 24 + d2, d3), memory_config: (252, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x2>>, >
shape: #ttnn.shape<1x336x48x48>
tensor<[1,336,48,48,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 48 + d2, d3), memory_config: (504, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x2>>, >
shape: #ttnn.shape<1x336x56x56>
tensor<[1,336,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 56 + d2, d3), memory_config: (588, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x34x28x28>
tensor<[1,34,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 952 + d1 * 28 + d2, d3), memory_config: (30, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<154x1>>, >
shape: #ttnn.shape<1x352x14x14>
tensor<[1,352,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4928 + d1 * 14 + d2, d3), memory_config: (154, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x352x28x28>
tensor<[1,352,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 28 + d2, d3), memory_config: (308, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<99x1>>, >
shape: #ttnn.shape<1x352x9x9>
tensor<[1,352,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3168 + d1 * 9 + d2, d3), memory_config: (99, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x36x14x14>
tensor<[1,36,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 504 + d1 * 14 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x36x28x28>
tensor<[1,36,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 28 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x2>>, >
shape: #ttnn.shape<1x36x56x56>
tensor<[1,36,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 56 + d2, d3), memory_config: (63, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x36x7x7>
tensor<[1,36,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 252 + d1 * 7 + d2, d3), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1624x1>>, >
shape: #ttnn.shape<1x3712x14x14>
tensor<[1,3712,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 51968 + d1 * 14 + d2, d3), memory_config: (1624, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<812x1>>, >
shape: #ttnn.shape<1x3712x7x7>
tensor<[1,3712,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25984 + d1 * 7 + d2, d3), memory_config: (812, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<812x1>>, >
shape: #ttnn.shape<1x3712x7x7>
tensor<[1,3712,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25984 + d1 * 7 + d2, d3), memory_config: (812, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x1>>, >
shape: #ttnn.shape<1x384x10x10>
tensor<[1,384,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 10 + d2, d3), memory_config: (120, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x384x14x14>
tensor<[1,384,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 14 + d2, d3), memory_config: (168, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<204x1>>, >
shape: #ttnn.shape<1x384x17x17>
tensor<[1,384,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6528 + d1 * 17 + d2, d3), memory_config: (204, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x384x28x28>
tensor<[1,384,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 28 + d2, d3), memory_config: (336, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x384x7x7>
tensor<[1,384,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 7 + d2, d3), memory_config: (84, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x384x8x8>
tensor<[1,384,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 * 8 + d2, d3), memory_config: (96, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x3>
tensor<[1,3,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x3x16x16x2>
tensor<[1,3,16,16,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 768 + d1 * 256 + d2 * 16 + d3, d4), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x3x16x16x2>
tensor<[1,3,16,16,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 768 + d1 * 256 + d2 * 16 + d3, d4), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x3x16x16x2>
tensor<[1,3,16,16,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 768 + d1 * 256 + d2 * 16 + d3, d4), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3x32x32x2>
tensor<[1,3,32,32,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3072 + d1 * 1024 + d2 * 32 + d3, d4), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3x32x32x2>
tensor<[1,3,32,32,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3072 + d1 * 1024 + d2 * 32 + d3, d4), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3x32x32x2>
tensor<[1,3,32,32,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3072 + d1 * 1024 + d2 * 32 + d3, d4), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<384x1>>, >
shape: #ttnn.shape<1x3x64x64x2>
tensor<[1,3,64,64,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12288 + d1 * 4096 + d2 * 64 + d3, d4), memory_config: (384, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<384x1>>, >
shape: #ttnn.shape<1x3x64x64x2>
tensor<[1,3,64,64,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12288 + d1 * 4096 + d2 * 64 + d3, d4), memory_config: (384, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<384x1>>, >
shape: #ttnn.shape<1x3x64x64x2>
tensor<[1,3,64,64,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12288 + d1 * 4096 + d2 * 64 + d3, d4), memory_config: (384, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x4096>
tensor<[1,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x40>>, >
shape: #ttnn.shape<1x4096x1280>
tensor<[1,4096,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x48>>, >
shape: #ttnn.shape<1x4096x1536>
tensor<[1,4096,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x48>>, >
shape: #ttnn.shape<1x4096x1536>
tensor<[1,4096,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x10>>, >
shape: #ttnn.shape<1x4096x320>
tensor<[1,4096,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x10>>, >
shape: #ttnn.shape<1x4096x320>
tensor<[1,4096,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x12>>, >
shape: #ttnn.shape<1x4096x384>
tensor<[1,4096,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x12>>, >
shape: #ttnn.shape<1x4096x384>
tensor<[1,4096,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x4096x64>
tensor<[1,4096,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x4096x64>
tensor<[1,4096,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x24>>, >
shape: #ttnn.shape<1x4096x768>
tensor<[1,4096,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x24>>, >
shape: #ttnn.shape<1x4096x768>
tensor<[1,4096,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x40x14x14>
tensor<[1,40,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 560 + d1 * 14 + d2, d3), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x40x28x28>
tensor<[1,40,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 28 + d2, d3), memory_config: (35, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x1>>, >
shape: #ttnn.shape<1x40x30x30>
tensor<[1,40,30,30,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 30 + d2, d3), memory_config: (38, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<50x2>>, >
shape: #ttnn.shape<1x40x40x40>
tensor<[1,40,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 40 + d2, d3), memory_config: (50, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x2>>, >
shape: #ttnn.shape<1x40x56x56>
tensor<[1,40,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 56 + d2, d3), memory_config: (70, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<182x1>>, >
shape: #ttnn.shape<1x416x14x14>
tensor<[1,416,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5824 + d1 * 14 + d2, d3), memory_config: (182, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x416x28x28>
tensor<[1,416,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 28 + d2, d3), memory_config: (364, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x448x12x12>
tensor<[1,448,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 12 + d2, d3), memory_config: (168, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x448x14x14>
tensor<[1,448,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 14 + d2, d3), memory_config: (196, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x448x28x28>
tensor<[1,448,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 28 + d2, d3), memory_config: (392, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x448x8x8>
tensor<[1,448,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 8 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<1x45x3072>
tensor<[1,45,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<1x45x3072>
tensor<[1,45,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<1x45x3072>
tensor<[1,45,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<1x45x3072>
tensor<[1,45,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x45x768>
tensor<[1,45,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x45x768>
tensor<[1,45,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x462x7x7>
tensor<[1,462,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3234 + d1 * 7 + d2, d3), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x46>
tensor<[1,46,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<1x46x28x28>
tensor<[1,46,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1288 + d1 * 28 + d2, d3), memory_config: (41, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x47>
tensor<[1,47,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x4>>, >
shape: #ttnn.shape<1x4800x128>
tensor<[1,4800,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (150, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x4>>, >
shape: #ttnn.shape<1x4800x128>
tensor<[1,4800,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (150, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x480x10x10>
tensor<[1,480,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 10 + d2, d3), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x480x14x14>
tensor<[1,480,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 14 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x480x14x14>
tensor<[1,480,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 14 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x480x14x14>
tensor<[1,480,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 14 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<225x1>>, >
shape: #ttnn.shape<1x480x15x15>
tensor<[1,480,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7200 + d1 * 15 + d2, d3), memory_config: (225, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x480x10x10>
tensor<[1,480,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 10 + d2, d3), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x480x14x14>
tensor<[1,480,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 14 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x1>>, >
shape: #ttnn.shape<1x480x20x20>
tensor<[1,480,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 20 + d2, d3), memory_config: (300, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x1>>, >
shape: #ttnn.shape<1x480x20x20>
tensor<[1,480,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 20 + d2, d3), memory_config: (300, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x480x28x28>
tensor<[1,480,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 28 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x480x7x7>
tensor<[1,480,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 7 + d2, d3), memory_config: (105, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x48>
tensor<[1,48,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x48x14x14>
tensor<[1,48,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 * 14 + d2, d3), memory_config: (21, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<50x2>>, >
shape: #ttnn.shape<1x48x33x33>
tensor<[1,48,33,33,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1584 + d1 * 33 + d2, d3), memory_config: (50, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<57x2>>, >
shape: #ttnn.shape<1x48x38x38>
tensor<[1,48,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1824 + d1 * 38 + d2, d3), memory_config: (57, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x2>>, >
shape: #ttnn.shape<1x48x56x56>
tensor<[1,48,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 56 + d2, d3), memory_config: (84, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x1>>, >
shape: #ttnn.shape<1x48x7x7>
tensor<[1,48,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 336 + d1 * 7 + d2, d3), memory_config: (11, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x49>
tensor<[1,49,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x4>>, >
shape: #ttnn.shape<1x4x13x128>
tensor<[1,4,13,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 52 + d1 * 13 + d2, d3), memory_config: (2, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x50>
tensor<[1,50,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<1x50x3072>
tensor<[1,50,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<1x50x3072>
tensor<[1,50,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x50x768>
tensor<[1,50,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x50x768>
tensor<[1,50,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1600>>, >
shape: #ttnn.shape<1x51200>
tensor<[1,51200,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1600, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x512x16x16>
tensor<[1,512,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 16 + d2, d3), memory_config: (256, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<368x2>>, >
shape: #ttnn.shape<1x512x23x40>
tensor<[1,512,23,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11776 + d1 * 23 + d2, d3), memory_config: (368, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x512x28x28>
tensor<[1,512,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 28 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x512x28x28>
tensor<[1,512,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 28 + d2, d3), memory_config: (448, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x512x32x32>
tensor<[1,512,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 32 + d2, d3), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x3>>, >
shape: #ttnn.shape<1x512x45x80>
tensor<[1,512,45,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 45 + d2, d3), memory_config: (720, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x2>>, >
shape: #ttnn.shape<1x512x56x56>
tensor<[1,512,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 56 + d2, d3), memory_config: (896, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x512x5x5>
tensor<[1,512,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 5 + d2, d3), memory_config: (80, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x512x7x7>
tensor<[1,512,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 7 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x512x8x8>
tensor<[1,512,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 8 + d2, d3), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x5>>, >
shape: #ttnn.shape<1x512x90x160>
tensor<[1,512,90,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 90 + d2, d3), memory_config: (1440, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x51>
tensor<[1,51,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<281x1>>, >
shape: #ttnn.shape<1x528x17x17>
tensor<[1,528,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8976 + d1 * 17 + d2, d3), memory_config: (281, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x52>
tensor<[1,52,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x53>
tensor<[1,53,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<238x1>>, >
shape: #ttnn.shape<1x544x14x14>
tensor<[1,544,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7616 + d1 * 14 + d2, d3), memory_config: (238, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x54>
tensor<[1,54,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x55>
tensor<[1,55,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x56>
tensor<[1,56,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x56x14x14>
tensor<[1,56,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 14 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x2>>, >
shape: #ttnn.shape<1x56x48x48>
tensor<[1,56,48,48,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 48 + d2, d3), memory_config: (84, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<1x56x56x128>
tensor<[1,56,56,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<1x56x56x128>
tensor<[1,56,56,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x576x14x14>
tensor<[1,576,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 14 + d2, d3), memory_config: (252, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<342x1>>, >
shape: #ttnn.shape<1x576x19x19>
tensor<[1,576,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10944 + d1 * 19 + d2, d3), memory_config: (342, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x576x7x7>
tensor<[1,576,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 7 + d2, d3), memory_config: (126, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x57>
tensor<[1,57,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x58>
tensor<[1,58,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<51x1>>, >
shape: #ttnn.shape<1x58x28x28>
tensor<[1,58,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1624 + d1 * 28 + d2, d3), memory_config: (51, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x59>
tensor<[1,59,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x5x1024>
tensor<[1,5,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x5x1024>
tensor<[1,5,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x5x16x32>
tensor<[1,5,16,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 16 + d2, d3), memory_config: (3, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x5x4096>
tensor<[1,5,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x5x4096>
tensor<[1,5,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x5x4096>
tensor<[1,5,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x5x4096>
tensor<[1,5,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<266x1>>, >
shape: #ttnn.shape<1x608x14x14>
tensor<[1,608,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8512 + d1 * 14 + d2, d3), memory_config: (266, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x60>
tensor<[1,60,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<53x1>>, >
shape: #ttnn.shape<1x60x28x28>
tensor<[1,60,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1680 + d1 * 28 + d2, d3), memory_config: (53, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x61>
tensor<[1,61,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x62>
tensor<[1,62,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x63>
tensor<[1,63,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x640x14x14>
tensor<[1,640,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 14 + d2, d3), memory_config: (280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x640x16x16>
tensor<[1,640,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 16 + d2, d3), memory_config: (320, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x2>>, >
shape: #ttnn.shape<1x640x64x64>
tensor<[1,640,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 64 + d2, d3), memory_config: (1280, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x64>
tensor<[1,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x64>
tensor<[1,64,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x4>>, >
shape: #ttnn.shape<1x64x112x112>
tensor<[1,64,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 112 + d2, d3), memory_config: (224, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<240x5>>, >
shape: #ttnn.shape<1x64x120x160>
tensor<[1,64,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7680 + d1 * 120 + d2, d3), memory_config: (240, 5, 'tile<32x32, bf16>', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<240x5>>, >
shape: #ttnn.shape<1x64x120x160>
tensor<[1,64,120,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7680 + d1 * 120 + d2, d3), memory_config: (240, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x40>>, >
shape: #ttnn.shape<1x64x1280>
tensor<[1,64,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x40>>, >
shape: #ttnn.shape<1x64x1280>
tensor<[1,64,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x4>>, >
shape: #ttnn.shape<1x64x128x128>
tensor<[1,64,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 128 + d2, d3), memory_config: (256, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x5>>, >
shape: #ttnn.shape<1x64x147x147>
tensor<[1,64,147,147,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 147 + d2, d3), memory_config: (294, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x64x14x14>
tensor<[1,64,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 14 + d2, d3), memory_config: (28, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x5>>, >
shape: #ttnn.shape<1x64x150x150>
tensor<[1,64,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 150 + d2, d3), memory_config: (300, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x5>>, >
shape: #ttnn.shape<1x64x160x160>
tensor<[1,64,160,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 160 + d2, d3), memory_config: (320, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x10>>, >
shape: #ttnn.shape<1x64x180x320>
tensor<[1,64,180,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 180 + d2, d3), memory_config: (360, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x64x1x1>
tensor<[1,64,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 + d2, d3), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x7>>, >
shape: #ttnn.shape<1x64x224x224>
tensor<[1,64,224,224,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 224 + d2, d3), memory_config: (448, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<1x64x256x256>
tensor<[1,64,256,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 256 + d2, d3), memory_config: (512, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x1>>, >
shape: #ttnn.shape<1x64x28x28>
tensor<[1,64,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 28 + d2, d3), memory_config: (56, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x64x2x2>
tensor<[1,64,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 128 + d1 * 2 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x2>>, >
shape: #ttnn.shape<1x64x30x40>
tensor<[1,64,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 30 + d2, d3), memory_config: (60, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x2>>, >
shape: #ttnn.shape<1x64x30x40>
tensor<[1,64,30,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 30 + d2, d3), memory_config: (60, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x2>>, >
shape: #ttnn.shape<1x64x35x35>
tensor<[1,64,35,35,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 35 + d2, d3), memory_config: (70, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x20>>, >
shape: #ttnn.shape<1x64x360x640>
tensor<[1,64,360,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 360 + d2, d3), memory_config: (720, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x160>>, >
shape: #ttnn.shape<1x64x5120>
tensor<[1,64,5120,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 160, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x2>>, >
shape: #ttnn.shape<1x64x56x56>
tensor<[1,64,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 56 + d2, d3), memory_config: (112, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x3>>, >
shape: #ttnn.shape<1x64x60x80>
tensor<[1,64,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 60 + d2, d3), memory_config: (120, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x3>>, >
shape: #ttnn.shape<1x64x60x80>
tensor<[1,64,60,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 60 + d2, d3), memory_config: (120, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x64x64x64>
tensor<[1,64,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<146x3>>, >
shape: #ttnn.shape<1x64x73x73>
tensor<[1,64,73,73,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4672 + d1 * 73 + d2, d3), memory_config: (146, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<160x3>>, >
shape: #ttnn.shape<1x64x80x80>
tensor<[1,64,80,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5120 + d1 * 80 + d2, d3), memory_config: (160, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x6>>, >
shape: #ttnn.shape<1x65536x192>
tensor<[1,65536,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 65536 + d1, d2), memory_config: (2048, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x6>>, >
shape: #ttnn.shape<1x65536x192>
tensor<[1,65536,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 65536 + d1, d2), memory_config: (2048, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x65>
tensor<[1,65,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x66>
tensor<[1,66,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x672x10x10>
tensor<[1,672,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 10 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x672x14x14>
tensor<[1,672,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 14 + d2, d3), memory_config: (294, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x672x14x14>
tensor<[1,672,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 14 + d2, d3), memory_config: (294, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x672x14x14>
tensor<[1,672,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 14 + d2, d3), memory_config: (294, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<315x1>>, >
shape: #ttnn.shape<1x672x15x15>
tensor<[1,672,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10080 + d1 * 15 + d2, d3), memory_config: (315, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x672x10x10>
tensor<[1,672,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 10 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x672x14x14>
tensor<[1,672,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 14 + d2, d3), memory_config: (294, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x672x20x20>
tensor<[1,672,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 20 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x672x7x7>
tensor<[1,672,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 7 + d2, d3), memory_config: (147, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x672x20x20>
tensor<[1,672,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 20 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x1>>, >
shape: #ttnn.shape<1x672x24x24>
tensor<[1,672,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 24 + d2, d3), memory_config: (504, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x1>>, >
shape: #ttnn.shape<1x672x28x28>
tensor<[1,672,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 28 + d2, d3), memory_config: (588, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1176x2>>, >
shape: #ttnn.shape<1x672x56x56>
tensor<[1,672,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 37632 + d1 * 56 + d2, d3), memory_config: (1176, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x672x7x7>
tensor<[1,672,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 7 + d2, d3), memory_config: (147, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x672x7x7>
tensor<[1,672,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 7 + d2, d3), memory_config: (147, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x672x8x8>
tensor<[1,672,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 8 + d2, d3), memory_config: (168, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x67>
tensor<[1,67,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x68>
tensor<[1,68,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x68x14x14>
tensor<[1,68,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 952 + d1 * 14 + d2, d3), memory_config: (30, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<119x2>>, >
shape: #ttnn.shape<1x68x56x56>
tensor<[1,68,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3808 + d1 * 56 + d2, d3), memory_config: (119, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<609x1>>, >
shape: #ttnn.shape<1x696x28x28>
tensor<[1,696,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19488 + d1 * 28 + d2, d3), memory_config: (609, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<609x1>>, >
shape: #ttnn.shape<1x696x28x28>
tensor<[1,696,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19488 + d1 * 28 + d2, d3), memory_config: (609, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1218x2>>, >
shape: #ttnn.shape<1x696x56x56>
tensor<[1,696,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 38976 + d1 * 56 + d2, d3), memory_config: (1218, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x69>
tensor<[1,69,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6>
tensor<[1,6,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x6x1024>
tensor<[1,6,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x6x1024>
tensor<[1,6,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x6x1024>
tensor<[1,6,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x6x1536>
tensor<[1,6,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x704x14x14>
tensor<[1,704,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 14 + d2, d3), memory_config: (308, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x70>
tensor<[1,70,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x71>
tensor<[1,71,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x2>>, >
shape: #ttnn.shape<1x71x7x64>
tensor<[1,71,7,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 497 + d1 * 7 + d2, d3), memory_config: (16, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<383x1>>, >
shape: #ttnn.shape<1x720x17x17>
tensor<[1,720,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12240 + d1 * 17 + d2, d3), memory_config: (383, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<203x1>>, >
shape: #ttnn.shape<1x720x9x9>
tensor<[1,720,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6480 + d1 * 9 + d2, d3), memory_config: (203, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<433x1>>, >
shape: #ttnn.shape<1x728x19x19>
tensor<[1,728,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13832 + d1 * 19 + d2, d3), memory_config: (433, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<865x2>>, >
shape: #ttnn.shape<1x728x38x38>
tensor<[1,728,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27664 + d1 * 38 + d2, d3), memory_config: (865, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x72>
tensor<[1,72,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x72x14x14>
tensor<[1,72,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 14 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x1>>, >
shape: #ttnn.shape<1x72x28x28>
tensor<[1,72,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 28 + d2, d3), memory_config: (63, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<90x2>>, >
shape: #ttnn.shape<1x72x40x40>
tensor<[1,72,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2880 + d1 * 40 + d2, d3), memory_config: (90, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x1>>, >
shape: #ttnn.shape<1x72x28x28>
tensor<[1,72,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 28 + d2, d3), memory_config: (63, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x1>>, >
shape: #ttnn.shape<1x72x28x28>
tensor<[1,72,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 28 + d2, d3), memory_config: (63, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<90x2>>, >
shape: #ttnn.shape<1x72x40x40>
tensor<[1,72,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2880 + d1 * 40 + d2, d3), memory_config: (90, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x2>>, >
shape: #ttnn.shape<1x72x56x56>
tensor<[1,72,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 56 + d2, d3), memory_config: (126, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x2>>, >
shape: #ttnn.shape<1x72x56x56>
tensor<[1,72,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 56 + d2, d3), memory_config: (126, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x72x7x7>
tensor<[1,72,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 504 + d1 * 7 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<180x3>>, >
shape: #ttnn.shape<1x72x80x80>
tensor<[1,72,80,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5760 + d1 * 80 + d2, d3), memory_config: (180, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<322x1>>, >
shape: #ttnn.shape<1x736x14x14>
tensor<[1,736,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10304 + d1 * 14 + d2, d3), memory_config: (322, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x73>
tensor<[1,73,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x74>
tensor<[1,74,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x75>
tensor<[1,75,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x768>
tensor<[1,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x768>
tensor<[1,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x768>
tensor<[1,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x768x14x14>
tensor<[1,768,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 14 + d2, d3), memory_config: (336, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x768x14x14>
tensor<[1,768,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 14 + d2, d3), memory_config: (336, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x76>
tensor<[1,76,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x77>
tensor<[1,77,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x25>>, >
shape: #ttnn.shape<1x784>
tensor<[1,784,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 25, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x78>
tensor<[1,78,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<69x1>>, >
shape: #ttnn.shape<1x78x28x28>
tensor<[1,78,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2184 + d1 * 28 + d2, d3), memory_config: (69, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x79>
tensor<[1,79,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7>
tensor<[1,7,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x7x1536>
tensor<[1,7,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x7x3072>
tensor<[1,7,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x7x3072>
tensor<[1,7,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x7x3072>
tensor<[1,7,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x7x3072>
tensor<[1,7,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x142>>, >
shape: #ttnn.shape<1x7x4544>
tensor<[1,7,4544,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 142, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x142>>, >
shape: #ttnn.shape<1x7x4544>
tensor<[1,7,4544,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 142, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x7x768>
tensor<[1,7,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x7x768>
tensor<[1,7,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<1x7x7x1024>
tensor<[1,7,7,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<1x7x7x1024>
tensor<[1,7,7,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x64>>, >
shape: #ttnn.shape<1x7x7x2048>
tensor<[1,7,7,2048,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x64>>, >
shape: #ttnn.shape<1x7x7x2048>
tensor<[1,7,7,2048,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<350x1>>, >
shape: #ttnn.shape<1x800x14x14>
tensor<[1,800,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11200 + d1 * 14 + d2, d3), memory_config: (350, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x80>
tensor<[1,80,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x80x10x10>
tensor<[1,80,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 800 + d1 * 10 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x80x14x14>
tensor<[1,80,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 14 + d2, d3), memory_config: (35, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x1>>, >
shape: #ttnn.shape<1x80x15x15>
tensor<[1,80,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 15 + d2, d3), memory_config: (38, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<50x1>>, >
shape: #ttnn.shape<1x80x20x20>
tensor<[1,80,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 20 + d2, d3), memory_config: (50, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x80x7x7>
tensor<[1,80,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 560 + d1 * 7 + d2, d3), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<255x1>>, >
shape: #ttnn.shape<1x816x10x10>
tensor<[1,816,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8160 + d1 * 10 + d2, d3), memory_config: (255, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<485x1>>, >
shape: #ttnn.shape<1x816x19x19>
tensor<[1,816,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15504 + d1 * 19 + d2, d3), memory_config: (485, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x81>
tensor<[1,81,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x82>
tensor<[1,82,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x832x14x14>
tensor<[1,832,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 14 + d2, d3), memory_config: (364, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x83>
tensor<[1,83,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x84>
tensor<[1,84,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x85>
tensor<[1,85,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<378x1>>, >
shape: #ttnn.shape<1x864x14x14>
tensor<[1,864,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12096 + d1 * 14 + d2, d3), memory_config: (378, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x86>
tensor<[1,86,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x87>
tensor<[1,87,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x88>
tensor<[1,88,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x1>>, >
shape: #ttnn.shape<1x88x17x17>
tensor<[1,88,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1496 + d1 * 17 + d2, d3), memory_config: (47, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x896x14x14>
tensor<[1,896,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 14 + d2, d3), memory_config: (392, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x896x7x7>
tensor<[1,896,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 7 + d2, d3), memory_config: (196, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x89>
tensor<[1,89,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8>
tensor<[1,8,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x4>>, >
shape: #ttnn.shape<1x8x112x112>
tensor<[1,8,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 112 + d2, d3), memory_config: (28, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x8x1536>
tensor<[1,8,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x4>>, >
shape: #ttnn.shape<1x8x32x128>
tensor<[1,8,32,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 32 + d2, d3), memory_config: (8, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x8x768>
tensor<[1,8,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x8x768>
tensor<[1,8,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x90>
tensor<[1,90,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x91>
tensor<[1,91,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x1>>, >
shape: #ttnn.shape<1x928x14x14>
tensor<[1,928,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 14 + d2, d3), memory_config: (406, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<203x1>>, >
shape: #ttnn.shape<1x928x7x7>
tensor<[1,928,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6496 + d1 * 7 + d2, d3), memory_config: (203, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x92>
tensor<[1,92,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<1x92x14x14>
tensor<[1,92,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1288 + d1 * 14 + d2, d3), memory_config: (41, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x93>
tensor<[1,93,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x94>
tensor<[1,94,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x95>
tensor<[1,95,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x1>>, >
shape: #ttnn.shape<1x960x12x12>
tensor<[1,960,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 12 + d2, d3), memory_config: (360, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x960x14x14>
tensor<[1,960,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 14 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x960x7x7>
tensor<[1,960,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 7 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x1>>, >
shape: #ttnn.shape<1x960x24x24>
tensor<[1,960,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 24 + d2, d3), memory_config: (720, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x960x32x32>
tensor<[1,960,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 32 + d2, d3), memory_config: (960, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<90x1>>, >
shape: #ttnn.shape<1x960x3x3>
tensor<[1,960,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2880 + d1 * 3 + d2, d3), memory_config: (90, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1920x2>>, >
shape: #ttnn.shape<1x960x64x64>
tensor<[1,960,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 61440 + d1 * 64 + d2, d3), memory_config: (1920, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x960x7x7>
tensor<[1,960,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 7 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x960x7x7>
tensor<[1,960,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 7 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x960x7x7>
tensor<[1,960,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 7 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x96>
tensor<[1,96,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x4>>, >
shape: #ttnn.shape<1x96x112x112>
tensor<[1,96,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 112 + d2, d3), memory_config: (336, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x4>>, >
shape: #ttnn.shape<1x96x120x120>
tensor<[1,96,120,120,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 120 + d2, d3), memory_config: (360, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<390x5>>, >
shape: #ttnn.shape<1x96x130x130>
tensor<[1,96,130,130,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12480 + d1 * 130 + d2, d3), memory_config: (390, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x1>>, >
shape: #ttnn.shape<1x96x14x14>
tensor<[1,96,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 14 + d2, d3), memory_config: (42, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<57x1>>, >
shape: #ttnn.shape<1x96x19x19>
tensor<[1,96,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1824 + d1 * 19 + d2, d3), memory_config: (57, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x96x28x28>
tensor<[1,96,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 28 + d2, d3), memory_config: (84, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x2>>, >
shape: #ttnn.shape<1x96x35x35>
tensor<[1,96,35,35,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 35 + d2, d3), memory_config: (105, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x2>>, >
shape: #ttnn.shape<1x96x56x56>
tensor<[1,96,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 56 + d2, d3), memory_config: (168, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<180x2>>, >
shape: #ttnn.shape<1x96x60x60>
tensor<[1,96,60,60,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5760 + d1 * 60 + d2, d3), memory_config: (180, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<195x3>>, >
shape: #ttnn.shape<1x96x65x65>
tensor<[1,96,65,65,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6240 + d1 * 65 + d2, d3), memory_config: (195, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<213x3>>, >
shape: #ttnn.shape<1x96x71x71>
tensor<[1,96,71,71,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6816 + d1 * 71 + d2, d3), memory_config: (213, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<219x3>>, >
shape: #ttnn.shape<1x96x73x73>
tensor<[1,96,73,73,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7008 + d1 * 73 + d2, d3), memory_config: (219, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x97>
tensor<[1,97,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x98>
tensor<[1,98,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<86x1>>, >
shape: #ttnn.shape<1x98x28x28>
tensor<[1,98,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2744 + d1 * 28 + d2, d3), memory_config: (86, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<434x1>>, >
shape: #ttnn.shape<1x992x14x14>
tensor<[1,992,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13888 + d1 * 14 + d2, d3), memory_config: (434, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<217x1>>, >
shape: #ttnn.shape<1x992x7x7>
tensor<[1,992,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6944 + d1 * 7 + d2, d3), memory_config: (217, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x99>
tensor<[1,99,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9>
tensor<[1,9,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x9x1024>
tensor<[1,9,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x9x1024>
tensor<[1,9,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x9x1536>
tensor<[1,9,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x512>>, >
shape: #ttnn.shape<1x9x16384>
tensor<[1,9,16384,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 512, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x512>>, >
shape: #ttnn.shape<1x9x16384>
tensor<[1,9,16384,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 512, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x512>>, >
shape: #ttnn.shape<1x9x16384>
tensor<[1,9,16384,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 512, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x512>>, >
shape: #ttnn.shape<1x9x16384>
tensor<[1,9,16384,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 512, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<1x9x2048>
tensor<[1,9,2048,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<1x9x2048>
tensor<[1,9,2048,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x9x3072>
tensor<[1,9,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x9x3072>
tensor<[1,9,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x9x3072>
tensor<[1,9,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x9x3072>
tensor<[1,9,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x9x768>
tensor<[1,9,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x9x768>
tensor<[1,9,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<1x9x8192>
tensor<[1,9,8192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 256, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<1x9x8192>
tensor<[1,9,8192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 256, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<1x9x8192>
tensor<[1,9,8192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 256, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<1x9x8192>
tensor<[1,9,8192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 256, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<201x3072>
tensor<[201,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<201x768>
tensor<[201,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x40>>, >
shape: #ttnn.shape<2048x1280>
tensor<[2048,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (64, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x8>>, >
shape: #ttnn.shape<2048x256>
tensor<[2048,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (64, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<2048x768>
tensor<[2048,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (64, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x8>>, >
shape: #ttnn.shape<256>
tensor<[256,f32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x320>>, >
shape: #ttnn.shape<256x10240>
tensor<[256,10240,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 320, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<256x1024>
tensor<[256,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<256x1280>
tensor<[256,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x48>>, >
shape: #ttnn.shape<256x1536>
tensor<[256,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x5>>, >
shape: #ttnn.shape<256x160>
tensor<[256,160,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<256x256>
tensor<[256,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<256x2>
tensor<[256,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<256x32>
tensor<[256,32,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x128>>, >
shape: #ttnn.shape<256x4096>
tensor<[256,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<256x512>
tensor<[256,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x192>>, >
shape: #ttnn.shape<256x6144>
tensor<[256,6144,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 192, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<256x64>
tensor<[256,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x24>>, >
shape: #ttnn.shape<256x768>
tensor<[256,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x72>>, >
shape: #ttnn.shape<257x2304>
tensor<[257,2304,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (9, 72, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x96>>, >
shape: #ttnn.shape<257x3072>
tensor<[257,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (9, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x24>>, >
shape: #ttnn.shape<257x768>
tensor<[257,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (9, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<25x2>
tensor<[25,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<25x3072>
tensor<[25,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<25x768>
tensor<[25,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x954>>, >
shape: #ttnn.shape<27x30522>
tensor<[27,30522,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 954, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<27x38>
tensor<[27,38,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1571>>, >
shape: #ttnn.shape<27x50257>
tensor<[27,50257,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1571, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2>
tensor<[2,f32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<2x13x768>
tensor<[2,13,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x1>
tensor<[2,1,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x1x1x13>
tensor<[2,1,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<2x7x2048>
tensor<[2,7,2048,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<2x7x2048>
tensor<[2,7,2048,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<2x7x512>
tensor<[2,7,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<2x7x512>
tensor<[2,7,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x4>>, >
shape: #ttnn.shape<300x128>
tensor<[300,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x64>>, >
shape: #ttnn.shape<300x2048>
tensor<[300,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x10>>, >
shape: #ttnn.shape<300x320>
tensor<[300,320,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x16>>, >
shape: #ttnn.shape<300x512>
tensor<[300,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<300x64>
tensor<[300,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (10, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x32x3072>
tensor<[1,32,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<3136x128>
tensor<[3136,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (98, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x12>>, >
shape: #ttnn.shape<3136x384>
tensor<[3136,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (98, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x102>>, >
shape: #ttnn.shape<3234>
tensor<[3234,f32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 102, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<32x1536>
tensor<[32,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<32x32>
tensor<[32,32,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x144>>, >
shape: #ttnn.shape<32x4608>
tensor<[32,4608,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 144, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<32x6144>
tensor<[32,6144,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 192, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x112>>, >
shape: #ttnn.shape<3584>
tensor<[3584,f32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 112, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x112>>, >
shape: #ttnn.shape<1x13x3584>
tensor<[1,13,3584,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 112, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1944x1>>, >
shape: #ttnn.shape<36x12x144x32>
tensor<[36,12,144,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1728 + d1 * 144 + d2, d3), memory_config: (1944, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3888x1>>, >
shape: #ttnn.shape<36x24x144x32>
tensor<[36,24,144,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3456 + d1 * 144 + d2, d3), memory_config: (3888, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<4096>
tensor<[4096,f32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x32x4096>
tensor<[1,32,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x48>>, >
shape: #ttnn.shape<4096x1536>
tensor<[4096,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x80>>, >
shape: #ttnn.shape<4096x2560>
tensor<[4096,2560,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 80, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x8>>, >
shape: #ttnn.shape<4096x256>
tensor<[4096,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x96>>, >
shape: #ttnn.shape<4096x3072>
tensor<[4096,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x10>>, >
shape: #ttnn.shape<4096x320>
tensor<[4096,320,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x12>>, >
shape: #ttnn.shape<4096x384>
tensor<[4096,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<4096x64>
tensor<[4096,64,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x24>>, >
shape: #ttnn.shape<4096x768>
tensor<[4096,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (128, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<45x3072>
tensor<[45,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<45x45>
tensor<[45,45,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<45x768>
tensor<[45,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x4>>, >
shape: #ttnn.shape<4800x128>
tensor<[4800,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (150, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x16>>, >
shape: #ttnn.shape<4800x512>
tensor<[4800,512,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (150, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<13068x1>>, >
shape: #ttnn.shape<484x6x144x32>
tensor<[484,6,144,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 864 + d1 * 144 + d2, d3), memory_config: (13068, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<49x1024>
tensor<[49,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<49x3072>
tensor<[49,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<4x13x1024>
tensor<[4,13,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x1>>, >
shape: #ttnn.shape<4x16x49x32>
tensor<[4,16,49,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 49 + d2, d3), memory_config: (98, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<4x1x1024>
tensor<[4,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<4x1x1024>
tensor<[4,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<4x1x1024>
tensor<[4,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<864x1>>, >
shape: #ttnn.shape<4x48x144x32>
tensor<[4,48,144,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6912 + d1 * 144 + d2, d3), memory_config: (864, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<50x3072>
tensor<[50,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<50x768>
tensor<[50,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1600>>, >
shape: #ttnn.shape<51200>
tensor<[51200,f32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1600, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<512>
tensor<[512,f32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x10x512>
tensor<[1,10,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x15x512>
tensor<[1,15,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x512>
tensor<[1,1,512,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x36>>, >
shape: #ttnn.shape<5184x1152>
tensor<[5184,1152,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (162, 36, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x72>>, >
shape: #ttnn.shape<5184x2304>
tensor<[5184,2304,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (162, 72, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x12>>, >
shape: #ttnn.shape<5184x384>
tensor<[5184,384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (162, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x24>>, >
shape: #ttnn.shape<5184x768>
tensor<[5184,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (162, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<52x1024>
tensor<[52,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x48>>, >
shape: #ttnn.shape<576x1536>
tensor<[576,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (18, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x144>>, >
shape: #ttnn.shape<576x4608>
tensor<[576,4608,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (18, 144, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<5x1024>
tensor<[5,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<5x4096>
tensor<[5,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1600>>, >
shape: #ttnn.shape<5x51200>
tensor<[5,51200,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1600, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<5x5>
tensor<[5,5,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x320>>, >
shape: #ttnn.shape<64x10240>
tensor<[64,10240,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 320, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x40>>, >
shape: #ttnn.shape<64x1280>
tensor<[64,1280,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<64x4x49x32>
tensor<[64,4,49,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 49 + d2, d3), memory_config: (392, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x6>>, >
shape: #ttnn.shape<65536x192>
tensor<[65536,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2048, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x24>>, >
shape: #ttnn.shape<65536x768>
tensor<[65536,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2048, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2178x6>>, >
shape: #ttnn.shape<69696x192>
tensor<[69696,192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2178, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2178x18>>, >
shape: #ttnn.shape<69696x576>
tensor<[69696,576,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2178, 18, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<6x1024>
tensor<[6,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<6x4096>
tensor<[6,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x197x768>
tensor<[1,197,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<2x13x768>
tensor<[2,13,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x7>>, >
shape: #ttnn.shape<768x196>
tensor<[768,196,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (24, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<784x256>
tensor<[784,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (25, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x24>>, >
shape: #ttnn.shape<784x768>
tensor<[784,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (25, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x72>>, >
shape: #ttnn.shape<7x2304>
tensor<[7,2304,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 72, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<7x3072>
tensor<[7,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<7x768>
tensor<[7,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<8x100x32>
tensor<[8,100,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 100 + d1, d2), memory_config: (25, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<8x2048>
tensor<[8,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<230x1>>, >
shape: #ttnn.shape<8x920x32>
tensor<[8,920,32,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 920 + d1, d2), memory_config: (230, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x8>>, >
shape: #ttnn.shape<920x1x256>
tensor<[920,1,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (29, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x8>>, >
shape: #ttnn.shape<920x1x256>
tensor<[920,1,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (29, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x64>>, >
shape: #ttnn.shape<920x2048>
tensor<[920,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (29, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x8>>, >
shape: #ttnn.shape<920x256>
tensor<[920,256,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (29, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<9x1024>
tensor<[9,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<9x128>
tensor<[9,128,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x512>>, >
shape: #ttnn.shape<9x16384>
tensor<[9,16384,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 512, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<9x2048>
tensor<[9,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<972x1>>, >
shape: #ttnn.shape<9x24x144x32>
tensor<[9,24,144,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3456 + d1 * 144 + d2, d3), memory_config: (972, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x938>>, >
shape: #ttnn.shape<9x30000>
tensor<[9,30000,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 938, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<9x3072>
tensor<[9,3072,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<9x4096>
tensor<[9,4096,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1944x1>>, >
shape: #ttnn.shape<9x48x144x32>
tensor<[9,48,144,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6912 + d1 * 144 + d2, d3), memory_config: (1944, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<9x768>
tensor<[9,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<9x8192>
tensor<[9,8192,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 256, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1>
tensor<[1,bf16]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1>
tensor<[1,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x2>>, >
shape: #ttnn.shape<4x49x49>
tensor<[4,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (7, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x2>>, >
shape: #ttnn.shape<64x49x49>
tensor<[64,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (98, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x2>
tensor<[8,2,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x2>
tensor<[8,2,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x64>
tensor<[1,12,1,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x16x16>
tensor<[1,1,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 * 16 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x7x32>
tensor<[1,1,7,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x2>>, >
shape: #ttnn.shape<1x24x32x64>
tensor<[1,24,32,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 32 + d2, d3), memory_config: (24, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x2>>, >
shape: #ttnn.shape<1x28x13x64>
tensor<[1,28,13,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 364 + d1 * 13 + d2, d3), memory_config: (12, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x2x12x64>
tensor<[1,2,12,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 * 12 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x2x1x64>
tensor<[1,2,1,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x2>>, >
shape: #ttnn.shape<1x32x32x64>
tensor<[1,32,32,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (32, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x4x13x64>
tensor<[1,4,13,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 52 + d1 * 13 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<3x1>>, >
shape: #ttnn.shape<1x5x16x16>
tensor<[1,5,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 16 + d2, d3), memory_config: (3, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x71x7x32>
tensor<[1,71,7,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 497 + d1 * 7 + d2, d3), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x8x32x64>
tensor<[1,8,32,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 32 + d2, d3), memory_config: (8, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<3072x16>
tensor<[3072,16,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (96, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x10x1536>
tensor<[1,10,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x10x512>
tensor<[1,10,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x11x1536>
tensor<[1,11,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x12x1536>
tensor<[1,12,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x12x3072>
tensor<[1,12,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x13x1536>
tensor<[1,13,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x112>>, >
shape: #ttnn.shape<1x13x3584>
tensor<[1,13,3584,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 112, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x14x1536>
tensor<[1,14,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x14x3072>
tensor<[1,14,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x15x1024>
tensor<[1,15,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x15x1536>
tensor<[1,15,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x15x512>
tensor<[1,15,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1x1536>
tensor<[1,1,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x1x3072>
tensor<[1,1,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x1x4096>
tensor<[1,1,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x512>
tensor<[1,1,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x32x3072>
tensor<[1,32,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x32x4096>
tensor<[1,32,4096,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x3x16x16x2>
tensor<[1,3,16,16,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 768 + d1 * 256 + d2 * 16 + d3, d4), memory_config: (24, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3x32x32x2>
tensor<[1,3,32,32,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3072 + d1 * 1024 + d2 * 32 + d3, d4), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<384x1>>, >
shape: #ttnn.shape<1x3x64x64x2>
tensor<[1,3,64,64,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12288 + d1 * 4096 + d2 * 64 + d3, d4), memory_config: (384, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<1x45x3072>
tensor<[1,45,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x512>
tensor<[1,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x5x4096>
tensor<[1,5,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x6x1536>
tensor<[1,6,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x7x1536>
tensor<[1,7,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x7x3072>
tensor<[1,7,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x8x1536>
tensor<[1,8,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x9x1536>
tensor<[1,9,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x512>>, >
shape: #ttnn.shape<1x9x16384>
tensor<[1,9,16384,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 512, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x9x3072>
tensor<[1,9,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<1x9x8192>
tensor<[1,9,8192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 256, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<2x13x768>
tensor<[2,13,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x1>
tensor<[2,1,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<2x512>
tensor<[2,512,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<1x100x14x14>
tensor<[1,100,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1400 + d1 * 14 + d2, d3), memory_config: (44, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x6>>, >
shape: #ttnn.shape<1x100x192>
tensor<[1,100,192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 100 + d1, d2), memory_config: (4, 6, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1024x10x10>
tensor<[1,1024,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 10 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x1024x14x14>
tensor<[1,1024,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 14 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<608x1>>, >
shape: #ttnn.shape<1x1024x19x19>
tensor<[1,1024,19,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19456 + d1 * 19 + d2, d3), memory_config: (608, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x1>>, >
shape: #ttnn.shape<1x1024x28x28>
tensor<[1,1024,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 28 + d2, d3), memory_config: (896, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x3>>, >
shape: #ttnn.shape<1x1024x45x80>
tensor<[1,1024,45,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 45 + d2, d3), memory_config: (1440, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x1024x7x7>
tensor<[1,1024,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 7 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<462x1>>, >
shape: #ttnn.shape<1x1056x14x14>
tensor<[1,1056,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14784 + d1 * 14 + d2, d3), memory_config: (462, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<231x1>>, >
shape: #ttnn.shape<1x1056x7x7>
tensor<[1,1056,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7392 + d1 * 7 + d2, d3), memory_config: (231, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<476x1>>, >
shape: #ttnn.shape<1x1088x14x14>
tensor<[1,1088,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15232 + d1 * 14 + d2, d3), memory_config: (476, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<238x1>>, >
shape: #ttnn.shape<1x1088x7x7>
tensor<[1,1088,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7616 + d1 * 7 + d2, d3), memory_config: (238, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<1x10x2048>
tensor<[1,10,2048,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x10x3072>
tensor<[1,10,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x10x4096>
tensor<[1,10,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<490x1>>, >
shape: #ttnn.shape<1x1120x14x14>
tensor<[1,1120,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15680 + d1 * 14 + d2, d3), memory_config: (490, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<245x1>>, >
shape: #ttnn.shape<1x1120x7x7>
tensor<[1,1120,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7840 + d1 * 7 + d2, d3), memory_config: (245, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x1>>, >
shape: #ttnn.shape<1x112x14x14>
tensor<[1,112,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 14 + d2, d3), memory_config: (49, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x1>>, >
shape: #ttnn.shape<1x1152x14x14>
tensor<[1,1152,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 14 + d2, d3), memory_config: (504, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x1152x7x7>
tensor<[1,1152,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 7 + d2, d3), memory_config: (252, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<518x1>>, >
shape: #ttnn.shape<1x1184x14x14>
tensor<[1,1184,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16576 + d1 * 14 + d2, d3), memory_config: (518, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<259x1>>, >
shape: #ttnn.shape<1x1184x7x7>
tensor<[1,1184,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8288 + d1 * 7 + d2, d3), memory_config: (259, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x120x1x1>
tensor<[1,120,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 + d2, d3), memory_config: (4, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x120x28x28>
tensor<[1,120,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 28 + d2, d3), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x2>>, >
shape: #ttnn.shape<1x120x40x40>
tensor<[1,120,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 40 + d2, d3), memory_config: (150, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<532x1>>, >
shape: #ttnn.shape<1x1216x14x14>
tensor<[1,1216,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17024 + d1 * 14 + d2, d3), memory_config: (532, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<266x1>>, >
shape: #ttnn.shape<1x1216x7x7>
tensor<[1,1216,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8512 + d1 * 7 + d2, d3), memory_config: (266, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<546x1>>, >
shape: #ttnn.shape<1x1248x14x14>
tensor<[1,1248,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17472 + d1 * 14 + d2, d3), memory_config: (546, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<273x1>>, >
shape: #ttnn.shape<1x1248x7x7>
tensor<[1,1248,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8736 + d1 * 7 + d2, d3), memory_config: (273, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x128>
tensor<[1,128,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<560x1>>, >
shape: #ttnn.shape<1x1280x14x14>
tensor<[1,1280,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17920 + d1 * 14 + d2, d3), memory_config: (560, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x1280x1x1>
tensor<[1,1280,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 + d2, d3), memory_config: (40, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x1280x7x7>
tensor<[1,1280,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 7 + d2, d3), memory_config: (280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x4>>, >
shape: #ttnn.shape<1x128x112x112>
tensor<[1,128,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 112 + d2, d3), memory_config: (448, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x1>>, >
shape: #ttnn.shape<1x128x14x14>
tensor<[1,128,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 14 + d2, d3), memory_config: (56, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x5>>, >
shape: #ttnn.shape<1x128x150x150>
tensor<[1,128,150,150,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 150 + d2, d3), memory_config: (600, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<68x1>>, >
shape: #ttnn.shape<1x128x17x17>
tensor<[1,128,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2176 + d1 * 17 + d2, d3), memory_config: (68, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x10>>, >
shape: #ttnn.shape<1x128x180x320>
tensor<[1,128,180,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 180 + d2, d3), memory_config: (720, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x128x28x28>
tensor<[1,128,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 28 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x2>>, >
shape: #ttnn.shape<1x128x56x56>
tensor<[1,128,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 56 + d2, d3), memory_config: (224, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x2>>, >
shape: #ttnn.shape<1x128x64x64>
tensor<[1,128,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 64 + d2, d3), memory_config: (256, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x3>>, >
shape: #ttnn.shape<1x128x75x75>
tensor<[1,128,75,75,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 75 + d2, d3), memory_config: (300, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x128x7x7>
tensor<[1,128,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 7 + d2, d3), memory_config: (28, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x5>>, >
shape: #ttnn.shape<1x128x90x160>
tensor<[1,128,90,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 90 + d2, d3), memory_config: (360, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<574x1>>, >
shape: #ttnn.shape<1x1312x14x14>
tensor<[1,1312,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18368 + d1 * 14 + d2, d3), memory_config: (574, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<287x1>>, >
shape: #ttnn.shape<1x1312x7x7>
tensor<[1,1312,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9184 + d1 * 7 + d2, d3), memory_config: (287, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x1>>, >
shape: #ttnn.shape<1x1344x14x14>
tensor<[1,1344,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 14 + d2, d3), memory_config: (588, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1176x1>>, >
shape: #ttnn.shape<1x1344x28x28>
tensor<[1,1344,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 37632 + d1 * 28 + d2, d3), memory_config: (1176, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x1344x7x7>
tensor<[1,1344,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 7 + d2, d3), memory_config: (294, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<602x1>>, >
shape: #ttnn.shape<1x1376x14x14>
tensor<[1,1376,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19264 + d1 * 14 + d2, d3), memory_config: (602, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<301x1>>, >
shape: #ttnn.shape<1x1376x7x7>
tensor<[1,1376,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9632 + d1 * 7 + d2, d3), memory_config: (301, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<609x1>>, >
shape: #ttnn.shape<1x1392x14x14>
tensor<[1,1392,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19488 + d1 * 14 + d2, d3), memory_config: (609, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1218x1>>, >
shape: #ttnn.shape<1x1392x28x28>
tensor<[1,1392,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 38976 + d1 * 28 + d2, d3), memory_config: (1218, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<616x1>>, >
shape: #ttnn.shape<1x1408x14x14>
tensor<[1,1408,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19712 + d1 * 14 + d2, d3), memory_config: (616, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x1408x7x7>
tensor<[1,1408,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 7 + d2, d3), memory_config: (308, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<630x1>>, >
shape: #ttnn.shape<1x1440x14x14>
tensor<[1,1440,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20160 + d1 * 14 + d2, d3), memory_config: (630, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<315x1>>, >
shape: #ttnn.shape<1x1440x7x7>
tensor<[1,1440,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10080 + d1 * 7 + d2, d3), memory_config: (315, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x1>>, >
shape: #ttnn.shape<1x144x14x14>
tensor<[1,144,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 14 + d2, d3), memory_config: (63, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x144x7x7>
tensor<[1,144,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 7 + d2, d3), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<644x1>>, >
shape: #ttnn.shape<1x1472x14x14>
tensor<[1,1472,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20608 + d1 * 14 + d2, d3), memory_config: (644, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<322x1>>, >
shape: #ttnn.shape<1x1472x7x7>
tensor<[1,1472,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10304 + d1 * 7 + d2, d3), memory_config: (322, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<658x1>>, >
shape: #ttnn.shape<1x1504x14x14>
tensor<[1,1504,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21056 + d1 * 14 + d2, d3), memory_config: (658, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<329x1>>, >
shape: #ttnn.shape<1x1504x7x7>
tensor<[1,1504,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10528 + d1 * 7 + d2, d3), memory_config: (329, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<480x1>>, >
shape: #ttnn.shape<1x1536x10x10>
tensor<[1,1536,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15360 + d1 * 10 + d2, d3), memory_config: (480, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<672x1>>, >
shape: #ttnn.shape<1x1536x14x14>
tensor<[1,1536,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21504 + d1 * 14 + d2, d3), memory_config: (672, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x1536x7x7>
tensor<[1,1536,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 7 + d2, d3), memory_config: (336, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<686x1>>, >
shape: #ttnn.shape<1x1568x14x14>
tensor<[1,1568,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21952 + d1 * 14 + d2, d3), memory_config: (686, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<343x1>>, >
shape: #ttnn.shape<1x1568x7x7>
tensor<[1,1568,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10976 + d1 * 7 + d2, d3), memory_config: (343, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<700x1>>, >
shape: #ttnn.shape<1x1600x14x14>
tensor<[1,1600,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22400 + d1 * 14 + d2, d3), memory_config: (700, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<350x1>>, >
shape: #ttnn.shape<1x1600x7x7>
tensor<[1,1600,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11200 + d1 * 7 + d2, d3), memory_config: (350, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x1>>, >
shape: #ttnn.shape<1x160x14x14>
tensor<[1,160,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 14 + d2, d3), memory_config: (70, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<140x1>>, >
shape: #ttnn.shape<1x160x28x28>
tensor<[1,160,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4480 + d1 * 28 + d2, d3), memory_config: (140, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x2>>, >
shape: #ttnn.shape<1x160x56x56>
tensor<[1,160,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 56 + d2, d3), memory_config: (280, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x160x7x7>
tensor<[1,160,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 7 + d2, d3), memory_config: (35, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<714x1>>, >
shape: #ttnn.shape<1x1632x14x14>
tensor<[1,1632,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22848 + d1 * 14 + d2, d3), memory_config: (714, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<357x1>>, >
shape: #ttnn.shape<1x1632x7x7>
tensor<[1,1632,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11424 + d1 * 7 + d2, d3), memory_config: (357, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<728x1>>, >
shape: #ttnn.shape<1x1664x14x14>
tensor<[1,1664,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23296 + d1 * 14 + d2, d3), memory_config: (728, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x1664x7x7>
tensor<[1,1664,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 7 + d2, d3), memory_config: (364, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<1x168x1x1>
tensor<[1,168,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 168 + d1 + d2, d3), memory_config: (6, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<742x1>>, >
shape: #ttnn.shape<1x1696x14x14>
tensor<[1,1696,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23744 + d1 * 14 + d2, d3), memory_config: (742, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<371x1>>, >
shape: #ttnn.shape<1x1696x7x7>
tensor<[1,1696,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11872 + d1 * 7 + d2, d3), memory_config: (371, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x4>>, >
shape: #ttnn.shape<1x16x112x112>
tensor<[1,16,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 112 + d2, d3), memory_config: (56, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x16x14x14>
tensor<[1,16,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 14 + d2, d3), memory_config: (7, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x5>>, >
shape: #ttnn.shape<1x16x160x160>
tensor<[1,16,160,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 160 + d2, d3), memory_config: (80, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x7>>, >
shape: #ttnn.shape<1x16x224x224>
tensor<[1,16,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 224 + d2, d3), memory_config: (112, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<14x1>>, >
shape: #ttnn.shape<1x16x28x28>
tensor<[1,16,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 448 + d1 * 28 + d2, d3), memory_config: (14, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<756x1>>, >
shape: #ttnn.shape<1x1728x14x14>
tensor<[1,1728,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24192 + d1 * 14 + d2, d3), memory_config: (756, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<378x1>>, >
shape: #ttnn.shape<1x1728x7x7>
tensor<[1,1728,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12096 + d1 * 7 + d2, d3), memory_config: (378, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<6x1>>, >
shape: #ttnn.shape<1x174x1x1>
tensor<[1,174,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 174 + d1 + d2, d3), memory_config: (6, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<770x1>>, >
shape: #ttnn.shape<1x1760x14x14>
tensor<[1,1760,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24640 + d1 * 14 + d2, d3), memory_config: (770, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<385x1>>, >
shape: #ttnn.shape<1x1760x7x7>
tensor<[1,1760,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12320 + d1 * 7 + d2, d3), memory_config: (385, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<784x1>>, >
shape: #ttnn.shape<1x1792x14x14>
tensor<[1,1792,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25088 + d1 * 14 + d2, d3), memory_config: (784, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x1792x7x7>
tensor<[1,1792,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 7 + d2, d3), memory_config: (392, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<399x1>>, >
shape: #ttnn.shape<1x1824x7x7>
tensor<[1,1824,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12768 + d1 * 7 + d2, d3), memory_config: (399, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x1>>, >
shape: #ttnn.shape<1x1856x7x7>
tensor<[1,1856,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 7 + d2, d3), memory_config: (406, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<413x1>>, >
shape: #ttnn.shape<1x1888x7x7>
tensor<[1,1888,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13216 + d1 * 7 + d2, d3), memory_config: (413, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x18x14x14>
tensor<[1,18,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 252 + d1 * 14 + d2, d3), memory_config: (8, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x18x28x28>
tensor<[1,18,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 504 + d1 * 28 + d2, d3), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x2>>, >
shape: #ttnn.shape<1x18x56x56>
tensor<[1,18,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 56 + d2, d3), memory_config: (32, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x1920x7x7>
tensor<[1,1920,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 7 + d2, d3), memory_config: (420, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x192x14x14>
tensor<[1,192,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 14 + d2, d3), memory_config: (84, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x192x17x17>
tensor<[1,192,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3264 + d1 * 17 + d2, d3), memory_config: (102, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x192x28x28>
tensor<[1,192,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 28 + d2, d3), memory_config: (168, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x2>>, >
shape: #ttnn.shape<1x192x35x35>
tensor<[1,192,35,35,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 35 + d2, d3), memory_config: (210, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x2>>, >
shape: #ttnn.shape<1x192x56x56>
tensor<[1,192,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 56 + d2, d3), memory_config: (336, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x1>>, >
shape: #ttnn.shape<1x192x7x7>
tensor<[1,192,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 7 + d2, d3), memory_config: (42, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x192x8x8>
tensor<[1,192,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 8 + d2, d3), memory_config: (48, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<1x1x2048>
tensor<[1,1,2048,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x1x3072>
tensor<[1,1,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x1x4096>
tensor<[1,1,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x2048x10x10>
tensor<[1,2048,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 10 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x1>>, >
shape: #ttnn.shape<1x2048x14x14>
tensor<[1,2048,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 14 + d2, d3), memory_config: (896, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1472x2>>, >
shape: #ttnn.shape<1x2048x23x40>
tensor<[1,2048,23,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 47104 + d1 * 23 + d2, d3), memory_config: (1472, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x2048x7x7>
tensor<[1,2048,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 7 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<91x1>>, >
shape: #ttnn.shape<1x208x14x14>
tensor<[1,208,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2912 + d1 * 14 + d2, d3), memory_config: (91, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x20x1x1>
tensor<[1,20,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x1>>, >
shape: #ttnn.shape<1x224x14x14>
tensor<[1,224,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 14 + d2, d3), memory_config: (98, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<119x1>>, >
shape: #ttnn.shape<1x224x17x17>
tensor<[1,224,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3808 + d1 * 17 + d2, d3), memory_config: (119, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x224x28x28>
tensor<[1,224,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 28 + d2, d3), memory_config: (196, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<245x2>>, >
shape: #ttnn.shape<1x224x35x35>
tensor<[1,224,35,35,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7840 + d1 * 35 + d2, d3), memory_config: (245, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x2>>, >
shape: #ttnn.shape<1x224x56x56>
tensor<[1,224,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 56 + d2, d3), memory_config: (392, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x1>>, >
shape: #ttnn.shape<1x224x7x7>
tensor<[1,224,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 7 + d2, d3), memory_config: (49, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<812x4>>, >
shape: #ttnn.shape<1x232x112x112>
tensor<[1,232,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25984 + d1 * 112 + d2, d3), memory_config: (812, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x2>>, >
shape: #ttnn.shape<1x232x56x56>
tensor<[1,232,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 56 + d2, d3), memory_config: (406, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x240x14x14>
tensor<[1,240,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 14 + d2, d3), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x240x1x1>
tensor<[1,240,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 240 + d1 + d2, d3), memory_config: (8, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x4>>, >
shape: #ttnn.shape<1x24x112x112>
tensor<[1,24,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 112 + d2, d3), memory_config: (84, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x1>>, >
shape: #ttnn.shape<1x24x14x14>
tensor<[1,24,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 336 + d1 * 14 + d2, d3), memory_config: (11, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24x1x1>
tensor<[1,24,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1103x1>>, >
shape: #ttnn.shape<1x2520x14x14>
tensor<[1,2520,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 35280 + d1 * 14 + d2, d3), memory_config: (1103, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<552x1>>, >
shape: #ttnn.shape<1x2520x7x7>
tensor<[1,2520,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17640 + d1 * 7 + d2, d3), memory_config: (552, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x4>>, >
shape: #ttnn.shape<1x256x128x128>
tensor<[1,256,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32768 + d1 * 128 + d2, d3), memory_config: (1024, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x256x14x14>
tensor<[1,256,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 14 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<136x1>>, >
shape: #ttnn.shape<1x256x17x17>
tensor<[1,256,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4352 + d1 * 17 + d2, d3), memory_config: (136, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x10>>, >
shape: #ttnn.shape<1x256x180x320>
tensor<[1,256,180,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 180 + d2, d3), memory_config: (1440, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x256x28x28>
tensor<[1,256,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 28 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x256x32x32>
tensor<[1,256,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 32 + d2, d3), memory_config: (256, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<304x2>>, >
shape: #ttnn.shape<1x256x38x38>
tensor<[1,256,38,38,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9728 + d1 * 38 + d2, d3), memory_config: (304, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x3>>, >
shape: #ttnn.shape<1x256x45x80>
tensor<[1,256,45,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 45 + d2, d3), memory_config: (360, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x2>>, >
shape: #ttnn.shape<1x256x56x56>
tensor<[1,256,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 56 + d2, d3), memory_config: (448, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x3>>, >
shape: #ttnn.shape<1x256x75x75>
tensor<[1,256,75,75,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 75 + d2, d3), memory_config: (600, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x1>>, >
shape: #ttnn.shape<1x256x7x7>
tensor<[1,256,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 7 + d2, d3), memory_config: (56, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x256x8x8>
tensor<[1,256,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 8 + d2, d3), memory_config: (64, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x5>>, >
shape: #ttnn.shape<1x256x90x160>
tensor<[1,256,90,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 90 + d2, d3), memory_config: (720, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x288x14x14>
tensor<[1,288,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 14 + d2, d3), memory_config: (126, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x288x28x28>
tensor<[1,288,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 28 + d2, d3), memory_config: (252, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<140x1>>, >
shape: #ttnn.shape<1x320x14x14>
tensor<[1,320,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4480 + d1 * 14 + d2, d3), memory_config: (140, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<170x1>>, >
shape: #ttnn.shape<1x320x17x17>
tensor<[1,320,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5440 + d1 * 17 + d2, d3), memory_config: (170, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x320x28x28>
tensor<[1,320,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 28 + d2, d3), memory_config: (280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x1>>, >
shape: #ttnn.shape<1x320x7x7>
tensor<[1,320,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 7 + d2, d3), memory_config: (70, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x320x8x8>
tensor<[1,320,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 8 + d2, d3), memory_config: (80, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x4>>, >
shape: #ttnn.shape<1x32x112x112>
tensor<[1,32,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 112 + d2, d3), memory_config: (112, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x5>>, >
shape: #ttnn.shape<1x32x120x160>
tensor<[1,32,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 120 + d2, d3), memory_config: (120, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x5>>, >
shape: #ttnn.shape<1x32x147x147>
tensor<[1,32,147,147,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 147 + d2, d3), memory_config: (147, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<149x5>>, >
shape: #ttnn.shape<1x32x149x149>
tensor<[1,32,149,149,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4768 + d1 * 149 + d2, d3), memory_config: (149, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<14x1>>, >
shape: #ttnn.shape<1x32x14x14>
tensor<[1,32,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 448 + d1 * 14 + d2, d3), memory_config: (14, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x5>>, >
shape: #ttnn.shape<1x32x150x150>
tensor<[1,32,150,150,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 150 + d2, d3), memory_config: (150, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x8>>, >
shape: #ttnn.shape<1x32x256x256>
tensor<[1,32,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 256 + d2, d3), memory_config: (256, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<26x1>>, >
shape: #ttnn.shape<1x32x26x26>
tensor<[1,32,26,26,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 832 + d1 * 26 + d2, d3), memory_config: (26, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x32x28x28>
tensor<[1,32,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 28 + d2, d3), memory_config: (28, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x2>>, >
shape: #ttnn.shape<1x32x30x40>
tensor<[1,32,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 30 + d2, d3), memory_config: (30, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x2>>, >
shape: #ttnn.shape<1x32x56x56>
tensor<[1,32,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 56 + d2, d3), memory_config: (56, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x3>>, >
shape: #ttnn.shape<1x32x60x80>
tensor<[1,32,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 60 + d2, d3), memory_config: (60, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x32x7x7>
tensor<[1,32,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 7 + d2, d3), memory_config: (7, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1176x4>>, >
shape: #ttnn.shape<1x336x112x112>
tensor<[1,336,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 37632 + d1 * 112 + d2, d3), memory_config: (1176, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x336x14x14>
tensor<[1,336,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 14 + d2, d3), memory_config: (147, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x2>>, >
shape: #ttnn.shape<1x336x56x56>
tensor<[1,336,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 56 + d2, d3), memory_config: (588, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x1>>, >
shape: #ttnn.shape<1x348x1x1>
tensor<[1,348,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 348 + d1 + d2, d3), memory_config: (11, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<154x1>>, >
shape: #ttnn.shape<1x352x14x14>
tensor<[1,352,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4928 + d1 * 14 + d2, d3), memory_config: (154, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x352x28x28>
tensor<[1,352,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 28 + d2, d3), memory_config: (308, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x36x14x14>
tensor<[1,36,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 504 + d1 * 14 + d2, d3), memory_config: (16, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x36x28x28>
tensor<[1,36,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 28 + d2, d3), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x2>>, >
shape: #ttnn.shape<1x36x56x56>
tensor<[1,36,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 56 + d2, d3), memory_config: (63, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1624x1>>, >
shape: #ttnn.shape<1x3712x14x14>
tensor<[1,3712,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 51968 + d1 * 14 + d2, d3), memory_config: (1624, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<812x1>>, >
shape: #ttnn.shape<1x3712x7x7>
tensor<[1,3712,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25984 + d1 * 7 + d2, d3), memory_config: (812, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x384x14x14>
tensor<[1,384,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 14 + d2, d3), memory_config: (168, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<204x1>>, >
shape: #ttnn.shape<1x384x17x17>
tensor<[1,384,17,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6528 + d1 * 17 + d2, d3), memory_config: (204, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x384x28x28>
tensor<[1,384,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 28 + d2, d3), memory_config: (336, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x384x7x7>
tensor<[1,384,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 7 + d2, d3), memory_config: (84, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x384x8x8>
tensor<[1,384,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 * 8 + d2, d3), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x4096>
tensor<[1,4096,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<182x1>>, >
shape: #ttnn.shape<1x416x14x14>
tensor<[1,416,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5824 + d1 * 14 + d2, d3), memory_config: (182, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x416x28x28>
tensor<[1,416,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 28 + d2, d3), memory_config: (364, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x448x14x14>
tensor<[1,448,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 14 + d2, d3), memory_config: (196, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x448x28x28>
tensor<[1,448,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 28 + d2, d3), memory_config: (392, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x448x8x8>
tensor<[1,448,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 8 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x480x14x14>
tensor<[1,480,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 14 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x480x28x28>
tensor<[1,480,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 28 + d2, d3), memory_config: (420, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x480x7x7>
tensor<[1,480,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 7 + d2, d3), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x48x14x14>
tensor<[1,48,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 * 14 + d2, d3), memory_config: (21, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x1>>, >
shape: #ttnn.shape<1x48x7x7>
tensor<[1,48,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 336 + d1 * 7 + d2, d3), memory_config: (11, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x4x14x14>
tensor<[1,4,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 56 + d1 * 14 + d2, d3), memory_config: (2, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x512x16x16>
tensor<[1,512,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 16 + d2, d3), memory_config: (256, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<368x2>>, >
shape: #ttnn.shape<1x512x23x40>
tensor<[1,512,23,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11776 + d1 * 23 + d2, d3), memory_config: (368, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x512x28x28>
tensor<[1,512,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 28 + d2, d3), memory_config: (448, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x3>>, >
shape: #ttnn.shape<1x512x45x80>
tensor<[1,512,45,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 45 + d2, d3), memory_config: (720, 3, 'tile<32x32, bf16>', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x2>>, >
shape: #ttnn.shape<1x512x56x56>
tensor<[1,512,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 56 + d2, d3), memory_config: (896, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x512x7x7>
tensor<[1,512,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 7 + d2, d3), memory_config: (112, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x512x8x8>
tensor<[1,512,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 8 + d2, d3), memory_config: (128, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1440x5>>, >
shape: #ttnn.shape<1x512x90x160>
tensor<[1,512,90,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 46080 + d1 * 90 + d2, d3), memory_config: (1440, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<238x1>>, >
shape: #ttnn.shape<1x544x14x14>
tensor<[1,544,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7616 + d1 * 14 + d2, d3), memory_config: (238, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x576x14x14>
tensor<[1,576,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 14 + d2, d3), memory_config: (252, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x58x1x1>
tensor<[1,58,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 58 + d1 + d2, d3), memory_config: (2, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<266x1>>, >
shape: #ttnn.shape<1x608x14x14>
tensor<[1,608,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8512 + d1 * 14 + d2, d3), memory_config: (266, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<53x1>>, >
shape: #ttnn.shape<1x60x28x28>
tensor<[1,60,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1680 + d1 * 28 + d2, d3), memory_config: (53, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x64>
tensor<[1,64,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x640x14x14>
tensor<[1,640,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 14 + d2, d3), memory_config: (280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x4>>, >
shape: #ttnn.shape<1x64x112x112>
tensor<[1,64,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 112 + d2, d3), memory_config: (224, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<240x5>>, >
shape: #ttnn.shape<1x64x120x160>
tensor<[1,64,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7680 + d1 * 120 + d2, d3), memory_config: (240, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x4>>, >
shape: #ttnn.shape<1x64x128x128>
tensor<[1,64,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 128 + d2, d3), memory_config: (256, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x5>>, >
shape: #ttnn.shape<1x64x147x147>
tensor<[1,64,147,147,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 147 + d2, d3), memory_config: (294, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x64x14x14>
tensor<[1,64,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 14 + d2, d3), memory_config: (28, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x5>>, >
shape: #ttnn.shape<1x64x150x150>
tensor<[1,64,150,150,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 150 + d2, d3), memory_config: (300, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x5>>, >
shape: #ttnn.shape<1x64x160x160>
tensor<[1,64,160,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 160 + d2, d3), memory_config: (320, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x10>>, >
shape: #ttnn.shape<1x64x180x320>
tensor<[1,64,180,320,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 180 + d2, d3), memory_config: (360, 10, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x7>>, >
shape: #ttnn.shape<1x64x224x224>
tensor<[1,64,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 224 + d2, d3), memory_config: (448, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x64x24x24>
tensor<[1,64,24,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 24 + d2, d3), memory_config: (48, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x1>>, >
shape: #ttnn.shape<1x64x28x28>
tensor<[1,64,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 28 + d2, d3), memory_config: (56, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x2>>, >
shape: #ttnn.shape<1x64x30x40>
tensor<[1,64,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 30 + d2, d3), memory_config: (60, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x2>>, >
shape: #ttnn.shape<1x64x35x35>
tensor<[1,64,35,35,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 35 + d2, d3), memory_config: (70, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x20>>, >
shape: #ttnn.shape<1x64x360x640>
tensor<[1,64,360,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 360 + d2, d3), memory_config: (720, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x20>>, >
shape: #ttnn.shape<1x64x480x640>
tensor<[1,64,480,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 480 + d2, d3), memory_config: (960, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x2>>, >
shape: #ttnn.shape<1x64x56x56>
tensor<[1,64,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 56 + d2, d3), memory_config: (112, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x3>>, >
shape: #ttnn.shape<1x64x60x80>
tensor<[1,64,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 60 + d2, d3), memory_config: (120, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<146x3>>, >
shape: #ttnn.shape<1x64x73x73>
tensor<[1,64,73,73,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4672 + d1 * 73 + d2, d3), memory_config: (146, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<160x3>>, >
shape: #ttnn.shape<1x64x80x80>
tensor<[1,64,80,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5120 + d1 * 80 + d2, d3), memory_config: (160, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x672x14x14>
tensor<[1,672,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 14 + d2, d3), memory_config: (294, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x1>>, >
shape: #ttnn.shape<1x672x28x28>
tensor<[1,672,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 28 + d2, d3), memory_config: (588, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1176x2>>, >
shape: #ttnn.shape<1x672x56x56>
tensor<[1,672,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 37632 + d1 * 56 + d2, d3), memory_config: (1176, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<609x1>>, >
shape: #ttnn.shape<1x696x28x28>
tensor<[1,696,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19488 + d1 * 28 + d2, d3), memory_config: (609, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1218x2>>, >
shape: #ttnn.shape<1x696x56x56>
tensor<[1,696,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 38976 + d1 * 56 + d2, d3), memory_config: (1218, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x704x14x14>
tensor<[1,704,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 14 + d2, d3), memory_config: (308, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<433x1>>, >
shape: #ttnn.shape<1x728x19x19>
tensor<[1,728,19,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13832 + d1 * 19 + d2, d3), memory_config: (433, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<865x2>>, >
shape: #ttnn.shape<1x728x38x38>
tensor<[1,728,38,38,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27664 + d1 * 38 + d2, d3), memory_config: (865, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x72x14x14>
tensor<[1,72,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 14 + d2, d3), memory_config: (32, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x1>>, >
shape: #ttnn.shape<1x72x28x28>
tensor<[1,72,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 28 + d2, d3), memory_config: (63, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<90x2>>, >
shape: #ttnn.shape<1x72x40x40>
tensor<[1,72,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2880 + d1 * 40 + d2, d3), memory_config: (90, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x2>>, >
shape: #ttnn.shape<1x72x56x56>
tensor<[1,72,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 56 + d2, d3), memory_config: (126, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<180x3>>, >
shape: #ttnn.shape<1x72x80x80>
tensor<[1,72,80,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5760 + d1 * 80 + d2, d3), memory_config: (180, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<322x1>>, >
shape: #ttnn.shape<1x736x14x14>
tensor<[1,736,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10304 + d1 * 14 + d2, d3), memory_config: (322, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x768x14x14>
tensor<[1,768,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 14 + d2, d3), memory_config: (336, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<350x1>>, >
shape: #ttnn.shape<1x800x14x14>
tensor<[1,800,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11200 + d1 * 14 + d2, d3), memory_config: (350, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x832x14x14>
tensor<[1,832,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 14 + d2, d3), memory_config: (364, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<378x1>>, >
shape: #ttnn.shape<1x864x14x14>
tensor<[1,864,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12096 + d1 * 14 + d2, d3), memory_config: (378, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x896x14x14>
tensor<[1,896,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 14 + d2, d3), memory_config: (392, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x896x7x7>
tensor<[1,896,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 7 + d2, d3), memory_config: (196, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x4>>, >
shape: #ttnn.shape<1x8x112x112>
tensor<[1,8,112,112,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 112 + d2, d3), memory_config: (28, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1x1>
tensor<[1,8,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x1>>, >
shape: #ttnn.shape<1x928x14x14>
tensor<[1,928,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 14 + d2, d3), memory_config: (406, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<203x1>>, >
shape: #ttnn.shape<1x928x7x7>
tensor<[1,928,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6496 + d1 * 7 + d2, d3), memory_config: (203, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<1x92x14x14>
tensor<[1,92,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1288 + d1 * 14 + d2, d3), memory_config: (41, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x960x14x14>
tensor<[1,960,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 14 + d2, d3), memory_config: (420, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x960x7x7>
tensor<[1,960,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 7 + d2, d3), memory_config: (210, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x1>>, >
shape: #ttnn.shape<1x96x14x14>
tensor<[1,96,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 14 + d2, d3), memory_config: (42, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x96x28x28>
tensor<[1,96,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 28 + d2, d3), memory_config: (84, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x2>>, >
shape: #ttnn.shape<1x96x35x35>
tensor<[1,96,35,35,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 35 + d2, d3), memory_config: (105, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x2>>, >
shape: #ttnn.shape<1x96x56x56>
tensor<[1,96,56,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 56 + d2, d3), memory_config: (168, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<213x3>>, >
shape: #ttnn.shape<1x96x71x71>
tensor<[1,96,71,71,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6816 + d1 * 71 + d2, d3), memory_config: (213, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<219x3>>, >
shape: #ttnn.shape<1x96x73x73>
tensor<[1,96,73,73,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7008 + d1 * 73 + d2, d3), memory_config: (219, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<434x1>>, >
shape: #ttnn.shape<1x992x14x14>
tensor<[1,992,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13888 + d1 * 14 + d2, d3), memory_config: (434, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<217x1>>, >
shape: #ttnn.shape<1x992x7x7>
tensor<[1,992,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6944 + d1 * 7 + d2, d3), memory_config: (217, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<2x13x3072>
tensor<[2,13,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x8>>, >
shape: #ttnn.shape<6x1x100x256>
tensor<[6,1,100,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 100 + d2, d3), memory_config: (19, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<6x4096>
tensor<[6,4096,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x64>>, >
shape: #ttnn.shape<920x1x2048>
tensor<[920,1,2048,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (29, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<792x192>>, >
shape: #ttnn.shape<1x6x132x192>
tensor<[1,6,132,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 792 + d1 * 132 + d2, d3), memory_config: (792, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16632x192>>, >
shape: #ttnn.shape<1x126x132x192>
tensor<[1,126,132,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16632 + d1 * 132 + d2, d3), memory_config: (16632, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x6>>, >
shape: #ttnn.shape<1x132x132x192>
tensor<[1,132,132,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17424 + d1 * 132 + d2, d3), memory_config: (545, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<792x192>>, >
shape: #ttnn.shape<1x132x6x192>
tensor<[1,132,6,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 792 + d1 * 6 + d2, d3), memory_config: (792, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16632x192>>, >
shape: #ttnn.shape<1x132x126x192>
tensor<[1,132,126,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16632 + d1 * 126 + d2, d3), memory_config: (16632, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16632x384>>, >
shape: #ttnn.shape<1x126x132x384>
tensor<[1,126,132,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16632 + d1 * 132 + d2, d3), memory_config: (16632, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<792x384>>, >
shape: #ttnn.shape<1x6x132x384>
tensor<[1,6,132,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 792 + d1 * 132 + d2, d3), memory_config: (792, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x12>>, >
shape: #ttnn.shape<1x132x132x384>
tensor<[1,132,132,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17424 + d1 * 132 + d2, d3), memory_config: (545, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16632x384>>, >
shape: #ttnn.shape<1x132x126x384>
tensor<[1,132,126,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16632 + d1 * 126 + d2, d3), memory_config: (16632, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<792x384>>, >
shape: #ttnn.shape<1x132x6x384>
tensor<[1,132,6,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 792 + d1 * 6 + d2, d3), memory_config: (792, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<792x384>>, >
shape: #ttnn.shape<1x6x132x384>
tensor<[1,6,132,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 792 + d1 * 132 + d2, d3), memory_config: (792, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16632x384>>, >
shape: #ttnn.shape<1x126x132x384>
tensor<[1,126,132,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16632 + d1 * 132 + d2, d3), memory_config: (16632, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<545x12>>, >
shape: #ttnn.shape<1x132x132x384>
tensor<[1,132,132,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17424 + d1 * 132 + d2, d3), memory_config: (545, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<792x384>>, >
shape: #ttnn.shape<1x132x6x384>
tensor<[1,132,6,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 792 + d1 * 6 + d2, d3), memory_config: (792, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16632x384>>, >
shape: #ttnn.shape<1x132x126x384>
tensor<[1,132,126,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16632 + d1 * 126 + d2, d3), memory_config: (16632, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<154x512>>, >
shape: #ttnn.shape<1x11x14x512>
tensor<[1,11,14,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 154 + d1 * 14 + d2, d3), memory_config: (154, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<42x512>>, >
shape: #ttnn.shape<1x3x14x512>
tensor<[1,3,14,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 42 + d1 * 14 + d2, d3), memory_config: (42, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<1x14x14x512>
tensor<[1,14,14,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<154x512>>, >
shape: #ttnn.shape<1x14x11x512>
tensor<[1,14,11,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 154 + d1 * 11 + d2, d3), memory_config: (154, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<42x512>>, >
shape: #ttnn.shape<1x14x3x512>
tensor<[1,14,3,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 42 + d1 * 3 + d2, d3), memory_config: (42, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<42x512>>, >
shape: #ttnn.shape<1x3x14x512>
tensor<[1,3,14,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 42 + d1 * 14 + d2, d3), memory_config: (42, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<154x512>>, >
shape: #ttnn.shape<1x11x14x512>
tensor<[1,11,14,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 154 + d1 * 14 + d2, d3), memory_config: (154, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<1x14x14x512>
tensor<[1,14,14,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 16, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<42x512>>, >
shape: #ttnn.shape<1x14x3x512>
tensor<[1,14,3,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 42 + d1 * 3 + d2, d3), memory_config: (42, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<154x512>>, >
shape: #ttnn.shape<1x14x11x512>
tensor<[1,14,11,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 154 + d1 * 11 + d2, d3), memory_config: (154, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<432x1536>>, >
shape: #ttnn.shape<1x18x24x1536>
tensor<[1,18,24,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 24 + d2, d3), memory_config: (432, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x1536>>, >
shape: #ttnn.shape<1x6x24x1536>
tensor<[1,6,24,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 24 + d2, d3), memory_config: (144, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x48>>, >
shape: #ttnn.shape<1x24x24x1536>
tensor<[1,24,24,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (18, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<432x1536>>, >
shape: #ttnn.shape<1x24x18x1536>
tensor<[1,24,18,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 18 + d2, d3), memory_config: (432, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x1536>>, >
shape: #ttnn.shape<1x24x6x1536>
tensor<[1,24,6,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 6 + d2, d3), memory_config: (144, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x1536>>, >
shape: #ttnn.shape<1x6x24x1536>
tensor<[1,6,24,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 24 + d2, d3), memory_config: (144, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<432x1536>>, >
shape: #ttnn.shape<1x18x24x1536>
tensor<[1,18,24,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 24 + d2, d3), memory_config: (432, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x48>>, >
shape: #ttnn.shape<1x24x24x1536>
tensor<[1,24,24,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 576 + d1 * 24 + d2, d3), memory_config: (18, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x1536>>, >
shape: #ttnn.shape<1x24x6x1536>
tensor<[1,24,6,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 6 + d2, d3), memory_config: (144, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<432x1536>>, >
shape: #ttnn.shape<1x24x18x1536>
tensor<[1,24,18,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 18 + d2, d3), memory_config: (432, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<68112x192>>, >
shape: #ttnn.shape<1x258x264x192>
tensor<[1,258,264,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 68112 + d1 * 264 + d2, d3), memory_config: (68112, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1584x192>>, >
shape: #ttnn.shape<1x6x264x192>
tensor<[1,6,264,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1584 + d1 * 264 + d2, d3), memory_config: (1584, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2178x6>>, >
shape: #ttnn.shape<1x264x264x192>
tensor<[1,264,264,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 69696 + d1 * 264 + d2, d3), memory_config: (2178, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<68112x192>>, >
shape: #ttnn.shape<1x264x258x192>
tensor<[1,264,258,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 68112 + d1 * 258 + d2, d3), memory_config: (68112, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1584x192>>, >
shape: #ttnn.shape<1x264x6x192>
tensor<[1,264,6,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1584 + d1 * 6 + d2, d3), memory_config: (1584, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1584x192>>, >
shape: #ttnn.shape<1x6x264x192>
tensor<[1,6,264,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1584 + d1 * 264 + d2, d3), memory_config: (1584, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<68112x192>>, >
shape: #ttnn.shape<1x258x264x192>
tensor<[1,258,264,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 68112 + d1 * 264 + d2, d3), memory_config: (68112, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2178x6>>, >
shape: #ttnn.shape<1x264x264x192>
tensor<[1,264,264,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 69696 + d1 * 264 + d2, d3), memory_config: (2178, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1584x192>>, >
shape: #ttnn.shape<1x264x6x192>
tensor<[1,264,6,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1584 + d1 * 6 + d2, d3), memory_config: (1584, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<68112x192>>, >
shape: #ttnn.shape<1x264x258x192>
tensor<[1,264,258,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 68112 + d1 * 258 + d2, d3), memory_config: (68112, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<700x256>>, >
shape: #ttnn.shape<1x25x28x256>
tensor<[1,25,28,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 700 + d1 * 28 + d2, d3), memory_config: (700, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<84x256>>, >
shape: #ttnn.shape<1x3x28x256>
tensor<[1,3,28,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 84 + d1 * 28 + d2, d3), memory_config: (84, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<1x28x28x256>
tensor<[1,28,28,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<700x256>>, >
shape: #ttnn.shape<1x28x25x256>
tensor<[1,28,25,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 700 + d1 * 25 + d2, d3), memory_config: (700, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<84x256>>, >
shape: #ttnn.shape<1x28x3x256>
tensor<[1,28,3,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 84 + d1 * 3 + d2, d3), memory_config: (84, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<84x256>>, >
shape: #ttnn.shape<1x3x28x256>
tensor<[1,3,28,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 84 + d1 * 28 + d2, d3), memory_config: (84, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<700x256>>, >
shape: #ttnn.shape<1x25x28x256>
tensor<[1,25,28,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 700 + d1 * 28 + d2, d3), memory_config: (700, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<1x28x28x256>
tensor<[1,28,28,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<84x256>>, >
shape: #ttnn.shape<1x28x3x256>
tensor<[1,28,3,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 84 + d1 * 3 + d2, d3), memory_config: (84, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<700x256>>, >
shape: #ttnn.shape<1x28x25x256>
tensor<[1,28,25,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 700 + d1 * 25 + d2, d3), memory_config: (700, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1080x1536>>, >
shape: #ttnn.shape<1x30x36x1536>
tensor<[1,30,36,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1080 + d1 * 36 + d2, d3), memory_config: (1080, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<216x1536>>, >
shape: #ttnn.shape<1x6x36x1536>
tensor<[1,6,36,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 216 + d1 * 36 + d2, d3), memory_config: (216, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x48>>, >
shape: #ttnn.shape<1x36x36x1536>
tensor<[1,36,36,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1296 + d1 * 36 + d2, d3), memory_config: (41, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1080x1536>>, >
shape: #ttnn.shape<1x36x30x1536>
tensor<[1,36,30,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1080 + d1 * 30 + d2, d3), memory_config: (1080, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<216x1536>>, >
shape: #ttnn.shape<1x36x6x1536>
tensor<[1,36,6,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 216 + d1 * 6 + d2, d3), memory_config: (216, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<216x1536>>, >
shape: #ttnn.shape<1x6x36x1536>
tensor<[1,6,36,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 216 + d1 * 36 + d2, d3), memory_config: (216, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1080x1536>>, >
shape: #ttnn.shape<1x30x36x1536>
tensor<[1,30,36,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1080 + d1 * 36 + d2, d3), memory_config: (1080, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x48>>, >
shape: #ttnn.shape<1x36x36x1536>
tensor<[1,36,36,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1296 + d1 * 36 + d2, d3), memory_config: (41, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<216x1536>>, >
shape: #ttnn.shape<1x36x6x1536>
tensor<[1,36,6,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 216 + d1 * 6 + d2, d3), memory_config: (216, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1080x1536>>, >
shape: #ttnn.shape<1x36x30x1536>
tensor<[1,36,30,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1080 + d1 * 30 + d2, d3), memory_config: (1080, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1080x768>>, >
shape: #ttnn.shape<1x30x36x768>
tensor<[1,30,36,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1080 + d1 * 36 + d2, d3), memory_config: (1080, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<216x768>>, >
shape: #ttnn.shape<1x6x36x768>
tensor<[1,6,36,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 216 + d1 * 36 + d2, d3), memory_config: (216, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x24>>, >
shape: #ttnn.shape<1x36x36x768>
tensor<[1,36,36,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1296 + d1 * 36 + d2, d3), memory_config: (41, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1080x768>>, >
shape: #ttnn.shape<1x36x30x768>
tensor<[1,36,30,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1080 + d1 * 30 + d2, d3), memory_config: (1080, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<216x768>>, >
shape: #ttnn.shape<1x36x6x768>
tensor<[1,36,6,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 216 + d1 * 6 + d2, d3), memory_config: (216, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<216x768>>, >
shape: #ttnn.shape<1x6x36x768>
tensor<[1,6,36,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 216 + d1 * 36 + d2, d3), memory_config: (216, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1080x768>>, >
shape: #ttnn.shape<1x30x36x768>
tensor<[1,30,36,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1080 + d1 * 36 + d2, d3), memory_config: (1080, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x24>>, >
shape: #ttnn.shape<1x36x36x768>
tensor<[1,36,36,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1296 + d1 * 36 + d2, d3), memory_config: (41, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<216x768>>, >
shape: #ttnn.shape<1x36x6x768>
tensor<[1,36,6,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 216 + d1 * 6 + d2, d3), memory_config: (216, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1080x768>>, >
shape: #ttnn.shape<1x36x30x768>
tensor<[1,36,30,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1080 + d1 * 30 + d2, d3), memory_config: (1080, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2968x128>>, >
shape: #ttnn.shape<1x53x56x128>
tensor<[1,53,56,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2968 + d1 * 56 + d2, d3), memory_config: (2968, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<168x128>>, >
shape: #ttnn.shape<1x3x56x128>
tensor<[1,3,56,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 168 + d1 * 56 + d2, d3), memory_config: (168, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<1x56x56x128>
tensor<[1,56,56,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2968x128>>, >
shape: #ttnn.shape<1x56x53x128>
tensor<[1,56,53,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2968 + d1 * 53 + d2, d3), memory_config: (2968, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<168x128>>, >
shape: #ttnn.shape<1x56x3x128>
tensor<[1,56,3,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 168 + d1 * 3 + d2, d3), memory_config: (168, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<168x128>>, >
shape: #ttnn.shape<1x3x56x128>
tensor<[1,3,56,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 168 + d1 * 56 + d2, d3), memory_config: (168, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2968x128>>, >
shape: #ttnn.shape<1x53x56x128>
tensor<[1,53,56,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2968 + d1 * 56 + d2, d3), memory_config: (2968, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<1x56x56x128>
tensor<[1,56,56,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<168x128>>, >
shape: #ttnn.shape<1x56x3x128>
tensor<[1,56,3,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 168 + d1 * 3 + d2, d3), memory_config: (168, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2968x128>>, >
shape: #ttnn.shape<1x56x53x128>
tensor<[1,56,53,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2968 + d1 * 53 + d2, d3), memory_config: (2968, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4752x384>>, >
shape: #ttnn.shape<1x66x72x384>
tensor<[1,66,72,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4752 + d1 * 72 + d2, d3), memory_config: (4752, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<432x384>>, >
shape: #ttnn.shape<1x6x72x384>
tensor<[1,6,72,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 72 + d2, d3), memory_config: (432, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x12>>, >
shape: #ttnn.shape<1x72x72x384>
tensor<[1,72,72,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5184 + d1 * 72 + d2, d3), memory_config: (162, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4752x384>>, >
shape: #ttnn.shape<1x72x66x384>
tensor<[1,72,66,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4752 + d1 * 66 + d2, d3), memory_config: (4752, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<432x384>>, >
shape: #ttnn.shape<1x72x6x384>
tensor<[1,72,6,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 6 + d2, d3), memory_config: (432, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<432x384>>, >
shape: #ttnn.shape<1x6x72x384>
tensor<[1,6,72,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 72 + d2, d3), memory_config: (432, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4752x384>>, >
shape: #ttnn.shape<1x66x72x384>
tensor<[1,66,72,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4752 + d1 * 72 + d2, d3), memory_config: (4752, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x12>>, >
shape: #ttnn.shape<1x72x72x384>
tensor<[1,72,72,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5184 + d1 * 72 + d2, d3), memory_config: (162, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<432x384>>, >
shape: #ttnn.shape<1x72x6x384>
tensor<[1,72,6,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 6 + d2, d3), memory_config: (432, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4752x384>>, >
shape: #ttnn.shape<1x72x66x384>
tensor<[1,72,66,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4752 + d1 * 66 + d2, d3), memory_config: (4752, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4752x768>>, >
shape: #ttnn.shape<1x66x72x768>
tensor<[1,66,72,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4752 + d1 * 72 + d2, d3), memory_config: (4752, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<432x768>>, >
shape: #ttnn.shape<1x6x72x768>
tensor<[1,6,72,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 72 + d2, d3), memory_config: (432, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x24>>, >
shape: #ttnn.shape<1x72x72x768>
tensor<[1,72,72,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5184 + d1 * 72 + d2, d3), memory_config: (162, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4752x768>>, >
shape: #ttnn.shape<1x72x66x768>
tensor<[1,72,66,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4752 + d1 * 66 + d2, d3), memory_config: (4752, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<432x768>>, >
shape: #ttnn.shape<1x72x6x768>
tensor<[1,72,6,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 6 + d2, d3), memory_config: (432, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<432x768>>, >
shape: #ttnn.shape<1x6x72x768>
tensor<[1,6,72,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 72 + d2, d3), memory_config: (432, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4752x768>>, >
shape: #ttnn.shape<1x66x72x768>
tensor<[1,66,72,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4752 + d1 * 72 + d2, d3), memory_config: (4752, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<162x24>>, >
shape: #ttnn.shape<1x72x72x768>
tensor<[1,72,72,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5184 + d1 * 72 + d2, d3), memory_config: (162, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<432x768>>, >
shape: #ttnn.shape<1x72x6x768>
tensor<[1,72,6,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 432 + d1 * 6 + d2, d3), memory_config: (432, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4752x768>>, >
shape: #ttnn.shape<1x72x66x768>
tensor<[1,72,66,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4752 + d1 * 66 + d2, d3), memory_config: (4752, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x1024x1>
tensor<[1,1024,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10x1>
tensor<[1,10,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11x1>
tensor<[1,11,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x1>>, >
shape: #ttnn.shape<1x1200x1>
tensor<[1,1200,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (38, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1>
tensor<[1,12,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x1>>, >
shape: #ttnn.shape<1x1370x1>
tensor<[1,1370,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (43, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13x1>
tensor<[1,13,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x1>>, >
shape: #ttnn.shape<1x1445x1>
tensor<[1,1445,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (46, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14x1>
tensor<[1,14,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x14x14x1>
tensor<[1,14,14,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x1>>, >
shape: #ttnn.shape<1x1500x1>
tensor<[1,1500,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (47, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15x1>
tensor<[1,15,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x1>
tensor<[1,16384,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1>
tensor<[1,16,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x1>>, >
shape: #ttnn.shape<1x19200x1>
tensor<[1,19200,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x196x1>
tensor<[1,196,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x197x1>
tensor<[1,197,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1>
tensor<[1,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x201x1>
tensor<[1,201,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x2048x1>
tensor<[1,2048,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x1>>, >
shape: #ttnn.shape<1x257x1>
tensor<[1,257,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (9, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25x1>
tensor<[1,25,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27x1>
tensor<[1,27,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 27 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x28x28x1>
tensor<[1,28,28,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<1x300x1>
tensor<[1,300,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1>
tensor<[1,32,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x4096x1>
tensor<[1,4096,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x45x1>
tensor<[1,45,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x4800x1>
tensor<[1,4800,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x50x1>
tensor<[1,50,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x1>>, >
shape: #ttnn.shape<1x56x56x1>
tensor<[1,56,56,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x5x1>
tensor<[1,5,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x64x1>
tensor<[1,64,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x1>>, >
shape: #ttnn.shape<1x65536x1>
tensor<[1,65536,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 65536 + d1, d2), memory_config: (2048, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1>
tensor<[1,6,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7x1>
tensor<[1,7,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x7x7x1>
tensor<[1,7,7,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1>
tensor<[1,8,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9x1>
tensor<[1,9,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x13x1>
tensor<[2,13,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x7x1>
tensor<[2,7,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<4x1x1>
tensor<[4,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x1>>, >
shape: #ttnn.shape<920x1x1>
tensor<[920,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (29, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x6>>, >
shape: #ttnn.shape<1x192>
tensor<[1,192,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 6, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x10x10>
tensor<[1,1,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10 + d1 * 10 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x12x12>
tensor<[1,1,12,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 * 12 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x14x14>
tensor<[1,1,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14 + d1 * 14 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x16x16>
tensor<[1,1,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 * 16 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x10>
tensor<[1,1,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x11>
tensor<[1,1,1,11,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x12>
tensor<[1,1,1,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x13>
tensor<[1,1,1,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x14>
tensor<[1,1,1,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x15>
tensor<[1,1,1,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x16>
tensor<[1,1,1,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x17>
tensor<[1,1,1,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x18>
tensor<[1,1,1,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x19>
tensor<[1,1,1,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x7>>, >
shape: #ttnn.shape<1x1x1x201>
tensor<[1,1,1,201,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 7, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<1x1x1x2048>
tensor<[1,1,1,2048,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x20>
tensor<[1,1,1,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x21>
tensor<[1,1,1,21,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x22>
tensor<[1,1,1,22,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x23>
tensor<[1,1,1,23,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x24>
tensor<[1,1,1,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x25>
tensor<[1,1,1,25,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x26>
tensor<[1,1,1,26,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x27>
tensor<[1,1,1,27,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x28>
tensor<[1,1,1,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x29>
tensor<[1,1,1,29,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x7>
tensor<[1,1,1,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x8>
tensor<[1,1,1,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x9>
tensor<[1,1,1,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x1x256x256>
tensor<[1,1,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 256 + d2, d3), memory_config: (8, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x25x25>
tensor<[1,1,25,25,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25 + d1 * 25 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x6x6>
tensor<[1,1,6,6,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 * 6 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x7x7>
tensor<[1,1,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x9x9>
tensor<[1,1,9,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 * 9 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x1x1x13>
tensor<[2,1,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x1x7x7>
tensor<[2,1,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<4x1x1x13>
tensor<[4,1,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x50280>>, >
shape: #ttnn.shape<1x1x50280>
tensor<[1,1,50280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 50280, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x50280>>, >
shape: #ttnn.shape<1x1x50280>
tensor<[1,1,50280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 50280, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x50280>>, >
shape: #ttnn.shape<1x1x50280>
tensor<[1,1,50280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 50280, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1280>>, >
shape: #ttnn.shape<1x1x1280>
tensor<[1,1,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x50280>>, >
shape: #ttnn.shape<1x1x50280>
tensor<[1,1,50280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 50280, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x50280>>, >
shape: #ttnn.shape<1x1x50280>
tensor<[1,1,50280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 50280, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x50280>>, >
shape: #ttnn.shape<1x1x50280>
tensor<[1,1,50280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 50280, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x768>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x768>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x768>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<120x160>>, >
shape: #ttnn.shape<1x1x120x160>
tensor<[1,1,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 * 120 + d2, d3), memory_config: (120, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<120x160>>, >
shape: #ttnn.shape<1x1x120x160>
tensor<[1,1,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 * 120 + d2, d3), memory_config: (120, 160, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<30x40>>, >
shape: #ttnn.shape<1x1x30x40>
tensor<[1,1,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30 + d1 * 30 + d2, d3), memory_config: (30, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<30x40>>, >
shape: #ttnn.shape<1x1x30x40>
tensor<[1,1,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30 + d1 * 30 + d2, d3), memory_config: (30, 40, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<60x80>>, >
shape: #ttnn.shape<1x1x60x80>
tensor<[1,1,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 60 + d1 * 60 + d2, d3), memory_config: (60, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<60x80>>, >
shape: #ttnn.shape<1x1x60x80>
tensor<[1,1,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 60 + d1 * 60 + d2, d3), memory_config: (60, 80, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x16>>, >
shape: #ttnn.shape<1x3072x1x16>
tensor<[1,3072,1,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 + d2, d3), memory_config: (3072, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<512x96>>, >
shape: #ttnn.shape<1x32x16x1x96>
tensor<[1,32,16,1,96,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 512 + d1 * 16 + d2 + d3, d4), memory_config: (512, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<512x96>>, >
shape: #ttnn.shape<1x32x16x1x96>
tensor<[1,32,16,1,96,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 512 + d1 * 16 + d2 + d3, d4), memory_config: (512, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<512x96>>, >
shape: #ttnn.shape<1x32x16x1x96>
tensor<[1,32,16,1,96,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 512 + d1 * 16 + d2 + d3, d4), memory_config: (512, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<224x224>>, >
shape: #ttnn.shape<1x1x224x224>
tensor<[1,1,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 224 + d2, d3), memory_config: (224, 224, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<224x224>>, >
shape: #ttnn.shape<1x1x224x224>
tensor<[1,1,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 224 + d2, d3), memory_config: (224, 224, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<224x224>>, >
shape: #ttnn.shape<1x1x224x224>
tensor<[1,1,224,224,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 224 + d2, d3), memory_config: (224, 224, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x50257>>, >
shape: #ttnn.shape<1x1x50257>
tensor<[1,1,50257,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 50257, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x768>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x51200>>, >
shape: #ttnn.shape<1x1x51200>
tensor<[1,1,51200,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 51200, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x50272>>, >
shape: #ttnn.shape<1x1x50272>
tensor<[1,1,50272,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 50272, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x50280>>, >
shape: #ttnn.shape<1x1x50280>
tensor<[1,1,50280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 50280, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x50280>>, >
shape: #ttnn.shape<1x1x50280>
tensor<[1,1,50280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 50280, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x50280>>, >
shape: #ttnn.shape<1x1x50280>
tensor<[1,1,50280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 50280, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x768>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x50280>>, >
shape: #ttnn.shape<1x1x50280>
tensor<[1,1,50280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 50280, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x768>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3234x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3234, 1, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3234x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3234, 1, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3234x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3234, 1, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3234x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3234, 1, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<209088x32>>, >
shape: #ttnn.shape<1x121x12x144x32>
tensor<[1,121,12,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 209088 + d1 * 1728 + d2 * 144 + d3, d4), memory_config: (209088, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<209088x32>>, >
shape: #ttnn.shape<1x121x12x144x32>
tensor<[1,121,12,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 209088 + d1 * 1728 + d2 * 144 + d3, d4), memory_config: (209088, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<209088x32>>, >
shape: #ttnn.shape<1x121x12x144x32>
tensor<[1,121,12,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 209088 + d1 * 1728 + d2 * 144 + d3, d4), memory_config: (209088, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<104544x32>>, >
shape: #ttnn.shape<1x121x6x144x32>
tensor<[1,121,6,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 104544 + d1 * 864 + d2 * 144 + d3, d4), memory_config: (104544, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<104544x32>>, >
shape: #ttnn.shape<1x121x6x144x32>
tensor<[1,121,6,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 104544 + d1 * 864 + d2 * 144 + d3, d4), memory_config: (104544, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<104544x32>>, >
shape: #ttnn.shape<1x121x6x144x32>
tensor<[1,121,6,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 104544 + d1 * 864 + d2 * 144 + d3, d4), memory_config: (104544, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1370x1280>>, >
shape: #ttnn.shape<1x1370x1x1280>
tensor<[1,1370,1,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1370 + d1 + d2, d3), memory_config: (1370, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1370x1280>>, >
shape: #ttnn.shape<1x1370x1x1280>
tensor<[1,1370,1,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1370 + d1 + d2, d3), memory_config: (1370, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1370x1280>>, >
shape: #ttnn.shape<1x1370x1x1280>
tensor<[1,1370,1,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1370 + d1 + d2, d3), memory_config: (1370, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<6272x32>>, >
shape: #ttnn.shape<1x16x8x49x32>
tensor<[1,16,8,49,32,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 6272 + d1 * 392 + d2 * 49 + d3, d4), memory_config: (6272, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<6272x32>>, >
shape: #ttnn.shape<1x16x8x49x32>
tensor<[1,16,8,49,32,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 6272 + d1 * 392 + d2 * 49 + d3, d4), memory_config: (6272, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<6272x32>>, >
shape: #ttnn.shape<1x16x8x49x32>
tensor<[1,16,8,49,32,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 6272 + d1 * 392 + d2 * 49 + d3, d4), memory_config: (6272, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3084x64>>, >
shape: #ttnn.shape<1x1x12x257x64>
tensor<[1,1,12,257,64,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3084 + d1 * 3084 + d2 * 257 + d3, d4), memory_config: (3084, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3084x64>>, >
shape: #ttnn.shape<1x1x12x257x64>
tensor<[1,1,12,257,64,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3084 + d1 * 3084 + d2 * 257 + d3, d4), memory_config: (3084, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3084x64>>, >
shape: #ttnn.shape<1x1x12x257x64>
tensor<[1,1,12,257,64,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3084 + d1 * 3084 + d2 * 257 + d3, d4), memory_config: (3084, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1568x32>>, >
shape: #ttnn.shape<1x1x32x49x32>
tensor<[1,1,32,49,32,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 1568 + d1 * 1568 + d2 * 49 + d3, d4), memory_config: (1568, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1568x32>>, >
shape: #ttnn.shape<1x1x32x49x32>
tensor<[1,1,32,49,32,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 1568 + d1 * 1568 + d2 * 49 + d3, d4), memory_config: (1568, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1568x32>>, >
shape: #ttnn.shape<1x1x32x49x32>
tensor<[1,1,32,49,32,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 1568 + d1 * 1568 + d2 * 49 + d3, d4), memory_config: (1568, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<62208x32>>, >
shape: #ttnn.shape<1x36x12x144x32>
tensor<[1,36,12,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 62208 + d1 * 1728 + d2 * 144 + d3, d4), memory_config: (62208, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<62208x32>>, >
shape: #ttnn.shape<1x36x12x144x32>
tensor<[1,36,12,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 62208 + d1 * 1728 + d2 * 144 + d3, d4), memory_config: (62208, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<62208x32>>, >
shape: #ttnn.shape<1x36x12x144x32>
tensor<[1,36,12,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 62208 + d1 * 1728 + d2 * 144 + d3, d4), memory_config: (62208, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<124416x32>>, >
shape: #ttnn.shape<1x36x24x144x32>
tensor<[1,36,24,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 124416 + d1 * 3456 + d2 * 144 + d3, d4), memory_config: (124416, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<124416x32>>, >
shape: #ttnn.shape<1x36x24x144x32>
tensor<[1,36,24,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 124416 + d1 * 3456 + d2 * 144 + d3, d4), memory_config: (124416, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<124416x32>>, >
shape: #ttnn.shape<1x36x24x144x32>
tensor<[1,36,24,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 124416 + d1 * 3456 + d2 * 144 + d3, d4), memory_config: (124416, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<418176x32>>, >
shape: #ttnn.shape<1x484x6x144x32>
tensor<[1,484,6,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 418176 + d1 * 864 + d2 * 144 + d3, d4), memory_config: (418176, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<418176x32>>, >
shape: #ttnn.shape<1x484x6x144x32>
tensor<[1,484,6,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 418176 + d1 * 864 + d2 * 144 + d3, d4), memory_config: (418176, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<418176x32>>, >
shape: #ttnn.shape<1x484x6x144x32>
tensor<[1,484,6,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 418176 + d1 * 864 + d2 * 144 + d3, d4), memory_config: (418176, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x32>>, >
shape: #ttnn.shape<1x4x16x49x32>
tensor<[1,4,16,49,32,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3136 + d1 * 784 + d2 * 49 + d3, d4), memory_config: (3136, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x32>>, >
shape: #ttnn.shape<1x4x16x49x32>
tensor<[1,4,16,49,32,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3136 + d1 * 784 + d2 * 49 + d3, d4), memory_config: (3136, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3136x32>>, >
shape: #ttnn.shape<1x4x16x49x32>
tensor<[1,4,16,49,32,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3136 + d1 * 784 + d2 * 49 + d3, d4), memory_config: (3136, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<27648x32>>, >
shape: #ttnn.shape<1x4x48x144x32>
tensor<[1,4,48,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 27648 + d1 * 6912 + d2 * 144 + d3, d4), memory_config: (27648, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<27648x32>>, >
shape: #ttnn.shape<1x4x48x144x32>
tensor<[1,4,48,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 27648 + d1 * 6912 + d2 * 144 + d3, d4), memory_config: (27648, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<27648x32>>, >
shape: #ttnn.shape<1x4x48x144x32>
tensor<[1,4,48,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 27648 + d1 * 6912 + d2 * 144 + d3, d4), memory_config: (27648, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x32>>, >
shape: #ttnn.shape<1x64x4x49x32>
tensor<[1,64,4,49,32,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12544 + d1 * 196 + d2 * 49 + d3, d4), memory_config: (12544, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x32>>, >
shape: #ttnn.shape<1x64x4x49x32>
tensor<[1,64,4,49,32,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12544 + d1 * 196 + d2 * 49 + d3, d4), memory_config: (12544, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12544x32>>, >
shape: #ttnn.shape<1x64x4x49x32>
tensor<[1,64,4,49,32,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12544 + d1 * 196 + d2 * 49 + d3, d4), memory_config: (12544, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<31104x32>>, >
shape: #ttnn.shape<1x9x24x144x32>
tensor<[1,9,24,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 31104 + d1 * 3456 + d2 * 144 + d3, d4), memory_config: (31104, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<31104x32>>, >
shape: #ttnn.shape<1x9x24x144x32>
tensor<[1,9,24,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 31104 + d1 * 3456 + d2 * 144 + d3, d4), memory_config: (31104, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<31104x32>>, >
shape: #ttnn.shape<1x9x24x144x32>
tensor<[1,9,24,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 31104 + d1 * 3456 + d2 * 144 + d3, d4), memory_config: (31104, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<62208x32>>, >
shape: #ttnn.shape<1x9x48x144x32>
tensor<[1,9,48,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 62208 + d1 * 6912 + d2 * 144 + d3, d4), memory_config: (62208, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<62208x32>>, >
shape: #ttnn.shape<1x9x48x144x32>
tensor<[1,9,48,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 62208 + d1 * 6912 + d2 * 144 + d3, d4), memory_config: (62208, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<62208x32>>, >
shape: #ttnn.shape<1x9x48x144x32>
tensor<[1,9,48,144,32,f32]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 62208 + d1 * 6912 + d2 * 144 + d3, d4), memory_config: (62208, 32, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x2x1>
tensor<[1,2,1,ui32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2 + d1, d2), memory_config: (2, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x2x1>
tensor<[1,2,1,ui32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2 + d1, d2), memory_config: (2, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x2x1>
tensor<[1,2,1,ui32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2 + d1, d2), memory_config: (2, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x2x1>
tensor<[1,2,1,ui32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2 + d1, d2), memory_config: (2, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x4>>, >
shape: #ttnn.shape<1x1x100x4>
tensor<[1,1,100,4,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 100 + d2, d3), memory_config: (100, 4, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x92>>, >
shape: #ttnn.shape<1x1x100x92>
tensor<[1,1,100,92,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 100 + d2, d3), memory_config: (100, 92, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<8x1>
tensor<[8,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 1, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<68x40>>, >
shape: #ttnn.shape<1x3x720x1280>
tensor<[1,3,720,1280,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2160 + d1 * 720 + d2, d3), memory_config: (68, 40, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<53x1>>, >
shape: #ttnn.shape<1x120x14x14>
tensor<[1,120,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1680 + d1 * 14 + d2, d3), memory_config: (53, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<1x1392x1x1>
tensor<[1,1392,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1392 + d1 + d2, d3), memory_config: (44, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<1x184x7x7>
tensor<[1,184,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1288 + d1 * 7 + d2, d3), memory_config: (41, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x1x256x256>
tensor<[1,1,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 256 + d2, d3), memory_config: (8, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<15x20>>, >
shape: #ttnn.shape<1x1x480x640>
tensor<[1,1,480,640,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 480 + d1 * 480 + d2, d3), memory_config: (15, 20, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<1x200x7x7>
tensor<[1,200,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1400 + d1 * 7 + d2, d3), memory_config: (44, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x232x1x1>
tensor<[1,232,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 232 + d1 + d2, d3), memory_config: (8, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x240x14x14>
tensor<[1,240,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 14 + d2, d3), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x5>>, >
shape: #ttnn.shape<1x2x120x160>
tensor<[1,2,120,160,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 240 + d1 * 120 + d2, d3), memory_config: (8, 5, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x2x30x40>
tensor<[1,2,30,40,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 60 + d1 * 30 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x3>>, >
shape: #ttnn.shape<1x2x60x80>
tensor<[1,2,60,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 120 + d1 * 60 + d2, d3), memory_config: (4, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<116x1>>, >
shape: #ttnn.shape<1x3712x1x1>
tensor<[1,3712,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3712 + d1 + d2, d3), memory_config: (116, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x3>>, >
shape: #ttnn.shape<1x3x16x16x85>
tensor<[1,3,16,16,85,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 768 + d1 * 256 + d2 * 16 + d3, d4), memory_config: (24, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x3>>, >
shape: #ttnn.shape<1x3x32x32x85>
tensor<[1,3,32,32,85,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3072 + d1 * 1024 + d2 * 32 + d3, d4), memory_config: (96, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<384x3>>, >
shape: #ttnn.shape<1x3x64x64x85>
tensor<[1,3,64,64,85,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12288 + d1 * 4096 + d2 * 64 + d3, d4), memory_config: (384, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x480x7x7>
tensor<[1,480,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 7 + d2, d3), memory_config: (105, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<1x50x3072>
tensor<[1,50,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x672x7x7>
tensor<[1,672,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 7 + d2, d3), memory_config: (147, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<22x1>>, >
shape: #ttnn.shape<1x696x1x1>
tensor<[1,696,1,1,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 696 + d1 + d2, d3), memory_config: (22, 1, 'tile<32x32, bf16>', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x1>>, >
shape: #ttnn.shape<1x72x28x28>
tensor<[1,72,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 28 + d2, d3), memory_config: (63, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<90x1>>, >
shape: #ttnn.shape<1x960x3x3>
tensor<[1,960,3,3,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2880 + d1 * 3 + d2, d3), memory_config: (90, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<2x7x2048>
tensor<[2,7,2048,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 64, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x1>>, >
shape: #ttnn.shape<6x1x100x4>
tensor<[6,1,100,4,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 * 100 + d2, d3), memory_config: (19, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x1>>, >
shape: #ttnn.shape<1x1280x32x32>
tensor<[1,1280,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 32 + d2, d3), memory_config: (1280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x128x32x32>
tensor<[1,128,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 32 + d2, d3), memory_config: (128, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x2>>, >
shape: #ttnn.shape<1x128x64x64>
tensor<[1,128,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 64 + d2, d3), memory_config: (256, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x280>>, >
shape: #ttnn.shape<1x12x8960>
tensor<[1,12,8960,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 280, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x592>>, >
shape: #ttnn.shape<1x13x18944>
tensor<[1,13,18944,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 592, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x1920x16x16>
tensor<[1,1920,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 16 + d2, d3), memory_config: (960, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1920x1>>, >
shape: #ttnn.shape<1x1920x32x32>
tensor<[1,1920,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 61440 + d1 * 32 + d2, d3), memory_config: (1920, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x280>>, >
shape: #ttnn.shape<1x1x8960>
tensor<[1,1,8960,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 280, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x1>>, >
shape: #ttnn.shape<1x2560x16x16>
tensor<[1,2560,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 16 + d2, d3), memory_config: (1280, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x2560x8x8>
tensor<[1,2560,8,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 8 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x256x16x16>
tensor<[1,256,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 16 + d2, d3), memory_config: (128, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x256x32x32>
tensor<[1,256,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 32 + d2, d3), memory_config: (256, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x10>
tensor<[1,3072,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x11>
tensor<[1,3072,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x12>
tensor<[1,3072,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x13>
tensor<[1,3072,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x14>
tensor<[1,3072,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x15>
tensor<[1,3072,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x6>
tensor<[1,3072,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x7>
tensor<[1,3072,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x8>
tensor<[1,3072,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x9>
tensor<[1,3072,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x320x32x32>
tensor<[1,320,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 32 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x2>>, >
shape: #ttnn.shape<1x320x64x64>
tensor<[1,320,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 64 + d2, d3), memory_config: (640, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x344>>, >
shape: #ttnn.shape<1x32x11008>
tensor<[1,32,11008,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 344, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x4>>, >
shape: #ttnn.shape<1x32x128x128>
tensor<[1,32,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 128 + d2, d3), memory_config: (128, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x8>>, >
shape: #ttnn.shape<1x32x256x256>
tensor<[1,32,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 256 + d2, d3), memory_config: (256, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<1x32x8192>
tensor<[1,32,8192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 256, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x512x16x16>
tensor<[1,512,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 16 + d2, d3), memory_config: (256, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x640x16x16>
tensor<[1,640,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 16 + d2, d3), memory_config: (320, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x640x32x32>
tensor<[1,640,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 32 + d2, d3), memory_config: (640, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1280x2>>, >
shape: #ttnn.shape<1x640x64x64>
tensor<[1,640,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 40960 + d1 * 64 + d2, d3), memory_config: (1280, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x4>>, >
shape: #ttnn.shape<1x64x128x128>
tensor<[1,64,128,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 128 + d2, d3), memory_config: (256, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x64x64x64>
tensor<[1,64,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (128, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<960x1>>, >
shape: #ttnn.shape<1x960x32x32>
tensor<[1,960,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 30720 + d1 * 32 + d2, d3), memory_config: (960, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1920x2>>, >
shape: #ttnn.shape<1x960x64x64>
tensor<[1,960,64,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 61440 + d1 * 64 + d2, d3), memory_config: (1920, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x13x128>
tensor<[1,13,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x128>
tensor<[1,1,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x32x128>
tensor<[1,32,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3x14>>, >
shape: #ttnn.shape<3x14>
tensor<[3,14,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3, 14, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x14>>, >
shape: #ttnn.shape<4x14>
tensor<[4,14,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 14, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<7x14>>, >
shape: #ttnn.shape<7x14>
tensor<[7,14,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 14, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8x2048>>, >
shape: #ttnn.shape<8x2048>
tensor<[8,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 2048, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8x2048>>, >
shape: #ttnn.shape<8x2048>
tensor<[8,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 2048, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x2560>>, >
shape: #ttnn.shape<1x1024x2560>
tensor<[1,1024,2560,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (1024, 2560, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x2560>>, >
shape: #ttnn.shape<1x1024x2560>
tensor<[1,1024,2560,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (1024, 2560, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<10x96>>, >
shape: #ttnn.shape<1x10x96>
tensor<[1,10,96,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (10, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<10x16>>, >
shape: #ttnn.shape<1x10x16>
tensor<[1,10,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (10, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<10x16>>, >
shape: #ttnn.shape<1x10x16>
tensor<[1,10,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (10, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<11x96>>, >
shape: #ttnn.shape<1x11x96>
tensor<[1,11,96,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (11, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<11x16>>, >
shape: #ttnn.shape<1x11x16>
tensor<[1,11,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (11, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<11x16>>, >
shape: #ttnn.shape<1x11x16>
tensor<[1,11,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (11, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8192x192>>, >
shape: #ttnn.shape<1x64x128x192>
tensor<[1,64,128,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 128 + d2, d3), memory_config: (8192, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8192x192>>, >
shape: #ttnn.shape<1x64x128x192>
tensor<[1,64,128,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 128 + d2, d3), memory_config: (8192, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8192x384>>, >
shape: #ttnn.shape<1x64x128x384>
tensor<[1,64,128,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 128 + d2, d3), memory_config: (8192, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8192x384>>, >
shape: #ttnn.shape<1x64x128x384>
tensor<[1,64,128,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 128 + d2, d3), memory_config: (8192, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x192>>, >
shape: #ttnn.shape<1x128x128x192>
tensor<[1,128,128,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x384>>, >
shape: #ttnn.shape<1x128x128x384>
tensor<[1,128,128,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x192>>, >
shape: #ttnn.shape<1x128x128x192>
tensor<[1,128,128,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16384x192>>, >
shape: #ttnn.shape<1x128x128x192>
tensor<[1,128,128,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (16384, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12x96>>, >
shape: #ttnn.shape<1x12x96>
tensor<[1,12,96,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (12, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12x16>>, >
shape: #ttnn.shape<1x12x16>
tensor<[1,12,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (12, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12x16>>, >
shape: #ttnn.shape<1x12x16>
tensor<[1,12,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (12, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x64>>, >
shape: #ttnn.shape<1x12x12x64>
tensor<[1,12,12,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (144, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<144x64>>, >
shape: #ttnn.shape<1x12x12x64>
tensor<[1,12,12,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 144 + d1 * 12 + d2, d3), memory_config: (144, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1536>>, >
shape: #ttnn.shape<1x1x1536>
tensor<[1,1,1536,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1536, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12x64>>, >
shape: #ttnn.shape<1x12x1x64>
tensor<[1,12,1,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (12, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12x64>>, >
shape: #ttnn.shape<1x12x1x64>
tensor<[1,12,1,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (12, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16896x192>>, >
shape: #ttnn.shape<1x128x132x192>
tensor<[1,128,132,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16896 + d1 * 132 + d2, d3), memory_config: (16896, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16896x384>>, >
shape: #ttnn.shape<1x128x132x384>
tensor<[1,128,132,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16896 + d1 * 132 + d2, d3), memory_config: (16896, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<13x96>>, >
shape: #ttnn.shape<1x13x96>
tensor<[1,13,96,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (13, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<13x16>>, >
shape: #ttnn.shape<1x13x16>
tensor<[1,13,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (13, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<13x16>>, >
shape: #ttnn.shape<1x13x16>
tensor<[1,13,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (13, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<100x192>>, >
shape: #ttnn.shape<1x100x192>
tensor<[1,100,192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 100 + d1, d2), memory_config: (100, 192, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14x96>>, >
shape: #ttnn.shape<1x14x96>
tensor<[1,14,96,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (14, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14x16>>, >
shape: #ttnn.shape<1x14x16>
tensor<[1,14,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (14, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14x16>>, >
shape: #ttnn.shape<1x14x16>
tensor<[1,14,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (14, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<98x512>>, >
shape: #ttnn.shape<1x7x14x512>
tensor<[1,7,14,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 98 + d1 * 14 + d2, d3), memory_config: (98, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<98x512>>, >
shape: #ttnn.shape<1x7x14x512>
tensor<[1,7,14,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 98 + d1 * 14 + d2, d3), memory_config: (98, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x14x14x256>
tensor<[1,14,14,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x256>>, >
shape: #ttnn.shape<1x14x14x256>
tensor<[1,14,14,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (196, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14x1>>, >
shape: #ttnn.shape<1x14x1>
tensor<[1,14,1,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (14, 1, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<14x1>>, >
shape: #ttnn.shape<1x14x1>
tensor<[1,14,1,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (14, 1, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<15x96>>, >
shape: #ttnn.shape<1x15x96>
tensor<[1,15,96,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (15, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<15x16>>, >
shape: #ttnn.shape<1x15x16>
tensor<[1,15,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (15, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<15x16>>, >
shape: #ttnn.shape<1x15x16>
tensor<[1,15,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (15, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1536>>, >
shape: #ttnn.shape<1x16x16x1536>
tensor<[1,16,16,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x768>>, >
shape: #ttnn.shape<1x16x16x768>
tensor<[1,16,16,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x768>>, >
shape: #ttnn.shape<1x16x16x768>
tensor<[1,16,16,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 16 + d2, d3), memory_config: (256, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1596x28>>, >
shape: #ttnn.shape<1x57x28x28>
tensor<[1,57,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1596 + d1 * 28 + d2, d3), memory_config: (1596, 28, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3584x28>>, >
shape: #ttnn.shape<1x128x28x28>
tensor<[1,128,28,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 28 + d2, d3), memory_config: (3584, 28, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x1024>>, >
shape: #ttnn.shape<1x196x1024>
tensor<[1,196,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (196, 1024, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<196x768>>, >
shape: #ttnn.shape<1x196x768>
tensor<[1,196,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (196, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16x16>>, >
shape: #ttnn.shape<1x1x16x16>
tensor<[1,1,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 * 16 + d2, d3), memory_config: (16, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16x16>>, >
shape: #ttnn.shape<1x1x16x16>
tensor<[1,1,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 * 16 + d2, d3), memory_config: (16, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16x32>>, >
shape: #ttnn.shape<1x1x16x32>
tensor<[1,1,16,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 * 16 + d2, d3), memory_config: (16, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<16x32>>, >
shape: #ttnn.shape<1x1x16x32>
tensor<[1,1,16,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 * 16 + d2, d3), memory_config: (16, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<1x1x16>
tensor<[1,1,16,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x256>>, >
shape: #ttnn.shape<1x1x4x256>
tensor<[1,1,4,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4 + d1 * 4 + d2, d3), memory_config: (4, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x256>>, >
shape: #ttnn.shape<1x1x4x256>
tensor<[1,1,4,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4 + d1 * 4 + d2, d3), memory_config: (4, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x256>>, >
shape: #ttnn.shape<1x1x4x256>
tensor<[1,1,4,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4 + d1 * 4 + d2, d3), memory_config: (4, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x1x7x32>
tensor<[1,1,7,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (7, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x1x7x32>
tensor<[1,1,7,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (7, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<384x1536>>, >
shape: #ttnn.shape<1x16x24x1536>
tensor<[1,16,24,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 384 + d1 * 24 + d2, d3), memory_config: (384, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<768x64>>, >
shape: #ttnn.shape<1x24x32x64>
tensor<[1,24,32,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 32 + d2, d3), memory_config: (768, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<768x64>>, >
shape: #ttnn.shape<1x24x32x64>
tensor<[1,24,32,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 32 + d2, d3), memory_config: (768, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x5120>>, >
shape: #ttnn.shape<1x256x5120>
tensor<[1,256,5120,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (256, 5120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x5120>>, >
shape: #ttnn.shape<1x256x5120>
tensor<[1,256,5120,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (256, 5120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<32768x192>>, >
shape: #ttnn.shape<1x128x256x192>
tensor<[1,128,256,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32768 + d1 * 256 + d2, d3), memory_config: (32768, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<32768x192>>, >
shape: #ttnn.shape<1x128x256x192>
tensor<[1,128,256,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32768 + d1 * 256 + d2, d3), memory_config: (32768, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<65536x192>>, >
shape: #ttnn.shape<1x256x256x192>
tensor<[1,256,256,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 65536 + d1 * 256 + d2, d3), memory_config: (65536, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (256, 1, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (256, 1, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x25x1>
tensor<[1,25,1,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (25, 1, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x25x1>
tensor<[1,25,1,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (25, 1, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<67584x192>>, >
shape: #ttnn.shape<1x256x264x192>
tensor<[1,256,264,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 67584 + d1 * 264 + d2, d3), memory_config: (67584, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<364x64>>, >
shape: #ttnn.shape<1x28x13x64>
tensor<[1,28,13,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 364 + d1 * 13 + d2, d3), memory_config: (364, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<364x64>>, >
shape: #ttnn.shape<1x28x13x64>
tensor<[1,28,13,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 364 + d1 * 13 + d2, d3), memory_config: (364, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<392x256>>, >
shape: #ttnn.shape<1x14x28x256>
tensor<[1,14,28,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 392 + d1 * 28 + d2, d3), memory_config: (392, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<392x256>>, >
shape: #ttnn.shape<1x14x28x256>
tensor<[1,14,28,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 392 + d1 * 28 + d2, d3), memory_config: (392, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x28x28x128>
tensor<[1,28,28,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<784x128>>, >
shape: #ttnn.shape<1x28x28x128>
tensor<[1,28,28,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (784, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<24x64>>, >
shape: #ttnn.shape<1x2x12x64>
tensor<[1,2,12,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 * 12 + d2, d3), memory_config: (24, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<24x64>>, >
shape: #ttnn.shape<1x2x12x64>
tensor<[1,2,12,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24 + d1 * 12 + d2, d3), memory_config: (24, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2x64>>, >
shape: #ttnn.shape<1x2x1x64>
tensor<[1,2,1,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2 + d1 + d2, d3), memory_config: (2, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2x64>>, >
shape: #ttnn.shape<1x2x1x64>
tensor<[1,2,1,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2 + d1 + d2, d3), memory_config: (2, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x7>>, >
shape: #ttnn.shape<1x3072x7>
tensor<[1,3072,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 7, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x8>>, >
shape: #ttnn.shape<1x3072x8>
tensor<[1,3072,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 8, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x9>>, >
shape: #ttnn.shape<1x3072x9>
tensor<[1,3072,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 9, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x10>>, >
shape: #ttnn.shape<1x3072x10>
tensor<[1,3072,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 10, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x11>>, >
shape: #ttnn.shape<1x3072x11>
tensor<[1,3072,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 11, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x12>>, >
shape: #ttnn.shape<1x3072x12>
tensor<[1,3072,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 12, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x13>>, >
shape: #ttnn.shape<1x3072x13>
tensor<[1,3072,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 13, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x14>>, >
shape: #ttnn.shape<1x3072x14>
tensor<[1,3072,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 14, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x15>>, >
shape: #ttnn.shape<1x3072x15>
tensor<[1,3072,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 15, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x6>>, >
shape: #ttnn.shape<1x3072x6>
tensor<[1,3072,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 6, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x64>>, >
shape: #ttnn.shape<1x32x32x64>
tensor<[1,32,32,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x64>>, >
shape: #ttnn.shape<1x32x32x64>
tensor<[1,32,32,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<512x768>>, >
shape: #ttnn.shape<1x16x32x768>
tensor<[1,16,32,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 32 + d2, d3), memory_config: (512, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<512x768>>, >
shape: #ttnn.shape<1x16x32x768>
tensor<[1,16,32,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 32 + d2, d3), memory_config: (512, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x1536>>, >
shape: #ttnn.shape<1x32x32x1536>
tensor<[1,32,32,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x768>>, >
shape: #ttnn.shape<1x32x32x768>
tensor<[1,32,32,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x384>>, >
shape: #ttnn.shape<1x32x32x384>
tensor<[1,32,32,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x384>>, >
shape: #ttnn.shape<1x32x32x384>
tensor<[1,32,32,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x768>>, >
shape: #ttnn.shape<1x32x32x768>
tensor<[1,32,32,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1024x768>>, >
shape: #ttnn.shape<1x32x32x768>
tensor<[1,32,32,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1024 + d1 * 32 + d2, d3), memory_config: (1024, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1152x1536>>, >
shape: #ttnn.shape<1x32x36x1536>
tensor<[1,32,36,1536,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1152 + d1 * 36 + d2, d3), memory_config: (1152, 1536, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1152x768>>, >
shape: #ttnn.shape<1x32x36x768>
tensor<[1,32,36,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1152 + d1 * 36 + d2, d3), memory_config: (1152, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<768x2>>, >
shape: #ttnn.shape<1x3x16x16x2>
tensor<[1,3,16,16,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 768 + d1 * 256 + d2 * 16 + d3, d4), memory_config: (768, 2, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<768x2>>, >
shape: #ttnn.shape<1x3x16x16x2>
tensor<[1,3,16,16,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 768 + d1 * 256 + d2 * 16 + d3, d4), memory_config: (768, 2, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<768x81>>, >
shape: #ttnn.shape<1x3x16x16x81>
tensor<[1,3,16,16,81,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 768 + d1 * 256 + d2 * 16 + d3, d4), memory_config: (768, 81, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x2>>, >
shape: #ttnn.shape<1x3x32x32x2>
tensor<[1,3,32,32,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3072 + d1 * 1024 + d2 * 32 + d3, d4), memory_config: (3072, 2, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x2>>, >
shape: #ttnn.shape<1x3x32x32x2>
tensor<[1,3,32,32,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3072 + d1 * 1024 + d2 * 32 + d3, d4), memory_config: (3072, 2, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x81>>, >
shape: #ttnn.shape<1x3x32x32x81>
tensor<[1,3,32,32,81,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 3072 + d1 * 1024 + d2 * 32 + d3, d4), memory_config: (3072, 81, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12288x2>>, >
shape: #ttnn.shape<1x3x64x64x2>
tensor<[1,3,64,64,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12288 + d1 * 4096 + d2 * 64 + d3, d4), memory_config: (12288, 2, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12288x2>>, >
shape: #ttnn.shape<1x3x64x64x2>
tensor<[1,3,64,64,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12288 + d1 * 4096 + d2 * 64 + d3, d4), memory_config: (12288, 2, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<12288x81>>, >
shape: #ttnn.shape<1x3x64x64x81>
tensor<[1,3,64,64,81,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 12288 + d1 * 4096 + d2 * 64 + d3, d4), memory_config: (12288, 81, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x1280>>, >
shape: #ttnn.shape<1x4096x1280>
tensor<[1,4096,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (4096, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x1280>>, >
shape: #ttnn.shape<1x4096x1280>
tensor<[1,4096,1280,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (4096, 1280, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<52x64>>, >
shape: #ttnn.shape<1x4x13x64>
tensor<[1,4,13,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 52 + d1 * 13 + d2, d3), memory_config: (52, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<52x64>>, >
shape: #ttnn.shape<1x4x13x64>
tensor<[1,4,13,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 52 + d1 * 13 + d2, d3), memory_config: (52, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1568x128>>, >
shape: #ttnn.shape<1x28x56x128>
tensor<[1,28,56,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 56 + d2, d3), memory_config: (1568, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1568x128>>, >
shape: #ttnn.shape<1x28x56x128>
tensor<[1,28,56,128,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 56 + d2, d3), memory_config: (1568, 128, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<80x16>>, >
shape: #ttnn.shape<1x5x16x16>
tensor<[1,5,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 16 + d2, d3), memory_config: (80, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<80x16>>, >
shape: #ttnn.shape<1x5x16x16>
tensor<[1,5,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 16 + d2, d3), memory_config: (80, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<80x32>>, >
shape: #ttnn.shape<1x5x16x32>
tensor<[1,5,16,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 16 + d2, d3), memory_config: (80, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<80x32>>, >
shape: #ttnn.shape<1x5x16x32>
tensor<[1,5,16,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 80 + d1 * 16 + d2, d3), memory_config: (80, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5x16>>, >
shape: #ttnn.shape<1x5x16>
tensor<[1,5,16,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (5, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<5x16>>, >
shape: #ttnn.shape<1x5x16>
tensor<[1,5,16,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (5, 16, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<20x256>>, >
shape: #ttnn.shape<1x5x4x256>
tensor<[1,5,4,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20 + d1 * 4 + d2, d3), memory_config: (20, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<20x256>>, >
shape: #ttnn.shape<1x5x4x256>
tensor<[1,5,4,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20 + d1 * 4 + d2, d3), memory_config: (20, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<20x256>>, >
shape: #ttnn.shape<1x5x4x256>
tensor<[1,5,4,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20 + d1 * 4 + d2, d3), memory_config: (20, 256, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x10>>, >
shape: #ttnn.shape<1x3072x10>
tensor<[1,3072,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 10, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x10>>, >
shape: #ttnn.shape<1x3072x10>
tensor<[1,3072,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 10, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x11>>, >
shape: #ttnn.shape<1x3072x11>
tensor<[1,3072,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 11, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x11>>, >
shape: #ttnn.shape<1x3072x11>
tensor<[1,3072,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 11, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x12>>, >
shape: #ttnn.shape<1x3072x12>
tensor<[1,3072,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 12, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x12>>, >
shape: #ttnn.shape<1x3072x12>
tensor<[1,3072,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 12, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x13>>, >
shape: #ttnn.shape<1x3072x13>
tensor<[1,3072,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 13, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x13>>, >
shape: #ttnn.shape<1x3072x13>
tensor<[1,3072,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 13, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x14>>, >
shape: #ttnn.shape<1x3072x14>
tensor<[1,3072,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 14, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x14>>, >
shape: #ttnn.shape<1x3072x14>
tensor<[1,3072,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 14, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x15>>, >
shape: #ttnn.shape<1x3072x15>
tensor<[1,3072,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 15, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x15>>, >
shape: #ttnn.shape<1x3072x15>
tensor<[1,3072,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 15, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x6>>, >
shape: #ttnn.shape<1x3072x6>
tensor<[1,3072,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 6, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x6>>, >
shape: #ttnn.shape<1x3072x6>
tensor<[1,3072,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 6, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x7>>, >
shape: #ttnn.shape<1x3072x7>
tensor<[1,3072,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 7, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x7>>, >
shape: #ttnn.shape<1x3072x7>
tensor<[1,3072,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 7, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x8>>, >
shape: #ttnn.shape<1x3072x8>
tensor<[1,3072,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 8, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x8>>, >
shape: #ttnn.shape<1x3072x8>
tensor<[1,3072,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 8, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x9>>, >
shape: #ttnn.shape<1x3072x9>
tensor<[1,3072,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 9, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3072x9>>, >
shape: #ttnn.shape<1x3072x9>
tensor<[1,3072,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (3072, 9, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x5120>>, >
shape: #ttnn.shape<1x64x5120>
tensor<[1,64,5120,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (64, 5120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<64x5120>>, >
shape: #ttnn.shape<1x64x5120>
tensor<[1,64,5120,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (64, 5120, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x192>>, >
shape: #ttnn.shape<1x64x64x192>
tensor<[1,64,64,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x192>>, >
shape: #ttnn.shape<1x64x64x192>
tensor<[1,64,64,192,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 192, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x384>>, >
shape: #ttnn.shape<1x64x64x384>
tensor<[1,64,64,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x384>>, >
shape: #ttnn.shape<1x64x64x384>
tensor<[1,64,64,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2048x384>>, >
shape: #ttnn.shape<1x32x64x384>
tensor<[1,32,64,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 64 + d2, d3), memory_config: (2048, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2048x384>>, >
shape: #ttnn.shape<1x32x64x384>
tensor<[1,32,64,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 64 + d2, d3), memory_config: (2048, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2048x768>>, >
shape: #ttnn.shape<1x32x64x768>
tensor<[1,32,64,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 64 + d2, d3), memory_config: (2048, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<2048x768>>, >
shape: #ttnn.shape<1x32x64x768>
tensor<[1,32,64,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 64 + d2, d3), memory_config: (2048, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x384>>, >
shape: #ttnn.shape<1x64x64x384>
tensor<[1,64,64,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4096x768>>, >
shape: #ttnn.shape<1x64x64x768>
tensor<[1,64,64,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (4096, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<6x96>>, >
shape: #ttnn.shape<1x6x96>
tensor<[1,6,96,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (6, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<6x16>>, >
shape: #ttnn.shape<1x6x16>
tensor<[1,6,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (6, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<6x16>>, >
shape: #ttnn.shape<1x6x16>
tensor<[1,6,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (6, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<497x32>>, >
shape: #ttnn.shape<1x71x7x32>
tensor<[1,71,7,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 497 + d1 * 7 + d2, d3), memory_config: (497, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<497x32>>, >
shape: #ttnn.shape<1x71x7x32>
tensor<[1,71,7,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 497 + d1 * 7 + d2, d3), memory_config: (497, 32, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4608x384>>, >
shape: #ttnn.shape<1x64x72x384>
tensor<[1,64,72,384,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4608 + d1 * 72 + d2, d3), memory_config: (4608, 384, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4608x768>>, >
shape: #ttnn.shape<1x64x72x768>
tensor<[1,64,72,768,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4608 + d1 * 72 + d2, d3), memory_config: (4608, 768, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<7x96>>, >
shape: #ttnn.shape<1x7x96>
tensor<[1,7,96,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (7, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<1x7x16>
tensor<[1,7,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (7, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<1x7x16>
tensor<[1,7,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (7, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x512>>, >
shape: #ttnn.shape<1x7x7x512>
tensor<[1,7,7,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x512>>, >
shape: #ttnn.shape<1x7x7x512>
tensor<[1,7,7,512,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (49, 512, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<7x768>>, >
shape: #ttnn.shape<1x7x768>
tensor<[1,7,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (7, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<7x768>>, >
shape: #ttnn.shape<1x7x768>
tensor<[1,7,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (7, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<7x768>>, >
shape: #ttnn.shape<1x7x768>
tensor<[1,7,768,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (7, 768, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<497x64>>, >
shape: #ttnn.shape<1x7x71x64>
tensor<[1,7,71,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 497 + d1 * 71 + d2, d3), memory_config: (497, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8x96>>, >
shape: #ttnn.shape<1x8x96>
tensor<[1,8,96,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (8, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<1x8x16>
tensor<[1,8,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (8, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<1x8x16>
tensor<[1,8,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (8, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x64>>, >
shape: #ttnn.shape<1x8x32x64>
tensor<[1,8,32,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 32 + d2, d3), memory_config: (256, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<256x64>>, >
shape: #ttnn.shape<1x8x32x64>
tensor<[1,8,32,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 32 + d2, d3), memory_config: (256, 64, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x96>>, >
shape: #ttnn.shape<1x9x96>
tensor<[1,9,96,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (9, 96, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x16>>, >
shape: #ttnn.shape<1x9x16>
tensor<[1,9,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (9, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<9x16>>, >
shape: #ttnn.shape<1x9x16>
tensor<[1,9,16,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (9, 16, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<21x3>>, >
shape: #ttnn.shape<21x3>
tensor<[21,3,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (21, 3, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<21x4>>, >
shape: #ttnn.shape<21x4>
tensor<[21,4,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (21, 4, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<21x21>>, >
shape: #ttnn.shape<21x21>
tensor<[21,21,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (21, 21, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3x28>>, >
shape: #ttnn.shape<3x28>
tensor<[3,28,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3, 28, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x28>>, >
shape: #ttnn.shape<4x28>
tensor<[4,28,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 28, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<21x28>>, >
shape: #ttnn.shape<21x28>
tensor<[21,28,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (21, 28, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3234x2>>, >
shape: #ttnn.shape<3234x2>
tensor<[3234,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3234, 2, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3234x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3234, 1, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3234x2>>, >
shape: #ttnn.shape<3234x2>
tensor<[3234,2,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3234, 2, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3234x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3234, 1, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3234x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3234, 1, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3234x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3234, 1, 'f32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3x3>>, >
shape: #ttnn.shape<3x3>
tensor<[3,3,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3, 3, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3x4>>, >
shape: #ttnn.shape<3x4>
tensor<[3,4,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3, 4, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3x7>>, >
shape: #ttnn.shape<3x7>
tensor<[3,7,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3, 7, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3x3>>, >
shape: #ttnn.shape<3x3>
tensor<[3,3,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3, 3, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3x4>>, >
shape: #ttnn.shape<3x4>
tensor<[3,4,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3, 4, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3x21>>, >
shape: #ttnn.shape<3x21>
tensor<[3,21,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3, 21, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3x3>>, >
shape: #ttnn.shape<3x3>
tensor<[3,3,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3, 3, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3x4>>, >
shape: #ttnn.shape<3x4>
tensor<[3,4,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3, 4, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3x49>>, >
shape: #ttnn.shape<3x49>
tensor<[3,49,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3, 49, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1>
tensor<[1,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x3>>, >
shape: #ttnn.shape<49x3>
tensor<[49,3,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (49, 3, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x4>>, >
shape: #ttnn.shape<49x4>
tensor<[49,4,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (49, 4, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x49>>, >
shape: #ttnn.shape<49x49>
tensor<[49,49,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (49, 49, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x3>>, >
shape: #ttnn.shape<4x3>
tensor<[4,3,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 3, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x4>>, >
shape: #ttnn.shape<4x4>
tensor<[4,4,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 4, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x7>>, >
shape: #ttnn.shape<4x7>
tensor<[4,7,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 7, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x3>>, >
shape: #ttnn.shape<4x3>
tensor<[4,3,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 3, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x4>>, >
shape: #ttnn.shape<4x4>
tensor<[4,4,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 4, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x21>>, >
shape: #ttnn.shape<4x21>
tensor<[4,21,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 21, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x3>>, >
shape: #ttnn.shape<4x3>
tensor<[4,3,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 3, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x4>>, >
shape: #ttnn.shape<4x4>
tensor<[4,4,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 4, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x49>>, >
shape: #ttnn.shape<4x49>
tensor<[4,49,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 49, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<3x56>>, >
shape: #ttnn.shape<3x56>
tensor<[3,56,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (3, 56, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<4x56>>, >
shape: #ttnn.shape<4x56>
tensor<[4,56,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (4, 56, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<49x56>>, >
shape: #ttnn.shape<49x56>
tensor<[49,56,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (49, 56, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1>
tensor<[1,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<7x3>>, >
shape: #ttnn.shape<7x3>
tensor<[7,3,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 3, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<7x4>>, >
shape: #ttnn.shape<7x4>
tensor<[7,4,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 4, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<7x7>>, >
shape: #ttnn.shape<7x7>
tensor<[7,7,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (7, 7, 'bf16', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout<row_major>
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<8x1>
tensor<[8,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (8, 1, 'ui32', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x11x3072>
tensor<[1,11,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 11 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x12x3072>
tensor<[1,12,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x13x3072>
tensor<[1,13,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x14x3072>
tensor<[1,14,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x15x3072>
tensor<[1,15,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x6x3072>
tensor<[1,6,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x7x3072>
tensor<[1,7,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x8x3072>
tensor<[1,8,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x9x3072>
tensor<[1,9,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x15>
tensor<[1,3072,15,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x14>
tensor<[1,3072,14,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x13>
tensor<[1,3072,13,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x12>
tensor<[1,3072,12,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x11>
tensor<[1,3072,11,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x10>
tensor<[1,3072,10,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x9>
tensor<[1,3072,9,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x8>
tensor<[1,3072,8,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x7>
tensor<[1,3072,7,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x3072x6>
tensor<[1,3072,6,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 3072 + d1, d2), memory_config: (96, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x5x16x16x2>
tensor<[1,5,16,16,2,bf16]>mapping_from: (d0, d1, d2, d3, d4), mapping_to: (d0 * 1280 + d1 * 256 + d2 * 16 + d3, d4), memory_config: (40, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<19x8>>, >
shape: #ttnn.shape<6x100x1x256>
tensor<[6,100,1,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 100 + d1 + d2, d3), memory_config: (19, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<3234x1x4>
tensor<[3234,1,4,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<203x1>>, >
shape: #ttnn.shape<3234x2x2>
tensor<[3234,2,2,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2 + d1, d2), memory_config: (203, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<4x4x1x2048>
tensor<[4,4,1,2048,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4 + d1 + d2, d3), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<12>
tensor<[12,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x2>>, >
shape: #ttnn.shape<16x49x49>
tensor<[16,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (25, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1>
tensor<[1,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<1x100x14x14>
tensor<[1,100,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1400 + d1 * 14 + d2, d3), memory_config: (44, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1024>
tensor<[1,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1024x10x10>
tensor<[1,1024,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 10 + d2, d3), memory_config: (320, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x1024x14x14>
tensor<[1,1024,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 14 + d2, d3), memory_config: (448, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x48>>, >
shape: #ttnn.shape<1x1024x1536>
tensor<[1,1024,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x5>>, >
shape: #ttnn.shape<1x1024x160>
tensor<[1,1024,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x1024x16x16>
tensor<[1,1024,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 16 + d2, d3), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<608x1>>, >
shape: #ttnn.shape<1x1024x19x19>
tensor<[1,1024,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19456 + d1 * 19 + d2, d3), memory_config: (608, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x1>>, >
shape: #ttnn.shape<1x1024x28x28>
tensor<[1,1024,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 28 + d2, d3), memory_config: (896, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x96>>, >
shape: #ttnn.shape<1x1024x3072>
tensor<[1,1024,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1x1024x640>
tensor<[1,1024,640,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 20, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x24>>, >
shape: #ttnn.shape<1x1024x768>
tensor<[1,1024,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x1024x7x7>
tensor<[1,1024,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 7 + d2, d3), memory_config: (224, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<462x1>>, >
shape: #ttnn.shape<1x1056x14x14>
tensor<[1,1056,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14784 + d1 * 14 + d2, d3), memory_config: (462, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<231x1>>, >
shape: #ttnn.shape<1x1056x7x7>
tensor<[1,1056,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7392 + d1 * 7 + d2, d3), memory_config: (231, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<476x1>>, >
shape: #ttnn.shape<1x1088x14x14>
tensor<[1,1088,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15232 + d1 * 14 + d2, d3), memory_config: (476, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<238x1>>, >
shape: #ttnn.shape<1x1088x7x7>
tensor<[1,1088,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7616 + d1 * 7 + d2, d3), memory_config: (238, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<490x1>>, >
shape: #ttnn.shape<1x1120x14x14>
tensor<[1,1120,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15680 + d1 * 14 + d2, d3), memory_config: (490, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<245x1>>, >
shape: #ttnn.shape<1x1120x7x7>
tensor<[1,1120,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7840 + d1 * 7 + d2, d3), memory_config: (245, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x1>>, >
shape: #ttnn.shape<1x112x14x14>
tensor<[1,112,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 14 + d2, d3), memory_config: (49, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<53x1>>, >
shape: #ttnn.shape<1x112x15x15>
tensor<[1,112,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1680 + d1 * 15 + d2, d3), memory_config: (53, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x1>>, >
shape: #ttnn.shape<1x112x20x20>
tensor<[1,112,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 20 + d2, d3), memory_config: (70, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x112x24x24>
tensor<[1,112,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 24 + d2, d3), memory_config: (84, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x112x7x7>
tensor<[1,112,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 7 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x1>>, >
shape: #ttnn.shape<1x1152x14x14>
tensor<[1,1152,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 14 + d2, d3), memory_config: (504, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x1152x7x7>
tensor<[1,1152,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 7 + d2, d3), memory_config: (252, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<288x1>>, >
shape: #ttnn.shape<1x1152x8x8>
tensor<[1,1152,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9216 + d1 * 8 + d2, d3), memory_config: (288, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<51x1>>, >
shape: #ttnn.shape<1x116x14x14>
tensor<[1,116,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1624 + d1 * 14 + d2, d3), memory_config: (51, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<518x1>>, >
shape: #ttnn.shape<1x1184x14x14>
tensor<[1,1184,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16576 + d1 * 14 + d2, d3), memory_config: (518, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<259x1>>, >
shape: #ttnn.shape<1x1184x7x7>
tensor<[1,1184,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8288 + d1 * 7 + d2, d3), memory_config: (259, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11>
tensor<[1,11,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x10>>, >
shape: #ttnn.shape<1x1200x320>
tensor<[1,1200,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (38, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<53x1>>, >
shape: #ttnn.shape<1x120x14x14>
tensor<[1,120,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1680 + d1 * 14 + d2, d3), memory_config: (53, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x120x17x17>
tensor<[1,120,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2040 + d1 * 17 + d2, d3), memory_config: (64, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x120x28x28>
tensor<[1,120,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 28 + d2, d3), memory_config: (105, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x2>>, >
shape: #ttnn.shape<1x120x40x40>
tensor<[1,120,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 40 + d2, d3), memory_config: (150, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<532x1>>, >
shape: #ttnn.shape<1x1216x14x14>
tensor<[1,1216,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17024 + d1 * 14 + d2, d3), memory_config: (532, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<266x1>>, >
shape: #ttnn.shape<1x1216x7x7>
tensor<[1,1216,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8512 + d1 * 7 + d2, d3), memory_config: (266, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<546x1>>, >
shape: #ttnn.shape<1x1248x14x14>
tensor<[1,1248,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17472 + d1 * 14 + d2, d3), memory_config: (546, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<273x1>>, >
shape: #ttnn.shape<1x1248x7x7>
tensor<[1,1248,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8736 + d1 * 7 + d2, d3), memory_config: (273, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<351x1>>, >
shape: #ttnn.shape<1x1248x9x9>
tensor<[1,1248,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11232 + d1 * 9 + d2, d3), memory_config: (351, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<400x1>>, >
shape: #ttnn.shape<1x1280x10x10>
tensor<[1,1280,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12800 + d1 * 10 + d2, d3), memory_config: (400, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<480x1>>, >
shape: #ttnn.shape<1x1280x12x12>
tensor<[1,1280,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15360 + d1 * 12 + d2, d3), memory_config: (480, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<560x1>>, >
shape: #ttnn.shape<1x1280x14x14>
tensor<[1,1280,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17920 + d1 * 14 + d2, d3), memory_config: (560, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x1280x7x7>
tensor<[1,1280,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 7 + d2, d3), memory_config: (280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x1>>, >
shape: #ttnn.shape<1x1280x8x8>
tensor<[1,1280,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 8 + d2, d3), memory_config: (320, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x1>>, >
shape: #ttnn.shape<1x1280x9x9>
tensor<[1,1280,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 9 + d2, d3), memory_config: (360, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x4>>, >
shape: #ttnn.shape<1x128x112x112>
tensor<[1,128,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 112 + d2, d3), memory_config: (448, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x4>>, >
shape: #ttnn.shape<1x128x128x128>
tensor<[1,128,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 128 + d2, d3), memory_config: (512, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x1>>, >
shape: #ttnn.shape<1x128x14x14>
tensor<[1,128,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 14 + d2, d3), memory_config: (56, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x5>>, >
shape: #ttnn.shape<1x128x150x150>
tensor<[1,128,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 150 + d2, d3), memory_config: (600, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<68x1>>, >
shape: #ttnn.shape<1x128x17x17>
tensor<[1,128,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2176 + d1 * 17 + d2, d3), memory_config: (68, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x128x1x1>
tensor<[1,128,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 128 + d1 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x128x28x28>
tensor<[1,128,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 28 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x128x2x2>
tensor<[1,128,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 2 + d2, d3), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x128x32x32>
tensor<[1,128,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 32 + d2, d3), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<12x1>>, >
shape: #ttnn.shape<1x128x3x3>
tensor<[1,128,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 384 + d1 * 3 + d2, d3), memory_config: (12, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x2>>, >
shape: #ttnn.shape<1x128x56x56>
tensor<[1,128,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 56 + d2, d3), memory_config: (224, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x1>>, >
shape: #ttnn.shape<1x128x5x5>
tensor<[1,128,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 5 + d2, d3), memory_config: (20, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x2>>, >
shape: #ttnn.shape<1x128x64x64>
tensor<[1,128,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 64 + d2, d3), memory_config: (256, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x3>>, >
shape: #ttnn.shape<1x128x75x75>
tensor<[1,128,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 75 + d2, d3), memory_config: (300, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x128x7x7>
tensor<[1,128,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 7 + d2, d3), memory_config: (28, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x128>
tensor<[1,12,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x2>>, >
shape: #ttnn.shape<1x12x56x56>
tensor<[1,12,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 * 56 + d2, d3), memory_config: (21, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x12x768>
tensor<[1,12,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<574x1>>, >
shape: #ttnn.shape<1x1312x14x14>
tensor<[1,1312,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18368 + d1 * 14 + d2, d3), memory_config: (574, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<287x1>>, >
shape: #ttnn.shape<1x1312x7x7>
tensor<[1,1312,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9184 + d1 * 7 + d2, d3), memory_config: (287, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x1>>, >
shape: #ttnn.shape<1x1344x14x14>
tensor<[1,1344,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 14 + d2, d3), memory_config: (588, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1176x1>>, >
shape: #ttnn.shape<1x1344x28x28>
tensor<[1,1344,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 37632 + d1 * 28 + d2, d3), memory_config: (1176, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x1344x7x7>
tensor<[1,1344,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 7 + d2, d3), memory_config: (294, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<118x1>>, >
shape: #ttnn.shape<1x134x28x28>
tensor<[1,134,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3752 + d1 * 28 + d2, d3), memory_config: (118, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<81x1>>, >
shape: #ttnn.shape<1x136x19x19>
tensor<[1,136,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2584 + d1 * 19 + d2, d3), memory_config: (81, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x40>>, >
shape: #ttnn.shape<1x1370x1280>
tensor<[1,1370,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (43, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<602x1>>, >
shape: #ttnn.shape<1x1376x14x14>
tensor<[1,1376,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19264 + d1 * 14 + d2, d3), memory_config: (602, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<301x1>>, >
shape: #ttnn.shape<1x1376x7x7>
tensor<[1,1376,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9632 + d1 * 7 + d2, d3), memory_config: (301, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<435x1>>, >
shape: #ttnn.shape<1x1392x10x10>
tensor<[1,1392,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13920 + d1 * 10 + d2, d3), memory_config: (435, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<609x1>>, >
shape: #ttnn.shape<1x1392x14x14>
tensor<[1,1392,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19488 + d1 * 14 + d2, d3), memory_config: (609, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1218x1>>, >
shape: #ttnn.shape<1x1392x28x28>
tensor<[1,1392,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 38976 + d1 * 28 + d2, d3), memory_config: (1218, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13>
tensor<[1,13,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<616x1>>, >
shape: #ttnn.shape<1x1408x14x14>
tensor<[1,1408,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19712 + d1 * 14 + d2, d3), memory_config: (616, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x1408x7x7>
tensor<[1,1408,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 7 + d2, d3), memory_config: (308, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<630x1>>, >
shape: #ttnn.shape<1x1440x14x14>
tensor<[1,1440,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20160 + d1 * 14 + d2, d3), memory_config: (630, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<315x1>>, >
shape: #ttnn.shape<1x1440x7x7>
tensor<[1,1440,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10080 + d1 * 7 + d2, d3), memory_config: (315, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x6>>, >
shape: #ttnn.shape<1x1445x192>
tensor<[1,1445,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (46, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x1>>, >
shape: #ttnn.shape<1x144x14x14>
tensor<[1,144,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 14 + d2, d3), memory_config: (63, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<675x5>>, >
shape: #ttnn.shape<1x144x150x150>
tensor<[1,144,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21600 + d1 * 150 + d2, d3), memory_config: (675, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<855x6>>, >
shape: #ttnn.shape<1x144x190x190>
tensor<[1,144,190,190,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27360 + d1 * 190 + d2, d3), memory_config: (855, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x144x28x28>
tensor<[1,144,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 28 + d2, d3), memory_config: (126, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<135x1>>, >
shape: #ttnn.shape<1x144x30x30>
tensor<[1,144,30,30,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4320 + d1 * 30 + d2, d3), memory_config: (135, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<149x2>>, >
shape: #ttnn.shape<1x144x33x33>
tensor<[1,144,33,33,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4752 + d1 * 33 + d2, d3), memory_config: (149, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x2>>, >
shape: #ttnn.shape<1x144x56x56>
tensor<[1,144,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 56 + d2, d3), memory_config: (252, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<270x2>>, >
shape: #ttnn.shape<1x144x60x60>
tensor<[1,144,60,60,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8640 + d1 * 60 + d2, d3), memory_config: (270, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<293x3>>, >
shape: #ttnn.shape<1x144x65x65>
tensor<[1,144,65,65,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9360 + d1 * 65 + d2, d3), memory_config: (293, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<338x3>>, >
shape: #ttnn.shape<1x144x75x75>
tensor<[1,144,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10800 + d1 * 75 + d2, d3), memory_config: (338, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x144x7x7>
tensor<[1,144,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 7 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<428x3>>, >
shape: #ttnn.shape<1x144x95x95>
tensor<[1,144,95,95,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13680 + d1 * 95 + d2, d3), memory_config: (428, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<644x1>>, >
shape: #ttnn.shape<1x1472x14x14>
tensor<[1,1472,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20608 + d1 * 14 + d2, d3), memory_config: (644, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<322x1>>, >
shape: #ttnn.shape<1x1472x7x7>
tensor<[1,1472,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10304 + d1 * 7 + d2, d3), memory_config: (322, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14>
tensor<[1,14,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x14x128>
tensor<[1,14,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x14x14x1024>
tensor<[1,14,14,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<1x14x14x512>
tensor<[1,14,14,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x2>>, >
shape: #ttnn.shape<1x14x56x56>
tensor<[1,14,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 56 + d2, d3), memory_config: (25, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x14x768>
tensor<[1,14,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x24>>, >
shape: #ttnn.shape<1x1500x768>
tensor<[1,1500,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (47, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<658x1>>, >
shape: #ttnn.shape<1x1504x14x14>
tensor<[1,1504,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21056 + d1 * 14 + d2, d3), memory_config: (658, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<329x1>>, >
shape: #ttnn.shape<1x1504x7x7>
tensor<[1,1504,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10528 + d1 * 7 + d2, d3), memory_config: (329, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1536>
tensor<[1,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<480x1>>, >
shape: #ttnn.shape<1x1536x10x10>
tensor<[1,1536,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15360 + d1 * 10 + d2, d3), memory_config: (480, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<672x1>>, >
shape: #ttnn.shape<1x1536x14x14>
tensor<[1,1536,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21504 + d1 * 14 + d2, d3), memory_config: (672, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x1536x7x7>
tensor<[1,1536,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 7 + d2, d3), memory_config: (336, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<686x1>>, >
shape: #ttnn.shape<1x1568x14x14>
tensor<[1,1568,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 21952 + d1 * 14 + d2, d3), memory_config: (686, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<343x1>>, >
shape: #ttnn.shape<1x1568x7x7>
tensor<[1,1568,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10976 + d1 * 7 + d2, d3), memory_config: (343, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15>
tensor<[1,15,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<700x1>>, >
shape: #ttnn.shape<1x1600x14x14>
tensor<[1,1600,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22400 + d1 * 14 + d2, d3), memory_config: (700, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<350x1>>, >
shape: #ttnn.shape<1x1600x7x7>
tensor<[1,1600,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11200 + d1 * 7 + d2, d3), memory_config: (350, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x1>>, >
shape: #ttnn.shape<1x160x14x14>
tensor<[1,160,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 14 + d2, d3), memory_config: (70, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x1>>, >
shape: #ttnn.shape<1x160x24x24>
tensor<[1,160,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 24 + d2, d3), memory_config: (120, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<140x1>>, >
shape: #ttnn.shape<1x160x28x28>
tensor<[1,160,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4480 + d1 * 28 + d2, d3), memory_config: (140, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x2>>, >
shape: #ttnn.shape<1x160x56x56>
tensor<[1,160,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 56 + d2, d3), memory_config: (280, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x160x7x7>
tensor<[1,160,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 7 + d2, d3), memory_config: (35, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<612x1>>, >
shape: #ttnn.shape<1x1632x12x12>
tensor<[1,1632,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19584 + d1 * 12 + d2, d3), memory_config: (612, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<714x1>>, >
shape: #ttnn.shape<1x1632x14x14>
tensor<[1,1632,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 22848 + d1 * 14 + d2, d3), memory_config: (714, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<357x1>>, >
shape: #ttnn.shape<1x1632x7x7>
tensor<[1,1632,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11424 + d1 * 7 + d2, d3), memory_config: (357, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x6>>, >
shape: #ttnn.shape<1x16384x192>
tensor<[1,16384,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x32>
tensor<[1,16384,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x12>>, >
shape: #ttnn.shape<1x16384x384>
tensor<[1,16384,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x24>>, >
shape: #ttnn.shape<1x16384x768>
tensor<[1,16384,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<728x1>>, >
shape: #ttnn.shape<1x1664x14x14>
tensor<[1,1664,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23296 + d1 * 14 + d2, d3), memory_config: (728, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x1664x7x7>
tensor<[1,1664,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 7 + d2, d3), memory_config: (364, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x168x28x28>
tensor<[1,168,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 28 + d2, d3), memory_config: (147, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<742x1>>, >
shape: #ttnn.shape<1x1696x14x14>
tensor<[1,1696,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23744 + d1 * 14 + d2, d3), memory_config: (742, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<371x1>>, >
shape: #ttnn.shape<1x1696x7x7>
tensor<[1,1696,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11872 + d1 * 7 + d2, d3), memory_config: (371, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16>
tensor<[1,16,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x4>>, >
shape: #ttnn.shape<1x16x112x112>
tensor<[1,16,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 112 + d2, d3), memory_config: (56, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x4>>, >
shape: #ttnn.shape<1x16x120x120>
tensor<[1,16,120,120,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 120 + d2, d3), memory_config: (60, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<65x5>>, >
shape: #ttnn.shape<1x16x130x130>
tensor<[1,16,130,130,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2080 + d1 * 130 + d2, d3), memory_config: (65, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x16x14x14>
tensor<[1,16,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 14 + d2, d3), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x5>>, >
shape: #ttnn.shape<1x16x160x160>
tensor<[1,16,160,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 160 + d2, d3), memory_config: (80, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x7>>, >
shape: #ttnn.shape<1x16x224x224>
tensor<[1,16,224,224,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 224 + d2, d3), memory_config: (112, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<14x1>>, >
shape: #ttnn.shape<1x16x28x28>
tensor<[1,16,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 448 + d1 * 28 + d2, d3), memory_config: (14, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x2>>, >
shape: #ttnn.shape<1x16x56x56>
tensor<[1,16,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 56 + d2, d3), memory_config: (28, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x16x768>
tensor<[1,16,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<756x1>>, >
shape: #ttnn.shape<1x1728x14x14>
tensor<[1,1728,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24192 + d1 * 14 + d2, d3), memory_config: (756, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<378x1>>, >
shape: #ttnn.shape<1x1728x7x7>
tensor<[1,1728,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12096 + d1 * 7 + d2, d3), memory_config: (378, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<770x1>>, >
shape: #ttnn.shape<1x1760x14x14>
tensor<[1,1760,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 24640 + d1 * 14 + d2, d3), memory_config: (770, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<385x1>>, >
shape: #ttnn.shape<1x1760x7x7>
tensor<[1,1760,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12320 + d1 * 7 + d2, d3), memory_config: (385, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<784x1>>, >
shape: #ttnn.shape<1x1792x14x14>
tensor<[1,1792,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25088 + d1 * 14 + d2, d3), memory_config: (784, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x1792x7x7>
tensor<[1,1792,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 7 + d2, d3), memory_config: (392, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x17>
tensor<[1,17,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<399x1>>, >
shape: #ttnn.shape<1x1824x7x7>
tensor<[1,1824,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12768 + d1 * 7 + d2, d3), memory_config: (399, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<81x1>>, >
shape: #ttnn.shape<1x184x14x14>
tensor<[1,184,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2576 + d1 * 14 + d2, d3), memory_config: (81, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<115x1>>, >
shape: #ttnn.shape<1x184x20x20>
tensor<[1,184,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3680 + d1 * 20 + d2, d3), memory_config: (115, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<1x184x7x7>
tensor<[1,184,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1288 + d1 * 7 + d2, d3), memory_config: (41, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x1>>, >
shape: #ttnn.shape<1x1856x7x7>
tensor<[1,1856,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 7 + d2, d3), memory_config: (406, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<413x1>>, >
shape: #ttnn.shape<1x1888x7x7>
tensor<[1,1888,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13216 + d1 * 7 + d2, d3), memory_config: (413, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x18>
tensor<[1,18,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x18x14x14>
tensor<[1,18,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 252 + d1 * 14 + d2, d3), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x18x28x28>
tensor<[1,18,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 504 + d1 * 28 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x2>>, >
shape: #ttnn.shape<1x18x56x56>
tensor<[1,18,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 56 + d2, d3), memory_config: (32, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x18x7x7>
tensor<[1,18,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 126 + d1 * 7 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x2>>, >
shape: #ttnn.shape<1x19200x64>
tensor<[1,19200,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x1920x7x7>
tensor<[1,1920,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 7 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x192x14x14>
tensor<[1,192,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 14 + d2, d3), memory_config: (84, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x192x17x17>
tensor<[1,192,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3264 + d1 * 17 + d2, d3), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x192x28x28>
tensor<[1,192,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 28 + d2, d3), memory_config: (168, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x2>>, >
shape: #ttnn.shape<1x192x35x35>
tensor<[1,192,35,35,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 35 + d2, d3), memory_config: (210, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<228x2>>, >
shape: #ttnn.shape<1x192x38x38>
tensor<[1,192,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7296 + d1 * 38 + d2, d3), memory_config: (228, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<288x2>>, >
shape: #ttnn.shape<1x192x48x48>
tensor<[1,192,48,48,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9216 + d1 * 48 + d2, d3), memory_config: (288, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x2>>, >
shape: #ttnn.shape<1x192x56x56>
tensor<[1,192,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 56 + d2, d3), memory_config: (336, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<450x3>>, >
shape: #ttnn.shape<1x192x75x75>
tensor<[1,192,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14400 + d1 * 75 + d2, d3), memory_config: (450, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x1>>, >
shape: #ttnn.shape<1x192x7x7>
tensor<[1,192,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 7 + d2, d3), memory_config: (42, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<48x1>>, >
shape: #ttnn.shape<1x192x8x8>
tensor<[1,192,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1536 + d1 * 8 + d2, d3), memory_config: (48, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<570x3>>, >
shape: #ttnn.shape<1x192x95x95>
tensor<[1,192,95,95,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18240 + d1 * 95 + d2, d3), memory_config: (570, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<86x1>>, >
shape: #ttnn.shape<1x196x14x14>
tensor<[1,196,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2744 + d1 * 14 + d2, d3), memory_config: (86, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x196x768>
tensor<[1,196,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x197x1024>
tensor<[1,197,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x197x768>
tensor<[1,197,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x19>
tensor<[1,19,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<88x1>>, >
shape: #ttnn.shape<1x200x14x14>
tensor<[1,200,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2800 + d1 * 14 + d2, d3), memory_config: (88, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<125x1>>, >
shape: #ttnn.shape<1x200x20x20>
tensor<[1,200,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4000 + d1 * 20 + d2, d3), memory_config: (125, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<44x1>>, >
shape: #ttnn.shape<1x200x7x7>
tensor<[1,200,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1400 + d1 * 7 + d2, d3), memory_config: (44, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x201x768>
tensor<[1,201,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<640x1>>, >
shape: #ttnn.shape<1x2048x10x10>
tensor<[1,2048,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 20480 + d1 * 10 + d2, d3), memory_config: (640, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x1>>, >
shape: #ttnn.shape<1x2048x14x14>
tensor<[1,2048,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 14 + d2, d3), memory_config: (896, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<1x2048x768>
tensor<[1,2048,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x2048x7x7>
tensor<[1,2048,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 7 + d2, d3), memory_config: (448, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<91x1>>, >
shape: #ttnn.shape<1x208x14x14>
tensor<[1,208,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2912 + d1 * 14 + d2, d3), memory_config: (91, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<59x1>>, >
shape: #ttnn.shape<1x208x9x9>
tensor<[1,208,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1872 + d1 * 9 + d2, d3), memory_config: (59, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x20>
tensor<[1,20,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x20x28x28>
tensor<[1,20,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 560 + d1 * 28 + d2, d3), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x21>
tensor<[1,21,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x1>>, >
shape: #ttnn.shape<1x224x14x14>
tensor<[1,224,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 14 + d2, d3), memory_config: (98, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<119x1>>, >
shape: #ttnn.shape<1x224x17x17>
tensor<[1,224,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3808 + d1 * 17 + d2, d3), memory_config: (119, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x224x28x28>
tensor<[1,224,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 28 + d2, d3), memory_config: (196, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<245x2>>, >
shape: #ttnn.shape<1x224x35x35>
tensor<[1,224,35,35,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7840 + d1 * 35 + d2, d3), memory_config: (245, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x2>>, >
shape: #ttnn.shape<1x224x56x56>
tensor<[1,224,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 56 + d2, d3), memory_config: (392, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x1>>, >
shape: #ttnn.shape<1x224x7x7>
tensor<[1,224,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1568 + d1 * 7 + d2, d3), memory_config: (49, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x22>
tensor<[1,22,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<73x1>>, >
shape: #ttnn.shape<1x232x10x10>
tensor<[1,232,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2320 + d1 * 10 + d2, d3), memory_config: (73, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<812x4>>, >
shape: #ttnn.shape<1x232x112x112>
tensor<[1,232,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25984 + d1 * 112 + d2, d3), memory_config: (812, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x2>>, >
shape: #ttnn.shape<1x232x56x56>
tensor<[1,232,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 56 + d2, d3), memory_config: (406, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x23>
tensor<[1,23,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x240x14x14>
tensor<[1,240,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 14 + d2, d3), memory_config: (105, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<113x1>>, >
shape: #ttnn.shape<1x240x15x15>
tensor<[1,240,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 15 + d2, d3), memory_config: (113, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x240x20x20>
tensor<[1,240,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 20 + d2, d3), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x240x28x28>
tensor<[1,240,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 28 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<225x1>>, >
shape: #ttnn.shape<1x240x30x30>
tensor<[1,240,30,30,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7200 + d1 * 30 + d2, d3), memory_config: (225, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x2>>, >
shape: #ttnn.shape<1x240x40x40>
tensor<[1,240,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 40 + d2, d3), memory_config: (300, 2, 'tile<32x32, f32>', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24>
tensor<[1,24,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x4>>, >
shape: #ttnn.shape<1x24x112x112>
tensor<[1,24,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 112 + d2, d3), memory_config: (84, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x1>>, >
shape: #ttnn.shape<1x24x14x14>
tensor<[1,24,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 336 + d1 * 14 + d2, d3), memory_config: (11, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<113x5>>, >
shape: #ttnn.shape<1x24x150x150>
tensor<[1,24,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3600 + d1 * 150 + d2, d3), memory_config: (113, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<143x6>>, >
shape: #ttnn.shape<1x24x190x190>
tensor<[1,24,190,190,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4560 + d1 * 190 + d2, d3), memory_config: (143, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x24x28x28>
tensor<[1,24,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 * 28 + d2, d3), memory_config: (21, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x2>>, >
shape: #ttnn.shape<1x24x56x56>
tensor<[1,24,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 56 + d2, d3), memory_config: (42, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<45x2>>, >
shape: #ttnn.shape<1x24x60x60>
tensor<[1,24,60,60,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1440 + d1 * 60 + d2, d3), memory_config: (45, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<49x3>>, >
shape: #ttnn.shape<1x24x65x65>
tensor<[1,24,65,65,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1560 + d1 * 65 + d2, d3), memory_config: (49, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x3>>, >
shape: #ttnn.shape<1x24x80x80>
tensor<[1,24,80,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 80 + d2, d3), memory_config: (60, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1103x1>>, >
shape: #ttnn.shape<1x2520x14x14>
tensor<[1,2520,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 35280 + d1 * 14 + d2, d3), memory_config: (1103, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<552x1>>, >
shape: #ttnn.shape<1x2520x7x7>
tensor<[1,2520,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 17640 + d1 * 7 + d2, d3), memory_config: (552, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<1x256x1024>
tensor<[1,256,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x256x10x10>
tensor<[1,256,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 10 + d2, d3), memory_config: (80, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<1x256x1280>
tensor<[1,256,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1024x4>>, >
shape: #ttnn.shape<1x256x128x128>
tensor<[1,256,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32768 + d1 * 128 + d2, d3), memory_config: (1024, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x256x14x14>
tensor<[1,256,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 14 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x48>>, >
shape: #ttnn.shape<1x256x1536>
tensor<[1,256,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x5>>, >
shape: #ttnn.shape<1x256x160>
tensor<[1,256,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x256x16x16>
tensor<[1,256,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 16 + d2, d3), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<136x1>>, >
shape: #ttnn.shape<1x256x17x17>
tensor<[1,256,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4352 + d1 * 17 + d2, d3), memory_config: (136, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x256x256>
tensor<[1,256,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x256x28x28>
tensor<[1,256,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 28 + d2, d3), memory_config: (224, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x256x2x2>
tensor<[1,256,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 512 + d1 * 2 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x96>>, >
shape: #ttnn.shape<1x256x3072>
tensor<[1,256,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x32>
tensor<[1,256,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x256x32x32>
tensor<[1,256,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 32 + d2, d3), memory_config: (256, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<304x2>>, >
shape: #ttnn.shape<1x256x38x38>
tensor<[1,256,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9728 + d1 * 38 + d2, d3), memory_config: (304, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<24x1>>, >
shape: #ttnn.shape<1x256x3x3>
tensor<[1,256,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 768 + d1 * 3 + d2, d3), memory_config: (24, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<1x256x512>
tensor<[1,256,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x2>>, >
shape: #ttnn.shape<1x256x56x56>
tensor<[1,256,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 56 + d2, d3), memory_config: (448, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x1>>, >
shape: #ttnn.shape<1x256x5x5>
tensor<[1,256,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 5 + d2, d3), memory_config: (40, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x256x64>
tensor<[1,256,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x2>>, >
shape: #ttnn.shape<1x256x64x64>
tensor<[1,256,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 64 + d2, d3), memory_config: (512, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x3>>, >
shape: #ttnn.shape<1x256x75x75>
tensor<[1,256,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19200 + d1 * 75 + d2, d3), memory_config: (600, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x1>>, >
shape: #ttnn.shape<1x256x7x7>
tensor<[1,256,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 7 + d2, d3), memory_config: (56, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x256x8x8>
tensor<[1,256,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2048 + d1 * 8 + d2, d3), memory_config: (64, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x24>>, >
shape: #ttnn.shape<1x257x768>
tensor<[1,257,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (9, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25>
tensor<[1,25,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x25x768>
tensor<[1,25,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x26>
tensor<[1,26,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x272x12x12>
tensor<[1,272,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3264 + d1 * 12 + d2, d3), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x1>>, >
shape: #ttnn.shape<1x272x7x7>
tensor<[1,272,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1904 + d1 * 7 + d2, d3), memory_config: (60, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27>
tensor<[1,27,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x27x768>
tensor<[1,27,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 27 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x288x14x14>
tensor<[1,288,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 14 + d2, d3), memory_config: (126, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<153x1>>, >
shape: #ttnn.shape<1x288x17x17>
tensor<[1,288,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4896 + d1 * 17 + d2, d3), memory_config: (153, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<171x1>>, >
shape: #ttnn.shape<1x288x19x19>
tensor<[1,288,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5472 + d1 * 19 + d2, d3), memory_config: (171, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x288x28x28>
tensor<[1,288,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 28 + d2, d3), memory_config: (252, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<297x2>>, >
shape: #ttnn.shape<1x288x33x33>
tensor<[1,288,33,33,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9504 + d1 * 33 + d2, d3), memory_config: (297, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<342x2>>, >
shape: #ttnn.shape<1x288x38x38>
tensor<[1,288,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10944 + d1 * 38 + d2, d3), memory_config: (342, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x28>
tensor<[1,28,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<1x28x28x256>
tensor<[1,28,28,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x28x28x28>
tensor<[1,28,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x16>>, >
shape: #ttnn.shape<1x28x28x512>
tensor<[1,28,28,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x29>
tensor<[1,29,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x4>>, >
shape: #ttnn.shape<1x300x128>
tensor<[1,300,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x10>>, >
shape: #ttnn.shape<1x300x320>
tensor<[1,300,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x16>>, >
shape: #ttnn.shape<1x300x512>
tensor<[1,300,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<1x300x64>
tensor<[1,300,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<140x1>>, >
shape: #ttnn.shape<1x320x14x14>
tensor<[1,320,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4480 + d1 * 14 + d2, d3), memory_config: (140, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<170x1>>, >
shape: #ttnn.shape<1x320x17x17>
tensor<[1,320,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5440 + d1 * 17 + d2, d3), memory_config: (170, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x320x28x28>
tensor<[1,320,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 28 + d2, d3), memory_config: (280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x1>>, >
shape: #ttnn.shape<1x320x7x7>
tensor<[1,320,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 7 + d2, d3), memory_config: (70, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x320x8x8>
tensor<[1,320,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 8 + d2, d3), memory_config: (80, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32>
tensor<[1,32,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x32>>, >
shape: #ttnn.shape<1x32x10x1024>
tensor<[1,32,10,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 320 + d1 * 10 + d2, d3), memory_config: (10, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x128>>, >
shape: #ttnn.shape<1x32x10x4096>
tensor<[1,32,10,4096,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 320 + d1 * 10 + d2, d3), memory_config: (10, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x4>>, >
shape: #ttnn.shape<1x32x112x112>
tensor<[1,32,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 112 + d2, d3), memory_config: (112, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x4>>, >
shape: #ttnn.shape<1x32x120x120>
tensor<[1,32,120,120,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 120 + d2, d3), memory_config: (120, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x5>>, >
shape: #ttnn.shape<1x32x120x160>
tensor<[1,32,120,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 120 + d2, d3), memory_config: (120, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x4>>, >
shape: #ttnn.shape<1x32x128x128>
tensor<[1,32,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 128 + d2, d3), memory_config: (128, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<130x5>>, >
shape: #ttnn.shape<1x32x130x130>
tensor<[1,32,130,130,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4160 + d1 * 130 + d2, d3), memory_config: (130, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x5>>, >
shape: #ttnn.shape<1x32x147x147>
tensor<[1,32,147,147,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 147 + d2, d3), memory_config: (147, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<149x5>>, >
shape: #ttnn.shape<1x32x149x149>
tensor<[1,32,149,149,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4768 + d1 * 149 + d2, d3), memory_config: (149, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<14x1>>, >
shape: #ttnn.shape<1x32x14x14>
tensor<[1,32,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 448 + d1 * 14 + d2, d3), memory_config: (14, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x5>>, >
shape: #ttnn.shape<1x32x150x150>
tensor<[1,32,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 150 + d2, d3), memory_config: (150, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x32x1536>
tensor<[1,32,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<190x6>>, >
shape: #ttnn.shape<1x32x190x190>
tensor<[1,32,190,190,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6080 + d1 * 190 + d2, d3), memory_config: (190, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x32>>, >
shape: #ttnn.shape<1x32x20x1024>
tensor<[1,32,20,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 20 + d2, d3), memory_config: (20, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x8>>, >
shape: #ttnn.shape<1x32x20x256>
tensor<[1,32,20,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 20 + d2, d3), memory_config: (20, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x128>>, >
shape: #ttnn.shape<1x32x20x4096>
tensor<[1,32,20,4096,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 20 + d2, d3), memory_config: (20, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x8>>, >
shape: #ttnn.shape<1x32x256x256>
tensor<[1,32,256,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 256 + d2, d3), memory_config: (256, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x32x28x28>
tensor<[1,32,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 28 + d2, d3), memory_config: (28, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x32>>, >
shape: #ttnn.shape<1x32x30x1024>
tensor<[1,32,30,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 30 + d2, d3), memory_config: (30, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x128>>, >
shape: #ttnn.shape<1x32x30x4096>
tensor<[1,32,30,4096,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 30 + d2, d3), memory_config: (30, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x2>>, >
shape: #ttnn.shape<1x32x30x40>
tensor<[1,32,30,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 30 + d2, d3), memory_config: (30, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x32>>, >
shape: #ttnn.shape<1x32x40x1024>
tensor<[1,32,40,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 40 + d2, d3), memory_config: (40, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x8>>, >
shape: #ttnn.shape<1x32x40x256>
tensor<[1,32,40,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 40 + d2, d3), memory_config: (40, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x2>>, >
shape: #ttnn.shape<1x32x40x64>
tensor<[1,32,40,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 40 + d2, d3), memory_config: (40, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x16>>, >
shape: #ttnn.shape<1x32x512x512>
tensor<[1,32,512,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 512 + d2, d3), memory_config: (512, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x2>>, >
shape: #ttnn.shape<1x32x56x56>
tensor<[1,32,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 56 + d2, d3), memory_config: (56, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x32>>, >
shape: #ttnn.shape<1x32x60x1024>
tensor<[1,32,60,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 60 + d2, d3), memory_config: (60, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x8>>, >
shape: #ttnn.shape<1x32x60x256>
tensor<[1,32,60,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 60 + d2, d3), memory_config: (60, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x3>>, >
shape: #ttnn.shape<1x32x60x80>
tensor<[1,32,60,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 60 + d2, d3), memory_config: (60, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<75x3>>, >
shape: #ttnn.shape<1x32x75x75>
tensor<[1,32,75,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2400 + d1 * 75 + d2, d3), memory_config: (75, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x32x7x7>
tensor<[1,32,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 224 + d1 * 7 + d2, d3), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x8>>, >
shape: #ttnn.shape<1x32x80x256>
tensor<[1,32,80,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 80 + d2, d3), memory_config: (80, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x2>>, >
shape: #ttnn.shape<1x32x80x64>
tensor<[1,32,80,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 80 + d2, d3), memory_config: (80, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<95x3>>, >
shape: #ttnn.shape<1x32x95x95>
tensor<[1,32,95,95,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3040 + d1 * 95 + d2, d3), memory_config: (95, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x334x14x14>
tensor<[1,334,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4676 + d1 * 14 + d2, d3), memory_config: (147, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1176x4>>, >
shape: #ttnn.shape<1x336x112x112>
tensor<[1,336,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 37632 + d1 * 112 + d2, d3), memory_config: (1176, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x336x14x14>
tensor<[1,336,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 14 + d2, d3), memory_config: (147, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x336x24x24>
tensor<[1,336,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 24 + d2, d3), memory_config: (252, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x2>>, >
shape: #ttnn.shape<1x336x48x48>
tensor<[1,336,48,48,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 48 + d2, d3), memory_config: (504, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x2>>, >
shape: #ttnn.shape<1x336x56x56>
tensor<[1,336,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 56 + d2, d3), memory_config: (588, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x34x28x28>
tensor<[1,34,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 952 + d1 * 28 + d2, d3), memory_config: (30, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<154x1>>, >
shape: #ttnn.shape<1x352x14x14>
tensor<[1,352,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4928 + d1 * 14 + d2, d3), memory_config: (154, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x352x28x28>
tensor<[1,352,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 28 + d2, d3), memory_config: (308, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<99x1>>, >
shape: #ttnn.shape<1x352x9x9>
tensor<[1,352,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3168 + d1 * 9 + d2, d3), memory_config: (99, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x36x14x14>
tensor<[1,36,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 504 + d1 * 14 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x36x28x28>
tensor<[1,36,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 28 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x2>>, >
shape: #ttnn.shape<1x36x56x56>
tensor<[1,36,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 56 + d2, d3), memory_config: (63, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x36x7x7>
tensor<[1,36,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 252 + d1 * 7 + d2, d3), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1624x1>>, >
shape: #ttnn.shape<1x3712x14x14>
tensor<[1,3712,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 51968 + d1 * 14 + d2, d3), memory_config: (1624, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<812x1>>, >
shape: #ttnn.shape<1x3712x7x7>
tensor<[1,3712,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25984 + d1 * 7 + d2, d3), memory_config: (812, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x1>>, >
shape: #ttnn.shape<1x384x10x10>
tensor<[1,384,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 10 + d2, d3), memory_config: (120, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x384x14x14>
tensor<[1,384,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 14 + d2, d3), memory_config: (168, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<204x1>>, >
shape: #ttnn.shape<1x384x17x17>
tensor<[1,384,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6528 + d1 * 17 + d2, d3), memory_config: (204, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x384x28x28>
tensor<[1,384,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 28 + d2, d3), memory_config: (336, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x384x7x7>
tensor<[1,384,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 7 + d2, d3), memory_config: (84, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<96x1>>, >
shape: #ttnn.shape<1x384x8x8>
tensor<[1,384,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3072 + d1 * 8 + d2, d3), memory_config: (96, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x48>>, >
shape: #ttnn.shape<1x4096x1536>
tensor<[1,4096,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x10>>, >
shape: #ttnn.shape<1x4096x320>
tensor<[1,4096,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x12>>, >
shape: #ttnn.shape<1x4096x384>
tensor<[1,4096,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x4096x64>
tensor<[1,4096,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x24>>, >
shape: #ttnn.shape<1x4096x768>
tensor<[1,4096,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x40x14x14>
tensor<[1,40,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 560 + d1 * 14 + d2, d3), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x40x28x28>
tensor<[1,40,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 28 + d2, d3), memory_config: (35, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x1>>, >
shape: #ttnn.shape<1x40x30x30>
tensor<[1,40,30,30,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 30 + d2, d3), memory_config: (38, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<50x2>>, >
shape: #ttnn.shape<1x40x40x40>
tensor<[1,40,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 40 + d2, d3), memory_config: (50, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x2>>, >
shape: #ttnn.shape<1x40x56x56>
tensor<[1,40,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 56 + d2, d3), memory_config: (70, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<182x1>>, >
shape: #ttnn.shape<1x416x14x14>
tensor<[1,416,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5824 + d1 * 14 + d2, d3), memory_config: (182, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x416x28x28>
tensor<[1,416,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 28 + d2, d3), memory_config: (364, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x448x12x12>
tensor<[1,448,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 12 + d2, d3), memory_config: (168, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x448x14x14>
tensor<[1,448,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 14 + d2, d3), memory_config: (196, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x448x28x28>
tensor<[1,448,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 28 + d2, d3), memory_config: (392, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x448x8x8>
tensor<[1,448,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 8 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x45x768>
tensor<[1,45,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<1x462x7x7>
tensor<[1,462,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3234 + d1 * 7 + d2, d3), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<1x46x28x28>
tensor<[1,46,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1288 + d1 * 28 + d2, d3), memory_config: (41, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x4>>, >
shape: #ttnn.shape<1x4800x128>
tensor<[1,4800,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (150, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x480x10x10>
tensor<[1,480,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4800 + d1 * 10 + d2, d3), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x480x14x14>
tensor<[1,480,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 14 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<225x1>>, >
shape: #ttnn.shape<1x480x15x15>
tensor<[1,480,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7200 + d1 * 15 + d2, d3), memory_config: (225, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x1>>, >
shape: #ttnn.shape<1x480x20x20>
tensor<[1,480,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 20 + d2, d3), memory_config: (300, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x480x28x28>
tensor<[1,480,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 28 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x1>>, >
shape: #ttnn.shape<1x480x7x7>
tensor<[1,480,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 7 + d2, d3), memory_config: (105, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<21x1>>, >
shape: #ttnn.shape<1x48x14x14>
tensor<[1,48,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 672 + d1 * 14 + d2, d3), memory_config: (21, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<50x2>>, >
shape: #ttnn.shape<1x48x33x33>
tensor<[1,48,33,33,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1584 + d1 * 33 + d2, d3), memory_config: (50, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<57x2>>, >
shape: #ttnn.shape<1x48x38x38>
tensor<[1,48,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1824 + d1 * 38 + d2, d3), memory_config: (57, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x2>>, >
shape: #ttnn.shape<1x48x56x56>
tensor<[1,48,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 56 + d2, d3), memory_config: (84, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<11x1>>, >
shape: #ttnn.shape<1x48x7x7>
tensor<[1,48,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 336 + d1 * 7 + d2, d3), memory_config: (11, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x50x768>
tensor<[1,50,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x1>>, >
shape: #ttnn.shape<1x512x14x14>
tensor<[1,512,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 14 + d2, d3), memory_config: (224, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x1>>, >
shape: #ttnn.shape<1x512x16x16>
tensor<[1,512,16,16,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 16 + d2, d3), memory_config: (256, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x1>>, >
shape: #ttnn.shape<1x512x28x28>
tensor<[1,512,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 28 + d2, d3), memory_config: (448, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x512x32x32>
tensor<[1,512,32,32,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 32 + d2, d3), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<896x2>>, >
shape: #ttnn.shape<1x512x56x56>
tensor<[1,512,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 28672 + d1 * 56 + d2, d3), memory_config: (896, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x1>>, >
shape: #ttnn.shape<1x512x5x5>
tensor<[1,512,5,5,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 5 + d2, d3), memory_config: (80, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x1>>, >
shape: #ttnn.shape<1x512x7x7>
tensor<[1,512,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 7 + d2, d3), memory_config: (112, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x512x8x8>
tensor<[1,512,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 8 + d2, d3), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<281x1>>, >
shape: #ttnn.shape<1x528x17x17>
tensor<[1,528,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8976 + d1 * 17 + d2, d3), memory_config: (281, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<238x1>>, >
shape: #ttnn.shape<1x544x14x14>
tensor<[1,544,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7616 + d1 * 14 + d2, d3), memory_config: (238, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x56x14x14>
tensor<[1,56,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 14 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x2>>, >
shape: #ttnn.shape<1x56x48x48>
tensor<[1,56,48,48,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 48 + d2, d3), memory_config: (84, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<1x56x56x128>
tensor<[1,56,56,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<252x1>>, >
shape: #ttnn.shape<1x576x14x14>
tensor<[1,576,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8064 + d1 * 14 + d2, d3), memory_config: (252, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<342x1>>, >
shape: #ttnn.shape<1x576x19x19>
tensor<[1,576,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10944 + d1 * 19 + d2, d3), memory_config: (342, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x1>>, >
shape: #ttnn.shape<1x576x7x7>
tensor<[1,576,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 7 + d2, d3), memory_config: (126, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<51x1>>, >
shape: #ttnn.shape<1x58x28x28>
tensor<[1,58,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1624 + d1 * 28 + d2, d3), memory_config: (51, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x5>
tensor<[1,5,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x5x1024>
tensor<[1,5,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<266x1>>, >
shape: #ttnn.shape<1x608x14x14>
tensor<[1,608,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8512 + d1 * 14 + d2, d3), memory_config: (266, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<53x1>>, >
shape: #ttnn.shape<1x60x28x28>
tensor<[1,60,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1680 + d1 * 28 + d2, d3), memory_config: (53, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<280x1>>, >
shape: #ttnn.shape<1x640x14x14>
tensor<[1,640,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8960 + d1 * 14 + d2, d3), memory_config: (280, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<224x4>>, >
shape: #ttnn.shape<1x64x112x112>
tensor<[1,64,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7168 + d1 * 112 + d2, d3), memory_config: (224, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<240x5>>, >
shape: #ttnn.shape<1x64x120x160>
tensor<[1,64,120,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7680 + d1 * 120 + d2, d3), memory_config: (240, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x40>>, >
shape: #ttnn.shape<1x64x1280>
tensor<[1,64,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<256x4>>, >
shape: #ttnn.shape<1x64x128x128>
tensor<[1,64,128,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8192 + d1 * 128 + d2, d3), memory_config: (256, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x5>>, >
shape: #ttnn.shape<1x64x147x147>
tensor<[1,64,147,147,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 147 + d2, d3), memory_config: (294, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x1>>, >
shape: #ttnn.shape<1x64x14x14>
tensor<[1,64,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 14 + d2, d3), memory_config: (28, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<300x5>>, >
shape: #ttnn.shape<1x64x150x150>
tensor<[1,64,150,150,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9600 + d1 * 150 + d2, d3), memory_config: (300, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<320x5>>, >
shape: #ttnn.shape<1x64x160x160>
tensor<[1,64,160,160,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10240 + d1 * 160 + d2, d3), memory_config: (320, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x64x1x1>
tensor<[1,64,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 64 + d1 + d2, d3), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<448x7>>, >
shape: #ttnn.shape<1x64x224x224>
tensor<[1,64,224,224,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14336 + d1 * 224 + d2, d3), memory_config: (448, 7, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x8>>, >
shape: #ttnn.shape<1x64x256x256>
tensor<[1,64,256,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16384 + d1 * 256 + d2, d3), memory_config: (512, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<56x1>>, >
shape: #ttnn.shape<1x64x28x28>
tensor<[1,64,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1792 + d1 * 28 + d2, d3), memory_config: (56, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<4x1>>, >
shape: #ttnn.shape<1x64x2x2>
tensor<[1,64,2,2,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 128 + d1 * 2 + d2, d3), memory_config: (4, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x2>>, >
shape: #ttnn.shape<1x64x30x40>
tensor<[1,64,30,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 30 + d2, d3), memory_config: (60, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<70x2>>, >
shape: #ttnn.shape<1x64x35x35>
tensor<[1,64,35,35,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2240 + d1 * 35 + d2, d3), memory_config: (70, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<112x2>>, >
shape: #ttnn.shape<1x64x56x56>
tensor<[1,64,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3584 + d1 * 56 + d2, d3), memory_config: (112, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<120x3>>, >
shape: #ttnn.shape<1x64x60x80>
tensor<[1,64,60,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3840 + d1 * 60 + d2, d3), memory_config: (120, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x64x64x64>
tensor<[1,64,64,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4096 + d1 * 64 + d2, d3), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<146x3>>, >
shape: #ttnn.shape<1x64x73x73>
tensor<[1,64,73,73,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4672 + d1 * 73 + d2, d3), memory_config: (146, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<160x3>>, >
shape: #ttnn.shape<1x64x80x80>
tensor<[1,64,80,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5120 + d1 * 80 + d2, d3), memory_config: (160, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x6>>, >
shape: #ttnn.shape<1x65536x192>
tensor<[1,65536,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 65536 + d1, d2), memory_config: (2048, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x672x10x10>
tensor<[1,672,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 10 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<294x1>>, >
shape: #ttnn.shape<1x672x14x14>
tensor<[1,672,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9408 + d1 * 14 + d2, d3), memory_config: (294, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<315x1>>, >
shape: #ttnn.shape<1x672x15x15>
tensor<[1,672,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10080 + d1 * 15 + d2, d3), memory_config: (315, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x672x20x20>
tensor<[1,672,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 20 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<504x1>>, >
shape: #ttnn.shape<1x672x24x24>
tensor<[1,672,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16128 + d1 * 24 + d2, d3), memory_config: (504, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<588x1>>, >
shape: #ttnn.shape<1x672x28x28>
tensor<[1,672,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 18816 + d1 * 28 + d2, d3), memory_config: (588, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1176x2>>, >
shape: #ttnn.shape<1x672x56x56>
tensor<[1,672,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 37632 + d1 * 56 + d2, d3), memory_config: (1176, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<147x1>>, >
shape: #ttnn.shape<1x672x7x7>
tensor<[1,672,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4704 + d1 * 7 + d2, d3), memory_config: (147, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x1>>, >
shape: #ttnn.shape<1x672x8x8>
tensor<[1,672,8,8,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 8 + d2, d3), memory_config: (168, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x1>>, >
shape: #ttnn.shape<1x68x14x14>
tensor<[1,68,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 952 + d1 * 14 + d2, d3), memory_config: (30, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<119x2>>, >
shape: #ttnn.shape<1x68x56x56>
tensor<[1,68,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3808 + d1 * 56 + d2, d3), memory_config: (119, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<609x1>>, >
shape: #ttnn.shape<1x696x28x28>
tensor<[1,696,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 19488 + d1 * 28 + d2, d3), memory_config: (609, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1218x2>>, >
shape: #ttnn.shape<1x696x56x56>
tensor<[1,696,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 38976 + d1 * 56 + d2, d3), memory_config: (1218, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6>
tensor<[1,6,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x6x1024>
tensor<[1,6,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<308x1>>, >
shape: #ttnn.shape<1x704x14x14>
tensor<[1,704,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9856 + d1 * 14 + d2, d3), memory_config: (308, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<383x1>>, >
shape: #ttnn.shape<1x720x17x17>
tensor<[1,720,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12240 + d1 * 17 + d2, d3), memory_config: (383, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<203x1>>, >
shape: #ttnn.shape<1x720x9x9>
tensor<[1,720,9,9,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6480 + d1 * 9 + d2, d3), memory_config: (203, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<433x1>>, >
shape: #ttnn.shape<1x728x19x19>
tensor<[1,728,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13832 + d1 * 19 + d2, d3), memory_config: (433, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<865x2>>, >
shape: #ttnn.shape<1x728x38x38>
tensor<[1,728,38,38,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 27664 + d1 * 38 + d2, d3), memory_config: (865, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x72x14x14>
tensor<[1,72,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1008 + d1 * 14 + d2, d3), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<63x1>>, >
shape: #ttnn.shape<1x72x28x28>
tensor<[1,72,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2016 + d1 * 28 + d2, d3), memory_config: (63, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<90x2>>, >
shape: #ttnn.shape<1x72x40x40>
tensor<[1,72,40,40,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2880 + d1 * 40 + d2, d3), memory_config: (90, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<126x2>>, >
shape: #ttnn.shape<1x72x56x56>
tensor<[1,72,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 4032 + d1 * 56 + d2, d3), memory_config: (126, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<16x1>>, >
shape: #ttnn.shape<1x72x7x7>
tensor<[1,72,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 504 + d1 * 7 + d2, d3), memory_config: (16, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<180x3>>, >
shape: #ttnn.shape<1x72x80x80>
tensor<[1,72,80,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5760 + d1 * 80 + d2, d3), memory_config: (180, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<322x1>>, >
shape: #ttnn.shape<1x736x14x14>
tensor<[1,736,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10304 + d1 * 14 + d2, d3), memory_config: (322, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x768>
tensor<[1,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x1>>, >
shape: #ttnn.shape<1x768x14x14>
tensor<[1,768,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 14 + d2, d3), memory_config: (336, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<69x1>>, >
shape: #ttnn.shape<1x78x28x28>
tensor<[1,78,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2184 + d1 * 28 + d2, d3), memory_config: (69, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7>
tensor<[1,7,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x142>>, >
shape: #ttnn.shape<1x7x4544>
tensor<[1,7,4544,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 142, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x7x768>
tensor<[1,7,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<1x7x7x1024>
tensor<[1,7,7,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x64>>, >
shape: #ttnn.shape<1x7x7x2048>
tensor<[1,7,7,2048,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<350x1>>, >
shape: #ttnn.shape<1x800x14x14>
tensor<[1,800,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11200 + d1 * 14 + d2, d3), memory_config: (350, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x80x10x10>
tensor<[1,80,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 800 + d1 * 10 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<35x1>>, >
shape: #ttnn.shape<1x80x14x14>
tensor<[1,80,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1120 + d1 * 14 + d2, d3), memory_config: (35, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x1>>, >
shape: #ttnn.shape<1x80x15x15>
tensor<[1,80,15,15,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1200 + d1 * 15 + d2, d3), memory_config: (38, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<50x1>>, >
shape: #ttnn.shape<1x80x20x20>
tensor<[1,80,20,20,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1600 + d1 * 20 + d2, d3), memory_config: (50, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<18x1>>, >
shape: #ttnn.shape<1x80x7x7>
tensor<[1,80,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 560 + d1 * 7 + d2, d3), memory_config: (18, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<255x1>>, >
shape: #ttnn.shape<1x816x10x10>
tensor<[1,816,10,10,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 8160 + d1 * 10 + d2, d3), memory_config: (255, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<485x1>>, >
shape: #ttnn.shape<1x816x19x19>
tensor<[1,816,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 15504 + d1 * 19 + d2, d3), memory_config: (485, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<364x1>>, >
shape: #ttnn.shape<1x832x14x14>
tensor<[1,832,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11648 + d1 * 14 + d2, d3), memory_config: (364, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<378x1>>, >
shape: #ttnn.shape<1x864x14x14>
tensor<[1,864,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12096 + d1 * 14 + d2, d3), memory_config: (378, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x1>>, >
shape: #ttnn.shape<1x88x17x17>
tensor<[1,88,17,17,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1496 + d1 * 17 + d2, d3), memory_config: (47, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<392x1>>, >
shape: #ttnn.shape<1x896x14x14>
tensor<[1,896,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12544 + d1 * 14 + d2, d3), memory_config: (392, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<196x1>>, >
shape: #ttnn.shape<1x896x7x7>
tensor<[1,896,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6272 + d1 * 7 + d2, d3), memory_config: (196, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8>
tensor<[1,8,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<28x4>>, >
shape: #ttnn.shape<1x8x112x112>
tensor<[1,8,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 896 + d1 * 112 + d2, d3), memory_config: (28, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x8x768>
tensor<[1,8,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<406x1>>, >
shape: #ttnn.shape<1x928x14x14>
tensor<[1,928,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12992 + d1 * 14 + d2, d3), memory_config: (406, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<203x1>>, >
shape: #ttnn.shape<1x928x7x7>
tensor<[1,928,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6496 + d1 * 7 + d2, d3), memory_config: (203, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<41x1>>, >
shape: #ttnn.shape<1x92x14x14>
tensor<[1,92,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1288 + d1 * 14 + d2, d3), memory_config: (41, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x1>>, >
shape: #ttnn.shape<1x960x12x12>
tensor<[1,960,12,12,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 12 + d2, d3), memory_config: (360, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<420x1>>, >
shape: #ttnn.shape<1x960x14x14>
tensor<[1,960,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13440 + d1 * 14 + d2, d3), memory_config: (420, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<720x1>>, >
shape: #ttnn.shape<1x960x24x24>
tensor<[1,960,24,24,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 23040 + d1 * 24 + d2, d3), memory_config: (720, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<90x1>>, >
shape: #ttnn.shape<1x960x3x3>
tensor<[1,960,3,3,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2880 + d1 * 3 + d2, d3), memory_config: (90, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<210x1>>, >
shape: #ttnn.shape<1x960x7x7>
tensor<[1,960,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6720 + d1 * 7 + d2, d3), memory_config: (210, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<336x4>>, >
shape: #ttnn.shape<1x96x112x112>
tensor<[1,96,112,112,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10752 + d1 * 112 + d2, d3), memory_config: (336, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<360x4>>, >
shape: #ttnn.shape<1x96x120x120>
tensor<[1,96,120,120,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 11520 + d1 * 120 + d2, d3), memory_config: (360, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<390x5>>, >
shape: #ttnn.shape<1x96x130x130>
tensor<[1,96,130,130,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12480 + d1 * 130 + d2, d3), memory_config: (390, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<42x1>>, >
shape: #ttnn.shape<1x96x14x14>
tensor<[1,96,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1344 + d1 * 14 + d2, d3), memory_config: (42, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<57x1>>, >
shape: #ttnn.shape<1x96x19x19>
tensor<[1,96,19,19,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1824 + d1 * 19 + d2, d3), memory_config: (57, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<84x1>>, >
shape: #ttnn.shape<1x96x28x28>
tensor<[1,96,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2688 + d1 * 28 + d2, d3), memory_config: (84, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<105x2>>, >
shape: #ttnn.shape<1x96x35x35>
tensor<[1,96,35,35,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3360 + d1 * 35 + d2, d3), memory_config: (105, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<168x2>>, >
shape: #ttnn.shape<1x96x56x56>
tensor<[1,96,56,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5376 + d1 * 56 + d2, d3), memory_config: (168, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<180x2>>, >
shape: #ttnn.shape<1x96x60x60>
tensor<[1,96,60,60,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5760 + d1 * 60 + d2, d3), memory_config: (180, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<195x3>>, >
shape: #ttnn.shape<1x96x65x65>
tensor<[1,96,65,65,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6240 + d1 * 65 + d2, d3), memory_config: (195, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<213x3>>, >
shape: #ttnn.shape<1x96x71x71>
tensor<[1,96,71,71,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6816 + d1 * 71 + d2, d3), memory_config: (213, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<219x3>>, >
shape: #ttnn.shape<1x96x73x73>
tensor<[1,96,73,73,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7008 + d1 * 73 + d2, d3), memory_config: (219, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<86x1>>, >
shape: #ttnn.shape<1x98x28x28>
tensor<[1,98,28,28,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2744 + d1 * 28 + d2, d3), memory_config: (86, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<434x1>>, >
shape: #ttnn.shape<1x992x14x14>
tensor<[1,992,14,14,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13888 + d1 * 14 + d2, d3), memory_config: (434, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<217x1>>, >
shape: #ttnn.shape<1x992x7x7>
tensor<[1,992,7,7,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6944 + d1 * 7 + d2, d3), memory_config: (217, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9>
tensor<[1,9,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x9x1024>
tensor<[1,9,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<1x9x2048>
tensor<[1,9,2048,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x9x768>
tensor<[1,9,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<2x7x512>
tensor<[2,7,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x102>>, >
shape: #ttnn.shape<3234>
tensor<[3234,f32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 102, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<102x1>>, >
shape: #ttnn.shape<3234x1>
tensor<[3234,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (102, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x10>>, >
shape: #ttnn.shape<3x320x320>
tensor<[3,320,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 320 + d1, d2), memory_config: (30, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<45>
tensor<[45,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 2, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<4x1x1024>
tensor<[4,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x2>>, >
shape: #ttnn.shape<4x49x49>
tensor<[4,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (7, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<5>
tensor<[5,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x2>>, >
shape: #ttnn.shape<64x49x49>
tensor<[64,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (98, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<8x2048>
tensor<[8,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x8>>, >
shape: #ttnn.shape<920x1x256>
tensor<[920,1,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (29, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1>
tensor<[1,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x14x3072>
tensor<[1,14,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x15x1024>
tensor<[1,15,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 15 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x1x3072>
tensor<[1,1,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x1x4096>
tensor<[1,1,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x192>>, >
shape: #ttnn.shape<1x32x6144>
tensor<[1,32,6144,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 192, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x96>>, >
shape: #ttnn.shape<1x45x3072>
tensor<[1,45,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x5x4096>
tensor<[1,5,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x768>
tensor<[1,768,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x7x3072>
tensor<[1,7,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x512>>, >
shape: #ttnn.shape<1x9x16384>
tensor<[1,9,16384,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 512, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x96>>, >
shape: #ttnn.shape<1x9x3072>
tensor<[1,9,3072,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 96, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x256>>, >
shape: #ttnn.shape<1x9x8192>
tensor<[1,9,8192,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 256, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<5>
tensor<[5,ui32]>mapping_from: (d0), mapping_to: (0, d0), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<5x1>
tensor<[5,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<5x5>
tensor<[5,5,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<5x5>
tensor<[5,5,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1024>
tensor<[1,1024,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x1024x1>
tensor<[1,1024,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x48>>, >
shape: #ttnn.shape<1x1024x1536>
tensor<[1,1024,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x1024x1>
tensor<[1,1024,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x5>>, >
shape: #ttnn.shape<1x1024x160>
tensor<[1,1024,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x1024x1>
tensor<[1,1024,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x96>>, >
shape: #ttnn.shape<1x1024x3072>
tensor<[1,1024,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x1024x1>
tensor<[1,1024,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x20>>, >
shape: #ttnn.shape<1x1024x640>
tensor<[1,1024,640,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 20, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x1>>, >
shape: #ttnn.shape<1x1024x1>
tensor<[1,1024,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<32x24>>, >
shape: #ttnn.shape<1x1024x768>
tensor<[1,1024,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1024 + d1, d2), memory_config: (32, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10x1>
tensor<[1,10,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x10x768>
tensor<[1,10,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 10 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x1>>, >
shape: #ttnn.shape<1x1200x1>
tensor<[1,1200,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (38, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<38x10>>, >
shape: #ttnn.shape<1x1200x320>
tensor<[1,1200,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1200 + d1, d2), memory_config: (38, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1>
tensor<[1,12,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x128>
tensor<[1,12,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12x1>
tensor<[1,12,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x12x768>
tensor<[1,12,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 12 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x1>>, >
shape: #ttnn.shape<1x1370x1>
tensor<[1,1370,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (43, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<43x40>>, >
shape: #ttnn.shape<1x1370x1280>
tensor<[1,1370,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1370 + d1, d2), memory_config: (43, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x1>>, >
shape: #ttnn.shape<1x1445x1>
tensor<[1,1445,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (46, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<46x6>>, >
shape: #ttnn.shape<1x1445x192>
tensor<[1,1445,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1445 + d1, d2), memory_config: (46, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14x1>
tensor<[1,14,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x14x128>
tensor<[1,14,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x14x14x1>
tensor<[1,14,14,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x14x14x1024>
tensor<[1,14,14,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x14x14x1>
tensor<[1,14,14,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x16>>, >
shape: #ttnn.shape<1x14x14x512>
tensor<[1,14,14,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 196 + d1 * 14 + d2, d3), memory_config: (7, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14x1>
tensor<[1,14,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x14x768>
tensor<[1,14,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 14 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x1>>, >
shape: #ttnn.shape<1x1500x1>
tensor<[1,1500,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (47, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<47x24>>, >
shape: #ttnn.shape<1x1500x768>
tensor<[1,1500,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 1500 + d1, d2), memory_config: (47, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x1536>
tensor<[1,1536,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x1>
tensor<[1,16384,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x6>>, >
shape: #ttnn.shape<1x16384x192>
tensor<[1,16384,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x1>
tensor<[1,16384,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x32>
tensor<[1,16384,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x1>
tensor<[1,16384,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x12>>, >
shape: #ttnn.shape<1x16384x384>
tensor<[1,16384,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x1>>, >
shape: #ttnn.shape<1x16384x1>
tensor<[1,16384,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<512x24>>, >
shape: #ttnn.shape<1x16384x768>
tensor<[1,16384,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16384 + d1, d2), memory_config: (512, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16x1>
tensor<[1,16,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x16x768>
tensor<[1,16,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 16 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x1>>, >
shape: #ttnn.shape<1x19200x1>
tensor<[1,19200,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<600x2>>, >
shape: #ttnn.shape<1x19200x64>
tensor<[1,19200,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 19200 + d1, d2), memory_config: (600, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x196x1>
tensor<[1,196,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x196x768>
tensor<[1,196,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 196 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x197x1>
tensor<[1,197,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x32>>, >
shape: #ttnn.shape<1x197x1024>
tensor<[1,197,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x197x1>
tensor<[1,197,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x197x768>
tensor<[1,197,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 197 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1>
tensor<[1,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x1x1024>
tensor<[1,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1>
tensor<[1,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x1x768>
tensor<[1,1,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x1>>, >
shape: #ttnn.shape<1x201x1>
tensor<[1,201,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (7, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x24>>, >
shape: #ttnn.shape<1x201x768>
tensor<[1,201,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 201 + d1, d2), memory_config: (7, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x1>>, >
shape: #ttnn.shape<1x2048x1>
tensor<[1,2048,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<64x24>>, >
shape: #ttnn.shape<1x2048x768>
tensor<[1,2048,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 2048 + d1, d2), memory_config: (64, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x32>>, >
shape: #ttnn.shape<1x256x1024>
tensor<[1,256,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x40>>, >
shape: #ttnn.shape<1x256x1280>
tensor<[1,256,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x48>>, >
shape: #ttnn.shape<1x256x1536>
tensor<[1,256,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x5>>, >
shape: #ttnn.shape<1x256x160>
tensor<[1,256,160,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 5, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x256x256>
tensor<[1,256,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x96>>, >
shape: #ttnn.shape<1x256x3072>
tensor<[1,256,3072,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 96, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x32>
tensor<[1,256,32,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x16>>, >
shape: #ttnn.shape<1x256x512>
tensor<[1,256,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x1>>, >
shape: #ttnn.shape<1x256x1>
tensor<[1,256,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x2>>, >
shape: #ttnn.shape<1x256x64>
tensor<[1,256,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 256 + d1, d2), memory_config: (8, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x1>>, >
shape: #ttnn.shape<1x257x1>
tensor<[1,257,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (9, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<9x24>>, >
shape: #ttnn.shape<1x257x768>
tensor<[1,257,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 257 + d1, d2), memory_config: (9, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25x1>
tensor<[1,25,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x25x768>
tensor<[1,25,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 25 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27x1>
tensor<[1,27,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 27 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x27x768>
tensor<[1,27,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 27 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x28x28x1>
tensor<[1,28,28,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x8>>, >
shape: #ttnn.shape<1x28x28x256>
tensor<[1,28,28,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x1>>, >
shape: #ttnn.shape<1x28x28x1>
tensor<[1,28,28,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x16>>, >
shape: #ttnn.shape<1x28x28x512>
tensor<[1,28,28,512,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 784 + d1 * 28 + d2, d3), memory_config: (25, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<1x300x1>
tensor<[1,300,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x4>>, >
shape: #ttnn.shape<1x300x128>
tensor<[1,300,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<1x300x1>
tensor<[1,300,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x10>>, >
shape: #ttnn.shape<1x300x320>
tensor<[1,300,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<1x300x1>
tensor<[1,300,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x16>>, >
shape: #ttnn.shape<1x300x512>
tensor<[1,300,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x1>>, >
shape: #ttnn.shape<1x300x1>
tensor<[1,300,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x2>>, >
shape: #ttnn.shape<1x300x64>
tensor<[1,300,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 300 + d1, d2), memory_config: (10, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x32>>, >
shape: #ttnn.shape<1x32x10x1024>
tensor<[1,32,10,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 320 + d1 * 10 + d2, d3), memory_config: (10, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<10x128>>, >
shape: #ttnn.shape<1x32x10x4096>
tensor<[1,32,10,4096,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 320 + d1 * 10 + d2, d3), memory_config: (10, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1>
tensor<[1,32,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x48>>, >
shape: #ttnn.shape<1x32x1536>
tensor<[1,32,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 32 + d1, d2), memory_config: (1, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x32>>, >
shape: #ttnn.shape<1x32x20x1024>
tensor<[1,32,20,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 20 + d2, d3), memory_config: (20, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x8>>, >
shape: #ttnn.shape<1x32x20x256>
tensor<[1,32,20,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 20 + d2, d3), memory_config: (20, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<20x128>>, >
shape: #ttnn.shape<1x32x20x4096>
tensor<[1,32,20,4096,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 640 + d1 * 20 + d2, d3), memory_config: (20, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x32>>, >
shape: #ttnn.shape<1x32x30x1024>
tensor<[1,32,30,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 30 + d2, d3), memory_config: (30, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<30x128>>, >
shape: #ttnn.shape<1x32x30x4096>
tensor<[1,32,30,4096,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 960 + d1 * 30 + d2, d3), memory_config: (30, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x32>>, >
shape: #ttnn.shape<1x32x40x1024>
tensor<[1,32,40,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 40 + d2, d3), memory_config: (40, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x8>>, >
shape: #ttnn.shape<1x32x40x256>
tensor<[1,32,40,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 40 + d2, d3), memory_config: (40, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<40x2>>, >
shape: #ttnn.shape<1x32x40x64>
tensor<[1,32,40,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1280 + d1 * 40 + d2, d3), memory_config: (40, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x32>>, >
shape: #ttnn.shape<1x32x60x1024>
tensor<[1,32,60,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 60 + d2, d3), memory_config: (60, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<60x8>>, >
shape: #ttnn.shape<1x32x60x256>
tensor<[1,32,60,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 1920 + d1 * 60 + d2, d3), memory_config: (60, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x8>>, >
shape: #ttnn.shape<1x32x80x256>
tensor<[1,32,80,256,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 80 + d2, d3), memory_config: (80, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x32x1x1>
tensor<[1,32,1,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<80x2>>, >
shape: #ttnn.shape<1x32x80x64>
tensor<[1,32,80,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 2560 + d1 * 80 + d2, d3), memory_config: (80, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x4096x1>
tensor<[1,4096,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x48>>, >
shape: #ttnn.shape<1x4096x1536>
tensor<[1,4096,1536,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 48, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x4096x1>
tensor<[1,4096,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x10>>, >
shape: #ttnn.shape<1x4096x320>
tensor<[1,4096,320,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 10, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x4096x1>
tensor<[1,4096,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x12>>, >
shape: #ttnn.shape<1x4096x384>
tensor<[1,4096,384,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 12, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x4096x1>
tensor<[1,4096,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x2>>, >
shape: #ttnn.shape<1x4096x64>
tensor<[1,4096,64,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x1>>, >
shape: #ttnn.shape<1x4096x1>
tensor<[1,4096,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<128x24>>, >
shape: #ttnn.shape<1x4096x768>
tensor<[1,4096,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4096 + d1, d2), memory_config: (128, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x45x1>
tensor<[1,45,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x45x768>
tensor<[1,45,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 45 + d1, d2), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x1>>, >
shape: #ttnn.shape<1x4800x1>
tensor<[1,4800,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (150, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<150x4>>, >
shape: #ttnn.shape<1x4800x128>
tensor<[1,4800,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 4800 + d1, d2), memory_config: (150, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x50x1>
tensor<[1,50,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x24>>, >
shape: #ttnn.shape<1x50x768>
tensor<[1,50,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 50 + d1, d2), memory_config: (2, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x1>>, >
shape: #ttnn.shape<1x56x56x1>
tensor<[1,56,56,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x4>>, >
shape: #ttnn.shape<1x56x56x128>
tensor<[1,56,56,128,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 3136 + d1 * 56 + d2, d3), memory_config: (98, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x5x1>
tensor<[1,5,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x5x1024>
tensor<[1,5,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 5 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x64x1>
tensor<[1,64,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x40>>, >
shape: #ttnn.shape<1x64x1280>
tensor<[1,64,1280,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 64 + d1, d2), memory_config: (2, 40, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x1>>, >
shape: #ttnn.shape<1x65536x1>
tensor<[1,65536,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 65536 + d1, d2), memory_config: (2048, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2048x6>>, >
shape: #ttnn.shape<1x65536x192>
tensor<[1,65536,192,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 65536 + d1, d2), memory_config: (2048, 6, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6x1>
tensor<[1,6,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x6x1024>
tensor<[1,6,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 6 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1>
tensor<[1,1,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x768>
tensor<[1,768,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7x1>
tensor<[1,7,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x142>>, >
shape: #ttnn.shape<1x7x4544>
tensor<[1,7,4544,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 142, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7x1>
tensor<[1,7,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x7x768>
tensor<[1,7,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x7x7x1>
tensor<[1,7,7,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x32>>, >
shape: #ttnn.shape<1x7x7x1024>
tensor<[1,7,7,1024,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x1>>, >
shape: #ttnn.shape<1x7x7x1>
tensor<[1,7,7,1,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x64>>, >
shape: #ttnn.shape<1x7x7x2048>
tensor<[1,7,7,2048,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 49 + d1 * 7 + d2, d3), memory_config: (2, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8x1>
tensor<[1,8,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x8x768>
tensor<[1,8,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 8 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9x1>
tensor<[1,9,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<1x9x1024>
tensor<[1,9,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9x1>
tensor<[1,9,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x9x128>
tensor<[1,9,128,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9x1>
tensor<[1,9,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<1x9x2048>
tensor<[1,9,2048,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9x1>
tensor<[1,9,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x128>>, >
shape: #ttnn.shape<1x9x4096>
tensor<[1,9,4096,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 128, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9x1>
tensor<[1,9,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<1x9x768>
tensor<[1,9,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 9 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x7x1>
tensor<[2,7,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x16>>, >
shape: #ttnn.shape<2x7x512>
tensor<[2,7,512,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 7 + d1, d2), memory_config: (1, 16, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<4x1x1>
tensor<[4,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x32>>, >
shape: #ttnn.shape<4x1x1024>
tensor<[4,1,1024,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (1, 32, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x1>>, >
shape: #ttnn.shape<920x1x1>
tensor<[920,1,1,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (29, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<29x8>>, >
shape: #ttnn.shape<920x1x256>
tensor<[920,1,256,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 + d1, d2), memory_config: (29, 8, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<25x2>>, >
shape: #ttnn.shape<16x49x49>
tensor<[16,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (25, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x10>
tensor<[1,10,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x11>
tensor<[1,11,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x12>
tensor<[1,12,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x13>
tensor<[1,13,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<3x14>
tensor<[3,14,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<4x14>
tensor<[4,14,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<7x14>
tensor<[7,14,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x14>
tensor<[1,14,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x15>
tensor<[1,15,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x16>
tensor<[1,16,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x17>
tensor<[1,17,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x18>
tensor<[1,18,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x19>
tensor<[1,19,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x10x10>
tensor<[1,1,10,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 10 + d1 * 10 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x12x12>
tensor<[1,1,12,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 * 12 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x13x13>
tensor<[1,1,13,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 13 + d1 * 13 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x14x14>
tensor<[1,1,14,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 14 + d1 * 14 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x16x16>
tensor<[1,1,16,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 16 + d1 * 16 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x10>
tensor<[1,1,1,10,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x11>
tensor<[1,1,1,11,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x12>
tensor<[1,1,1,12,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x13>
tensor<[1,1,1,13,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x14>
tensor<[1,1,1,14,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x15>
tensor<[1,1,1,15,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x16>
tensor<[1,1,1,16,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x17>
tensor<[1,1,1,17,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x18>
tensor<[1,1,1,18,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x19>
tensor<[1,1,1,19,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x20>
tensor<[1,1,1,20,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x21>
tensor<[1,1,1,21,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x22>
tensor<[1,1,1,22,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x23>
tensor<[1,1,1,23,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x24>
tensor<[1,1,1,24,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x25>
tensor<[1,1,1,25,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x26>
tensor<[1,1,1,26,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x27>
tensor<[1,1,1,27,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x28>
tensor<[1,1,1,28,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x29>
tensor<[1,1,1,29,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x46>
tensor<[1,12,1,46,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x46>
tensor<[1,1,1,46,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x47>
tensor<[1,12,1,47,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x47>
tensor<[1,1,1,47,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x48>
tensor<[1,12,1,48,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x48>
tensor<[1,1,1,48,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x49>
tensor<[1,12,1,49,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x49>
tensor<[1,1,1,49,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x50>
tensor<[1,12,1,50,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x50>
tensor<[1,1,1,50,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x51>
tensor<[1,12,1,51,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x51>
tensor<[1,1,1,51,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x52>
tensor<[1,12,1,52,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x52>
tensor<[1,1,1,52,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x53>
tensor<[1,12,1,53,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x53>
tensor<[1,1,1,53,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x54>
tensor<[1,12,1,54,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x54>
tensor<[1,1,1,54,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x55>
tensor<[1,12,1,55,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x55>
tensor<[1,1,1,55,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x56>
tensor<[1,12,1,56,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x56>
tensor<[1,1,1,56,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x57>
tensor<[1,12,1,57,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x57>
tensor<[1,1,1,57,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x58>
tensor<[1,12,1,58,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x58>
tensor<[1,1,1,58,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x59>
tensor<[1,12,1,59,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x59>
tensor<[1,1,1,59,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x60>
tensor<[1,12,1,60,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x60>
tensor<[1,1,1,60,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x61>
tensor<[1,12,1,61,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x61>
tensor<[1,1,1,61,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x62>
tensor<[1,12,1,62,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x62>
tensor<[1,1,1,62,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x63>
tensor<[1,12,1,63,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x63>
tensor<[1,1,1,63,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x12x1x64>
tensor<[1,12,1,64,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<1x1x1x64>
tensor<[1,1,1,64,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x65>
tensor<[1,12,1,65,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x65>
tensor<[1,1,1,65,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x66>
tensor<[1,12,1,66,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x66>
tensor<[1,1,1,66,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x67>
tensor<[1,12,1,67,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x67>
tensor<[1,1,1,67,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x68>
tensor<[1,12,1,68,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x68>
tensor<[1,1,1,68,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x69>
tensor<[1,12,1,69,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x69>
tensor<[1,1,1,69,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x6>
tensor<[1,1,1,6,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x70>
tensor<[1,12,1,70,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x70>
tensor<[1,1,1,70,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x71>
tensor<[1,12,1,71,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x71>
tensor<[1,1,1,71,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x72>
tensor<[1,12,1,72,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x72>
tensor<[1,1,1,72,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x73>
tensor<[1,12,1,73,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x73>
tensor<[1,1,1,73,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x74>
tensor<[1,12,1,74,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x74>
tensor<[1,1,1,74,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x75>
tensor<[1,12,1,75,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x75>
tensor<[1,1,1,75,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x76>
tensor<[1,12,1,76,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x76>
tensor<[1,1,1,76,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x77>
tensor<[1,12,1,77,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x77>
tensor<[1,1,1,77,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x78>
tensor<[1,12,1,78,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x78>
tensor<[1,1,1,78,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x79>
tensor<[1,12,1,79,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x79>
tensor<[1,1,1,79,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x7>
tensor<[1,1,1,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x80>
tensor<[1,12,1,80,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
NameInput ShapesInput LayoutsAttributesOutput ShapesOutput LayoutsPCCATOL
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x80>
tensor<[1,1,1,80,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x81>
tensor<[1,12,1,81,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x81>
tensor<[1,1,1,81,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x82>
tensor<[1,12,1,82,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x82>
tensor<[1,1,1,82,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x83>
tensor<[1,12,1,83,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x83>
tensor<[1,1,1,83,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x84>
tensor<[1,12,1,84,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x84>
tensor<[1,1,1,84,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x85>
tensor<[1,12,1,85,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x85>
tensor<[1,1,1,85,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x86>
tensor<[1,12,1,86,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x86>
tensor<[1,1,1,86,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x87>
tensor<[1,12,1,87,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x87>
tensor<[1,1,1,87,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x88>
tensor<[1,12,1,88,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x88>
tensor<[1,1,1,88,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x89>
tensor<[1,12,1,89,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x89>
tensor<[1,1,1,89,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x8>
tensor<[1,1,1,8,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x90>
tensor<[1,12,1,90,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x90>
tensor<[1,1,1,90,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x91>
tensor<[1,12,1,91,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x91>
tensor<[1,1,1,91,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x92>
tensor<[1,12,1,92,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x92>
tensor<[1,1,1,92,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x93>
tensor<[1,12,1,93,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x93>
tensor<[1,1,1,93,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x94>
tensor<[1,12,1,94,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x94>
tensor<[1,1,1,94,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x95>
tensor<[1,12,1,95,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x95>
tensor<[1,1,1,95,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x12x1x96>
tensor<[1,12,1,96,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x3>>, >
shape: #ttnn.shape<1x1x1x96>
tensor<[1,1,1,96,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 3, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x97>
tensor<[1,12,1,97,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x1x97>
tensor<[1,1,1,97,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x98>
tensor<[1,12,1,98,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x1x98>
tensor<[1,1,1,98,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x12x1x99>
tensor<[1,12,1,99,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 12 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x4>>, >
shape: #ttnn.shape<1x1x1x99>
tensor<[1,1,1,99,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 4, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x1x9>
tensor<[1,1,1,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<8x8>>, >
shape: #ttnn.shape<1x1x256x256>
tensor<[1,1,256,256,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 256 + d1 * 256 + d2, d3), memory_config: (8, 8, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x25x25>
tensor<[1,1,25,25,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 25 + d1 * 25 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x32x32>
tensor<[1,1,32,32,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 32 + d1 * 32 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<17x2>>, >
shape: #ttnn.shape<1x12x45x45>
tensor<[1,12,45,45,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 540 + d1 * 45 + d2, d3), memory_config: (17, 2, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<1x1x45x45>
tensor<[1,1,45,45,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 45 + d1 * 45 + d2, d3), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x5x5>
tensor<[1,1,5,5,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 5 + d1 * 5 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x6x6>
tensor<[1,1,6,6,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 6 + d1 * 6 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x7x7>
tensor<[1,1,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x1x9x9>
tensor<[1,1,9,9,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 9 + d1 * 9 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x20>
tensor<[1,20,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x21>
tensor<[1,21,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x22>
tensor<[1,22,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x23>
tensor<[1,23,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x24>
tensor<[1,24,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x25>
tensor<[1,25,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x26>
tensor<[1,26,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x27>
tensor<[1,27,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<21x28>
tensor<[21,28,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<3x28>
tensor<[3,28,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<4x28>
tensor<[4,28,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x28>
tensor<[1,28,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x29>
tensor<[1,29,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1571>>, >
shape: #ttnn.shape<1x50257>
tensor<[1,50257,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1571, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<3x56>
tensor<[3,56,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<49x56>
tensor<[49,56,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x2>>, >
shape: #ttnn.shape<4x56>
tensor<[4,56,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x5>
tensor<[1,5,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x6>
tensor<[1,6,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x7>
tensor<[1,7,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x8>
tensor<[1,8,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<1x9>
tensor<[1,9,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<28x28>
tensor<[28,28,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<2x1x7x7>
tensor<[2,1,7,7,bf16]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 * 7 + d1 * 7 + d2, d3), memory_config: (1, 1, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<4x1x1x13>
tensor<[4,1,1,13,f32]>mapping_from: (d0, d1, d2, d3), mapping_to: (d0 + d1 + d2, d3), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<7x2>>, >
shape: #ttnn.shape<4x49x49>
tensor<[4,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (7, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<2x2>>, >
shape: #ttnn.shape<56x56>
tensor<[56,56,bf16]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (2, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<98x2>>, >
shape: #ttnn.shape<64x49x49>
tensor<[64,49,49,bf16]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 49 + d1, d2), memory_config: (98, 2, 'tile<32x32, bf16>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<6x6>
tensor<[6,6,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x1>
tensor<[8,1,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x64>>, >
shape: #ttnn.shape<8x2048>
tensor<[8,2048,f32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 64, 'tile<32x32, f32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x1>>, >
shape: #ttnn.shape<8x2>
tensor<[8,2,ui32]>mapping_from: (d0, d1), mapping_to: (d0, d1), memory_config: (1, 1, 'tile<32x32, u32>', 'dram')nannan
ttnn.empty!tt.device<#device>dtype: #tt.supportedDataTypes
layout: #ttnn.layout
memory_config: #ttnn.memory_config<#dram, <<1x24>>, >
shape: #ttnn.shape<2x13x768>
tensor<[2,13,768,f32]>mapping_from: (d0, d1, d2), mapping_to: (d0 * 13 + d1, d2), memory_config: (1, 24, 'tile<32x32, f32>', 'dram')nannan