// RUN: mlir-opt %s --stage-sparse-ops --lower-sparse-ops-to-foreach --canonicalize --cse | FileCheck %s

#SparseVector64 = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed),
  posWidth = 64,
  crdWidth = 64
}>

#SparseVector32 = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed),
  posWidth = 32,
  crdWidth = 32
}>

#SparseVector = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed)
}>

#SortedCOO2D = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton),
}>

#SortedCOO3D = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton)
}>

#TsssPermuted = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d2 : compressed, d0 : compressed, d1 : compressed)
}>

#COOSlice = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(2, 2, 1)>, d1 : #sparse_tensor<slice(12, 13, 1)>) -> (d0 : compressed(nonunique), d1 : singleton)
}>

// CHECK-LABEL: func.func @sparse_nop_convert
// CHECK-NEXT: return
func.func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
  return %0 : tensor<64xf32, #SparseVector>
}

// CHECK-LABEL: func.func @sparse_hidden_nop_cast
// CHECK-NEXT: sparse_tensor.convert
// CHECK-NEXT: return
func.func @sparse_hidden_nop_cast(%arg0: tensor<32xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<32xf32, #SparseVector> to tensor<?xf32, #SparseVector>
  return %0 : tensor<?xf32, #SparseVector>
}

// CHECK-LABEL: func.func @sparse_convert_1d_ss(
// CHECK-NEXT: sparse_tensor.convert
// CHECK-NEXT: return
func.func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
  return %0 : tensor<?xf32, #SparseVector32>
}

// CHECK-LABEL: func.func @sparse_convert(
// CHECK-NEXT: sparse_tensor.convert
// CHECK-NEXT: return
func.func @sparse_convert(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
  return %0 : tensor<?xf32, #SparseVector32>
}

// CHECK-LABEL: func.func @sparse_convert_permuted
// CHECK: sparse_tensor.foreach
// CHECK: tensor.insert
// CHECK: sparse_tensor.load
// CHECK: sparse_tensor.reorder_coo
// CHECK: sparse_tensor.foreach
// CHECK: tensor.insert
// CHECK: sparse_tensor.load
// CHECK: return
func.func @sparse_convert_permuted(%arg0: tensor<?x?x?xf32, #SortedCOO3D>) -> tensor<?x?x?xf32, #TsssPermuted> {
  %0 = sparse_tensor.convert %arg0 : tensor<?x?x?xf32, #SortedCOO3D> to tensor<?x?x?xf32, #TsssPermuted>
  return %0 : tensor<?x?x?xf32, #TsssPermuted>
}

// CHECK-LABEL: func.func @sparse_convert_slice
// CHECK: sparse_tensor.foreach
// CHECK: tensor.insert
// CHECK: sparse_tensor.load
// CHECK-NOT: sparse_tensor.reorder_coo
// CHECK: return
func.func @sparse_convert_slice(%arg0: tensor<2x13xi32, #COOSlice>) -> (tensor<2x13xi32, #SortedCOO2D>) {
  %0 = sparse_tensor.convert %arg0 : tensor<2x13xi32, #COOSlice> to tensor<2x13xi32, #SortedCOO2D>
  return %0 : tensor<2x13xi32, #SortedCOO2D>
}