// RUN: mlir-opt %s --stage-sparse-ops --lower-sparse-ops-to-foreach --canonicalize --cse | FileCheck %s

// 1-D compressed (sparse vector) encoding with 64-bit position/coordinate storage.
#SparseVector64 = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed),
  posWidth = 64,
  crdWidth = 64
}>

// 1-D compressed (sparse vector) encoding with 32-bit position/coordinate storage.
#SparseVector32 = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed),
  posWidth = 32,
  crdWidth = 32
}>

// 1-D compressed (sparse vector) encoding with default index widths.
#SparseVector = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed)
}>

// 2-D sorted COO encoding (compressed outer dim, singleton inner dim).
// NOTE: the original had a trailing comma after the `map` entry, which is
// invalid syntax inside the dictionary attribute; it is removed here.
#SortedCOO2D = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

// 3-D sorted COO encoding (compressed outer dim, singleton inner dims).
#SortedCOO3D = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton)
}>

// 3-D all-compressed encoding with a permuted dimension ordering (d2, d0, d1).
#TsssPermuted = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d2 : compressed, d0 : compressed, d1 : compressed)
}>

// 2-D sorted COO encoding on a slice view (offset/size/stride per dimension).
#COOSlice = #sparse_tensor.encoding<{
  map = (d0 : #sparse_tensor<slice(2, 2, 1)>, d1 : #sparse_tensor<slice(12, 13, 1)>) -> (d0 : compressed(nonunique), d1 : singleton)
}>

// A convert between identical source and destination types folds away
// completely (only the return remains).
// CHECK-LABEL: func.func @sparse_nop_convert
// CHECK-NEXT: return
func.func @sparse_nop_convert(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
  return %0 : tensor<64xf32, #SparseVector>
}

// A convert that only changes a static dimension to dynamic is kept as a
// single convert op (no expansion into foreach/insert loops).
// CHECK-LABEL: func.func @sparse_hidden_nop_cast
// CHECK-NEXT: sparse_tensor.convert
// CHECK-NEXT: return
func.func @sparse_hidden_nop_cast(%arg0: tensor<32xf32, #SparseVector>) -> tensor<?xf32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<32xf32, #SparseVector> to tensor<?xf32, #SparseVector>
  return %0 : tensor<?xf32, #SparseVector>
}

// A sparse-to-sparse convert that only changes index bit widths (64 -> 32)
// stays a single convert op after staging.
// CHECK-LABEL: func.func @sparse_convert_1d_ss(
// CHECK-NEXT: sparse_tensor.convert
// CHECK-NEXT: return
func.func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
  return %0 : tensor<?xf32, #SparseVector32>
}

// Same width-only conversion as above; checked under a second function name.
// CHECK-LABEL: func.func @sparse_convert(
// CHECK-NEXT: sparse_tensor.convert
// CHECK-NEXT: return
func.func @sparse_convert(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
  return %0 : tensor<?xf32, #SparseVector32>
}

// Converting COO into an encoding with a permuted dimension ordering requires
// a reorder: the lowering emits a foreach/insert stage, a reorder_coo, and a
// second foreach/insert stage.
// CHECK-LABEL: func.func @sparse_convert_permuted
// CHECK: sparse_tensor.foreach
// CHECK: tensor.insert
// CHECK: sparse_tensor.load
// CHECK: sparse_tensor.reorder_coo
// CHECK: sparse_tensor.foreach
// CHECK: tensor.insert
// CHECK: sparse_tensor.load
// CHECK: return
func.func @sparse_convert_permuted(%arg0: tensor<?x?x?xf32, #SortedCOO3D>) -> tensor<?x?x?xf32, #TsssPermuted> {
  %0 = sparse_tensor.convert %arg0 : tensor<?x?x?xf32, #SortedCOO3D> to tensor<?x?x?xf32, #TsssPermuted>
  return %0 : tensor<?x?x?xf32, #TsssPermuted>
}

// Converting a COO slice to sorted COO with the same dimension ordering needs
// a single foreach/insert stage and no reorder_coo.
// CHECK-LABEL: func.func @sparse_convert_slice
// CHECK: sparse_tensor.foreach
// CHECK: tensor.insert
// CHECK: sparse_tensor.load
// CHECK-NOT: sparse_tensor.reorder_coo
// CHECK: return
func.func @sparse_convert_slice(%arg0: tensor<2x13xi32, #COOSlice>) -> (tensor<2x13xi32, #SortedCOO2D>) {
  %0 = sparse_tensor.convert %arg0 : tensor<2x13xi32, #COOSlice> to tensor<2x13xi32, #SortedCOO2D>
  return %0 : tensor<2x13xi32, #SortedCOO2D>
}