// RUN: mlir-opt %s -convert-linalg-to-affine-loops | FileCheck %s

// Test that we can lower all the way to LLVM without crashing, don't check results here.
// RUN: mlir-opt %s -convert-linalg-to-affine-loops -test-lower-to-llvm -o=/dev/null 2>&1

func.func @matmul(%arg0: memref<?xi8>,
                  %M: index, %N: index, %K: index) {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %A = memref.view %arg0[%c0][%M, %K] : memref<?xi8> to memref<?x?xf32>
  %B = memref.view %arg0[%c0][%K, %N] : memref<?xi8> to memref<?x?xf32>
  %C = memref.view %arg0[%c0][%M, %N] : memref<?xi8> to memref<?x?xf32>
  linalg.matmul ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
                outs(%C: memref<?x?xf32>)
  return
}
// See the commented sketch at the end of this file for the shape of the affine
// loop nest this matmul lowers to.

//----------------------------------------------------------------------------//
// Named ops to loops.
//----------------------------------------------------------------------------//
func.func @named_batch_matmul(%A: memref<?x?x?xf32>, %B: memref<?x?x?xf32>, %C: memref<?x?x?xf32>) {
  linalg.batch_matmul ins(%A, %B: memref<?x?x?xf32>, memref<?x?x?xf32>)
                      outs(%C : memref<?x?x?xf32>)
  return
}
// CHECK-LABEL: @named_batch_matmul
//  CHECK-SAME: %[[mA:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
//  CHECK-SAME: %[[mB:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
//  CHECK-SAME: %[[mC:[a-zA-Z0-9]+]]: memref<?x?x?xf32>
//       CHECK: %[[B:.*]] = memref.dim %[[mA]], %c0 : memref<?x?x?xf32>
//       CHECK: %[[M:.*]] = memref.dim %[[mA]], %c1 : memref<?x?x?xf32>
//       CHECK: %[[K:.*]] = memref.dim %[[mA]], %c2 : memref<?x?x?xf32>
//       CHECK: %[[N:.*]] = memref.dim %[[mB]], %c2 : memref<?x?x?xf32>
//       CHECK: affine.for %[[b:.*]] = {{.*}}0 to %[[B]] {
//       CHECK:   affine.for %[[m:.*]] = {{.*}}0 to %[[M]] {
//       CHECK:     affine.for %[[n:.*]] = {{.*}}0 to %[[N]] {
//       CHECK:       affine.for %[[k:.*]] = {{.*}}0 to %[[K]] {
//       CHECK:         %[[va:.*]] = affine.load %[[mA]][%[[b]], %[[m]], %[[k]]] : memref<?x?x?xf32>
//       CHECK:         %[[vb:.*]] = affine.load %[[mB]][%[[b]], %[[k]], %[[n]]] : memref<?x?x?xf32>
//       CHECK:         %[[vc:.*]] = affine.load %[[mC]][%[[b]], %[[m]], %[[n]]] : memref<?x?x?xf32>
//       CHECK:         %[[inc:.*]] = arith.mulf %[[va]], %[[vb]] : f32
//       CHECK:         %[[res:.*]] = arith.addf %[[vc]], %[[inc]] : f32
//       CHECK:         affine.store %[[res]], %[[mC]][%[[b]], %[[m]], %[[n]]] : memref<?x?x?xf32>
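
// For reference only (not verified by the CHECK lines above): a hand-written
// sketch of the affine loop nest that -convert-linalg-to-affine-loops is
// expected to produce for the 2-D linalg.matmul in @matmul, mirroring the
// structure checked for the batched case. The loop variables and SSA value
// names (%i, %j, %k, %a, %b, %c, %p, %s) are illustrative assumptions, not
// the pass's exact output.
//
//   affine.for %i = 0 to %M {
//     affine.for %j = 0 to %N {
//       affine.for %k = 0 to %K {
//         // Load the operands, accumulate into C[i, j], and store back.
//         %a = affine.load %A[%i, %k] : memref<?x?xf32>
//         %b = affine.load %B[%k, %j] : memref<?x?xf32>
//         %c = affine.load %C[%i, %j] : memref<?x?xf32>
//         %p = arith.mulf %a, %b : f32
//         %s = arith.addf %c, %p : f32
//         affine.store %s, %C[%i, %j] : memref<?x?xf32>
//       }
//     }
//   }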