// RUN: mlir-opt -split-input-file -convert-gpu-to-spirv -verify-diagnostics %s -o - | FileCheck %s

// NOTE(review): this test file appears to have had its newlines stripped in
// transit, which broke it (the first `//` comment on each flattened line
// swallowed the rest of that line, including code and FileCheck directives).
// The line structure is restored here. Some attributes (#spirv.vce,
// #spirv.entry_point_abi) look like they may also have lost their `<...>`
// parameter lists -- confirm against the original test.

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupFAdd %[[ARG]] : f32
    %reduced = gpu.all_reduce add %arg uniform {} : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %[[ARG]] : f32
    %reduced = gpu.all_reduce add %arg {} : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupIAdd %[[ARG]] : i32
    %reduced = gpu.all_reduce add %arg uniform {} : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %[[ARG]] : i32
    %reduced = gpu.all_reduce add %arg {} : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env =
#spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupFAdd %[[ARG]] : f32
    %reduced = gpu.subgroup_reduce add %arg uniform : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd "Subgroup" "Reduce" %[[ARG]] : f32
    %reduced = gpu.subgroup_reduce add %arg : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupIAdd %[[ARG]] : i32
    %reduced = gpu.subgroup_reduce add %arg uniform : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformIAdd "Subgroup" "Reduce" %[[ARG]] : i32
    %reduced = gpu.subgroup_reduce add %arg : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func
@test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.KHR.GroupFMul %[[ARG]] : f32
    %reduced = gpu.all_reduce mul %arg uniform {} : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %[[ARG]] : f32
    %reduced = gpu.all_reduce mul %arg {} : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.KHR.GroupIMul %[[ARG]] : i32
    %reduced = gpu.all_reduce mul %arg uniform {} : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %[[ARG]] : i32
    %reduced = gpu.all_reduce mul %arg {} : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.KHR.GroupFMul %[[ARG]] : f32
    %reduced =
gpu.subgroup_reduce mul %arg uniform : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformFMul "Subgroup" "Reduce" %[[ARG]] : f32
    %reduced = gpu.subgroup_reduce mul %arg : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.KHR.GroupIMul %[[ARG]] : i32
    %reduced = gpu.subgroup_reduce mul %arg uniform : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformIMul "Subgroup" "Reduce" %[[ARG]] : i32
    %reduced = gpu.subgroup_reduce mul %arg : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupFMin %[[ARG]] : f32
    %reduced = gpu.all_reduce minnumf %arg uniform {} : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env =
#spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %[[ARG]] : f32
    %reduced = gpu.all_reduce minnumf %arg {} : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupSMin %[[ARG]] : i32
    // CHECK: %{{.*}} = spirv.GroupUMin %[[ARG]] : i32
    %r0 = gpu.all_reduce minsi %arg uniform {} : (i32) -> (i32)
    %r1 = gpu.all_reduce minui %arg uniform {} : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformSMin "Workgroup" "Reduce" %[[ARG]] : i32
    // CHECK: %{{.*}} = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %[[ARG]] : i32
    %r0 = gpu.all_reduce minsi %arg {} : (i32) -> (i32)
    %r1 = gpu.all_reduce minui %arg {} : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupFMin %[[ARG]] : f32
    %reduced = gpu.subgroup_reduce minnumf %arg uniform : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env =
#spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformFMin "Subgroup" "Reduce" %[[ARG]] : f32
    %reduced = gpu.subgroup_reduce minnumf %arg : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupSMin %[[ARG]] : i32
    // CHECK: %{{.*}} = spirv.GroupUMin %[[ARG]] : i32
    %r0 = gpu.subgroup_reduce minsi %arg uniform : (i32) -> (i32)
    %r1 = gpu.subgroup_reduce minui %arg uniform : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformSMin "Subgroup" "Reduce" %[[ARG]] : i32
    // CHECK: %{{.*}} = spirv.GroupNonUniformUMin "Subgroup" "Reduce" %[[ARG]] : i32
    %r0 = gpu.subgroup_reduce minsi %arg : (i32) -> (i32)
    %r1 = gpu.subgroup_reduce minui %arg : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupFMax %[[ARG]] : f32
    %reduced = gpu.all_reduce maxnumf %arg uniform {} : (f32) -> (f32)
gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %[[ARG]] : f32
    %reduced = gpu.all_reduce maxnumf %arg {} : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupSMax %[[ARG]] : i32
    // CHECK: %{{.*}} = spirv.GroupUMax %[[ARG]] : i32
    %r0 = gpu.all_reduce maxsi %arg uniform {} : (i32) -> (i32)
    %r1 = gpu.all_reduce maxui %arg uniform {} : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %[[ARG]] : i32
    // CHECK: %{{.*}} = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %[[ARG]] : i32
    %r0 = gpu.all_reduce maxsi %arg {} : (i32) -> (i32)
    %r1 = gpu.all_reduce maxui %arg {} : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupFMax %[[ARG]] : f32
    %reduced = gpu.subgroup_reduce maxnumf %arg uniform : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: f32)
  gpu.func @test(%arg : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformFMax "Subgroup" "Reduce" %[[ARG]] : f32
    %reduced = gpu.subgroup_reduce maxnumf %arg : (f32) -> (f32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupSMax %[[ARG]] : i32
    // CHECK: %{{.*}} = spirv.GroupUMax %[[ARG]] : i32
    %r0 = gpu.subgroup_reduce maxsi %arg uniform : (i32) -> (i32)
    %r1 = gpu.subgroup_reduce maxui %arg uniform : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : i32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax "Subgroup" "Reduce" %[[ARG]] : i32
    // CHECK: %{{.*}} = spirv.GroupNonUniformUMax "Subgroup" "Reduce" %[[ARG]] : i32
    %r0 = gpu.subgroup_reduce maxsi %arg : (i32) -> (i32)
    %r1 = gpu.subgroup_reduce maxui %arg : (i32) -> (i32)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @test
  // CHECK-SAME: (%[[ARG:.*]]: i32)
  gpu.func @test(%arg : vector<1xi32>) kernel
attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax "Subgroup" "Reduce" %[[ARG]] : i32
    %r0 = gpu.subgroup_reduce maxsi %arg : (vector<1xi32>) -> (vector<1xi32>)
    gpu.return
  }
}
}

// -----

// TODO: Handle boolean reductions.

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  gpu.func @add(%arg : i1) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // expected-error @+1 {{failed to legalize operation 'gpu.subgroup_reduce'}}
    %r0 = gpu.subgroup_reduce add %arg : (i1) -> (i1)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  gpu.func @mul(%arg : i1) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // expected-error @+1 {{failed to legalize operation 'gpu.subgroup_reduce'}}
    %r0 = gpu.subgroup_reduce mul %arg : (i1) -> (i1)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  gpu.func @minsi(%arg : i1) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // expected-error @+1 {{failed to legalize operation 'gpu.subgroup_reduce'}}
    %r0 = gpu.subgroup_reduce minsi %arg : (i1) -> (i1)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  gpu.func @minui(%arg : i1) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // expected-error @+1 {{failed to legalize operation 'gpu.subgroup_reduce'}}
    %r0 = gpu.subgroup_reduce minui %arg : (i1) -> (i1)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  gpu.func
@maxsi(%arg : i1) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // expected-error @+1 {{failed to legalize operation 'gpu.subgroup_reduce'}}
    %r0 = gpu.subgroup_reduce maxsi %arg : (i1) -> (i1)
    gpu.return
  }
}
}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  gpu.func @maxui(%arg : i1) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // expected-error @+1 {{failed to legalize operation 'gpu.subgroup_reduce'}}
    %r0 = gpu.subgroup_reduce maxui %arg : (i1) -> (i1)
    gpu.return
  }
}
}

// -----

// Vector reductions need to be lowered to scalar reductions first.
module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce, #spirv.resource_limits<>>
} {
gpu.module @kernels {
  gpu.func @maxui(%arg : vector<2xi32>) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
    // expected-error @+1 {{failed to legalize operation 'gpu.subgroup_reduce'}}
    %r0 = gpu.subgroup_reduce maxui %arg : (vector<2xi32>) -> (vector<2xi32>)
    gpu.return
  }
}
}