; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mattr=+sve < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

;
; ASR
;

define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: asr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: asr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: asr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; ASR (immediate)
;

define <vscale x 16 x i8> @asr_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: asr_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.b, z0.b, #3
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %imm.splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asr_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: asr_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.h, z0.h, #4
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %imm.splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asr_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: asr_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.s, z0.s, #5
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %imm.splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @asr_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: asr_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr z0.d, z0.d, #6
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %imm.splat)
  ret <vscale x 2 x i64> %out
}

;
; LSL
;

define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: lsl_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: lsl_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: lsl_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; LSL (immediate)
;

define <vscale x 16 x i8> @lsl_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: lsl_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.b, z0.b, #7
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %imm.splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsl_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: lsl_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.h, z0.h, #8
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 8 x i16> undef, i16 8, i32 0
  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %imm.splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsl_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: lsl_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.s, z0.s, #9
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 4 x i32> undef, i32 9, i32 0
  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %imm.splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsl_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: lsl_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl z0.d, z0.d, #10
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 2 x i64> undef, i64 10, i32 0
  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %imm.splat)
  ret <vscale x 2 x i64> %out
}

;
; LSR
;

define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: lsr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: lsr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: lsr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; LSR (immediate)
;

define <vscale x 16 x i8> @lsr_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: lsr_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.b, z0.b, #8
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %imm.splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsr_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: lsr_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.h, z0.h, #12
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 8 x i16> undef, i16 12, i32 0
  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %imm.splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsr_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: lsr_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.s, z0.s, #13
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 4 x i32> undef, i32 13, i32 0
  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %imm.splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsr_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: lsr_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr z0.d, z0.d, #14
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 2 x i64> undef, i64 14, i32 0
  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %imm.splat)
  ret <vscale x 2 x i64> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)