; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; INDEX (IMMEDIATES)
;

define <vscale x 16 x i8> @index_ii_i8() {
; CHECK-LABEL: index_ii_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.b, #-16, #15
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 -16, i8 15)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @index_ii_i16() {
; CHECK-LABEL: index_ii_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.h, #15, #-16
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 15, i16 -16)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @index_ii_i32() {
; CHECK-LABEL: index_ii_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, #-16, #15
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 -16, i32 15)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @index_ii_i64() {
; CHECK-LABEL: index_ii_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.d, #15, #-16
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 15, i64 -16)
  ret <vscale x 2 x i64> %out
}

; Both operands are outside the valid immediate range [-16, 15], so they are
; materialised in registers.
define <vscale x 2 x i64> @index_ii_range() {
; CHECK-LABEL: index_ii_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #16 // =0x10
; CHECK-NEXT:    mov x9, #-17 // =0xffffffffffffffef
; CHECK-NEXT:    index z0.d, x9, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 -17, i64 16)
  ret <vscale x 2 x i64> %out
}

; (index(0, 2) << 2) + 2 is recognised as index(0, 8) with the +2 offset
; applied via an ORR immediate.
define <vscale x 8 x i16> @index_ii_range_combine(i16 %a) {
; CHECK-LABEL: index_ii_range_combine:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.h, #0, #8
; CHECK-NEXT:    orr z0.h, z0.h, #0x2
; CHECK-NEXT:    ret
  %val = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
  %val1 = shufflevector <vscale x 8 x i16> %val, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %val2 = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 0, i16 2)
  %val3 = shl <vscale x 8 x i16> %val2, %val1
  %out = add <vscale x 8 x i16> %val3, %val1
  ret <vscale x 8 x i16> %out
}

;
; INDEX (IMMEDIATE, SCALAR)
;

define <vscale x 16 x i8> @index_ir_i8(i8 %a) {
; CHECK-LABEL: index_ir_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.b, #15, w0
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 15, i8 %a)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @index_ir_i16(i16 %a) {
; CHECK-LABEL: index_ir_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.h, #-16, w0
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 -16, i16 %a)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @index_ir_i32(i32 %a) {
; CHECK-LABEL: index_ir_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, #15, w0
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 15, i32 %a)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @index_ir_i64(i64 %a) {
; CHECK-LABEL: index_ir_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.d, #-16, x0
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 -16, i64 %a)
  ret <vscale x 2 x i64> %out
}

; The start value -17 is outside the valid immediate range [-16, 15], so it is
; materialised in a register.
define <vscale x 4 x i32> @index_ir_range(i32 %a) {
; CHECK-LABEL: index_ir_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-17 // =0xffffffef
; CHECK-NEXT:    index z0.s, w8, w0
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 -17, i32 %a)
  ret <vscale x 4 x i32> %out
}

; (index(2, 1) - 2) * %a combines to index(0, %a).
define <vscale x 4 x i32> @index_ir_range_combine(i32 %a) {
; CHECK-LABEL: index_ir_range_combine:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, #0, w0
; CHECK-NEXT:    ret
  %val = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
  %val1 = shufflevector <vscale x 4 x i32> %val, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %tmp = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 2, i32 1)
  %tmp1 = sub <vscale x 4 x i32> %tmp, %val1
  %val2 = insertelement <vscale x 4 x i32> poison, i32 %a, i32 0
  %val3 = shufflevector <vscale x 4 x i32> %val2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %out = mul <vscale x 4 x i32> %tmp1, %val3
  ret <vscale x 4 x i32> %out
}

;
; INDEX (SCALAR, IMMEDIATE)
;

define <vscale x 16 x i8> @index_ri_i8(i8 %a) {
; CHECK-LABEL: index_ri_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.b, w0, #-16
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 %a, i8 -16)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @index_ri_i16(i16 %a) {
; CHECK-LABEL: index_ri_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.h, w0, #15
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 %a, i16 15)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @index_ri_i32(i32 %a) {
; CHECK-LABEL: index_ri_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, w0, #-16
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 %a, i32 -16)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @index_ri_i64(i64 %a) {
; CHECK-LABEL: index_ri_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.d, x0, #15
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 %a, i64 15)
  ret <vscale x 2 x i64> %out
}

; The step value 16 is outside the valid immediate range [-16, 15], so it is
; materialised in a register.
define <vscale x 8 x i16> @index_ri_range(i16 %a) {
; CHECK-LABEL: index_ri_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #16 // =0x10
; CHECK-NEXT:    index z0.h, w0, w8
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 %a, i16 16)
  ret <vscale x 8 x i16> %out
}

;
; INDEX (SCALARS)
;

define <vscale x 16 x i8> @index_rr_i8(i8 %a, i8 %b) {
; CHECK-LABEL: index_rr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 %a, i8 %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @index_rr_i16(i16 %a, i16 %b) {
; CHECK-LABEL: index_rr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 %a, i16 %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @index_rr_i32(i32 %a, i32 %b) {
; CHECK-LABEL: index_rr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 %a, i32 %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @index_rr_i64(i64 %a, i64 %b) {
; CHECK-LABEL: index_rr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 %a, i64 %b)
  ret <vscale x 2 x i64> %out
}

; index(0, 1) * %b + %a combines to index(%a, %b).
define <vscale x 4 x i32> @index_rr_i32_combine(i32 %a, i32 %b) {
; CHECK-LABEL: index_rr_i32_combine:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, w0, w1
; CHECK-NEXT:    ret
  %val = insertelement <vscale x 4 x i32> poison, i32 %a, i32 0
  %val1 = shufflevector <vscale x 4 x i32> %val, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %val2 = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %val3 = shufflevector <vscale x 4 x i32> %val2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %tmp = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 0, i32 1)
  %tmp1 = mul <vscale x 4 x i32> %tmp, %val3
  %out = add <vscale x 4 x i32> %tmp1, %val1
  ret <vscale x 4 x i32> %out
}

; Not combined into a single INDEX: the initial index vector has a second use
; in the final add.
define <vscale x 4 x i32> @index_rr_i32_not_combine(i32 %a, i32 %b) {
; CHECK-LABEL: index_rr_i32_not_combine:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    index z0.s, #0, #1
; CHECK-NEXT:    mov z1.s, w0
; CHECK-NEXT:    mov z2.s, w1
; CHECK-NEXT:    mla z1.s, p0/m, z0.s, z2.s
; CHECK-NEXT:    add z0.s, z1.s, z0.s
; CHECK-NEXT:    ret
  %val = insertelement <vscale x 4 x i32> poison, i32 %a, i32 0
  %val1 = shufflevector <vscale x 4 x i32> %val, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %val2 = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %val3 = shufflevector <vscale x 4 x i32> %val2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %tmp = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 0, i32 1)
  %tmp1 = mul <vscale x 4 x i32> %tmp, %val3
  %tmp2 = add <vscale x 4 x i32> %tmp1, %val1
  %out = add <vscale x 4 x i32> %tmp2, %tmp
  ret <vscale x 4 x i32> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8, i8)
declare <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16, i16)
declare <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64, i64)