; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-linux-unknown | FileCheck %s

; Ensure we use a "vscale x 4" wide scatter for the maximum supported offset.
define void @scatter_i8_index_offset_maximum(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
; CHECK-LABEL: scatter_i8_index_offset_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #33554431 // =0x1ffffff
; CHECK-NEXT:    index z1.s, #0, w8
; CHECK-NEXT:    add x8, x0, x1
; CHECK-NEXT:    st1b { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT:    ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t2 = insertelement <vscale x 4 x i64> undef, i64 33554431, i32 0
  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t4 = mul <vscale x 4 x i64> %t3, %step
  %t5 = add <vscale x 4 x i64> %t1, %t4
  %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure we use a "vscale x 4" wide scatter for the minimum supported offset.
define void @scatter_i16_index_offset_minimum(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i16> %data) #0 {
; CHECK-LABEL: scatter_i16_index_offset_minimum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-33554432 // =0xfe000000
; CHECK-NEXT:    index z1.s, #0, w8
; CHECK-NEXT:    add x8, x0, x1, lsl #1
; CHECK-NEXT:    st1h { z0.s }, p0, [x8, z1.s, sxtw #1]
; CHECK-NEXT:    ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t2 = insertelement <vscale x 4 x i64> undef, i64 -33554432, i32 0
  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t4 = mul <vscale x 4 x i64> %t3, %step
  %t5 = add <vscale x 4 x i64> %t1, %t4
  %t6 = getelementptr i16, ptr %base, <vscale x 4 x i64> %t5
  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure we use a "vscale x 4" gather for an offset within the limits of 32 bits.
define <vscale x 4 x i8> @gather_i8_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg) #0 {
; CHECK-LABEL: gather_i8_index_offset_8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, #0, #1
; CHECK-NEXT:    add x8, x0, x1
; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT:    ret
  %splat.insert0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %splat0 = shufflevector <vscale x 4 x i64> %splat.insert0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %splat.insert1 = insertelement <vscale x 4 x i64> undef, i64 1, i32 0
  %splat1 = shufflevector <vscale x 4 x i64> %splat.insert1, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t1 = mul <vscale x 4 x i64> %splat1, %step
  %t2 = add <vscale x 4 x i64> %splat0, %t1
  %t3 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t2
  %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %t3, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> undef)
  ret <vscale x 4 x i8> %load
}
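; Note on the bounds used above: with vscale_range(1, 16) a <vscale x 4 x i64>
; index vector has at most 4 * 16 = 64 lanes, and 64 * 33554431 (0x1ffffff)
; still fits in a signed 32-bit value while 64 * 33554432 does not; likewise
; 64 * -33554432 just fits while 64 * -33554433 does not. This is consistent
; with 33554431 / -33554432 being the largest/smallest strides for which the
; 32-bit sxtw-extended addressing mode can be used (the negative tests below
; step just outside these bounds).
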
;; Negative tests

; Ensure we don't use a "vscale x 4" scatter. We cannot prove that the variable
; stride will not wrap when shrunk to be i32 based.
define void @scatter_f16_index_offset_var(ptr %base, i64 %offset, i64 %scale, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
; CHECK-LABEL: scatter_f16_index_offset_var:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z1.d, #0, #1
; CHECK-NEXT:    ptrue p1.d
; CHECK-NEXT:    mov z2.d, x1
; CHECK-NEXT:    movprfx z4, z2
; CHECK-NEXT:    mla z4.d, p1/m, z1.d, z2.d
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    uunpklo z3.d, z0.s
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    incd z1.d
; CHECK-NEXT:    st1h { z3.d }, p2, [x0, z4.d, lsl #1]
; CHECK-NEXT:    mad z1.d, p1/m, z2.d, z2.d
; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, lsl #1]
; CHECK-NEXT:    ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t2 = insertelement <vscale x 4 x i64> undef, i64 %scale, i32 0
  %t3 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t4 = mul <vscale x 4 x i64> %t3, %step
  %t5 = add <vscale x 4 x i64> %t1, %t4
  %t6 = getelementptr half, ptr %base, <vscale x 4 x i64> %t5
  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure we don't use a "vscale x 4" wide scatter when the offset is too big.
define void @scatter_i8_index_offset_maximum_plus_one(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
; CHECK-LABEL: scatter_i8_index_offset_maximum_plus_one:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    mov w8, #33554432 // =0x2000000
; CHECK-NEXT:    uunpklo z2.d, z0.s
; CHECK-NEXT:    index z1.d, #0, x8
; CHECK-NEXT:    rdvl x9, #1
; CHECK-NEXT:    add x8, x0, x1
; CHECK-NEXT:    lsr x9, x9, #4
; CHECK-NEXT:    mov w10, #67108864 // =0x4000000
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    st1b { z2.d }, p1, [x8, z1.d]
; CHECK-NEXT:    madd x8, x9, x10, x8
; CHECK-NEXT:    st1b { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t2 = insertelement <vscale x 4 x i64> undef, i64 33554432, i32 0
  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t4 = mul <vscale x 4 x i64> %t3, %step
  %t5 = add <vscale x 4 x i64> %t1, %t4
  %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure we don't use a "vscale x 4" wide scatter when the offset is too small.
define void @scatter_i8_index_offset_minimum_minus_one(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
; CHECK-LABEL: scatter_i8_index_offset_minimum_minus_one:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    mov x8, #-33554433 // =0xfffffffffdffffff
; CHECK-NEXT:    uunpklo z2.d, z0.s
; CHECK-NEXT:    index z1.d, #0, x8
; CHECK-NEXT:    rdvl x9, #1
; CHECK-NEXT:    mov x10, #-2 // =0xfffffffffffffffe
; CHECK-NEXT:    lsr x9, x9, #4
; CHECK-NEXT:    add x8, x0, x1
; CHECK-NEXT:    movk x10, #64511, lsl #16
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    st1b { z2.d }, p1, [x8, z1.d]
; CHECK-NEXT:    madd x8, x9, x10, x8
; CHECK-NEXT:    st1b { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t2 = insertelement <vscale x 4 x i64> undef, i64 -33554433, i32 0
  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t4 = mul <vscale x 4 x i64> %t3, %step
  %t5 = add <vscale x 4 x i64> %t1, %t4
  %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure we don't use a "vscale x 4" wide scatter when the stride is too big.
define void @scatter_i8_index_stride_too_big(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
; CHECK-LABEL: scatter_i8_index_stride_too_big:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    mov x8, #4611686018427387904 // =0x4000000000000000
; CHECK-NEXT:    uunpklo z2.d, z0.s
; CHECK-NEXT:    index z1.d, #0, x8
; CHECK-NEXT:    rdvl x9, #1
; CHECK-NEXT:    add x8, x0, x1
; CHECK-NEXT:    lsr x9, x9, #4
; CHECK-NEXT:    mov x10, #-9223372036854775808 // =0x8000000000000000
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    st1b { z2.d }, p1, [x8, z1.d]
; CHECK-NEXT:    madd x8, x9, x10, x8
; CHECK-NEXT:    st1b { z0.d }, p0, [x8, z1.d]
; CHECK-NEXT:    ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %t2 = insertelement <vscale x 4 x i64> undef, i64 4611686018427387904, i32 0
  %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t4 = mul <vscale x 4 x i64> %t3, %step
  %t5 = add <vscale x 4 x i64> %t1, %t4
  %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure the resulting load is "vscale x 4" wide, despite the offset giving the
; impression the gather must be split due to its offset.
; gather_i8(base, index(offset, 8 * sizeof(i8)))
define <vscale x 4 x i8> @gather_8i8_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg) #0 {
; CHECK-LABEL: gather_8i8_index_offset_8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, #0, #8
; CHECK-NEXT:    add x8, x0, x1, lsl #3
; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT:    ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t2 = add <vscale x 4 x i64> %t1, %step
  %t3 = getelementptr [8 x i8], ptr %base, <vscale x 4 x i64> %t2
  %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
  %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %t4, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> undef)
  ret <vscale x 4 x i8> %load
}

; Ensure the resulting load is "vscale x 4" wide, despite the offset giving the
; impression the gather must be split due to its offset.
; gather_f32(base, index(offset, 8 * sizeof(float)))
define <vscale x 4 x float> @gather_f32_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg) #0 {
; CHECK-LABEL: gather_f32_index_offset_8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32 // =0x20
; CHECK-NEXT:    index z0.s, #0, w8
; CHECK-NEXT:    add x8, x0, x1, lsl #5
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8, z0.s, sxtw]
; CHECK-NEXT:    ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t2 = add <vscale x 4 x i64> %t1, %step
  %t3 = getelementptr [8 x float], ptr %base, <vscale x 4 x i64> %t2
  %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
  %load = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %t4, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x float> undef)
  ret <vscale x 4 x float> %load
}
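; In the two gathers above the variable %offset is folded into the scalar base
; ("add x8, x0, x1, lsl #3" and "add x8, x0, x1, lsl #5"), so only the constant
; per-lane step of 8 * sizeof(element) bytes (8 for i8, 32 for float) needs to
; fit in the 32-bit sxtw offsets and no splitting of the gather is required.
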
; Ensure the resulting store is "vscale x 4" wide, despite the offset giving the
; impression the scatter must be split due to its offset.
; scatter_i8(base, index(offset, 8 * sizeof(i8)))
define void @scatter_i8_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
; CHECK-LABEL: scatter_i8_index_offset_8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z1.s, #0, #8
; CHECK-NEXT:    add x8, x0, x1, lsl #3
; CHECK-NEXT:    st1b { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT:    ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t2 = add <vscale x 4 x i64> %t1, %step
  %t3 = getelementptr [8 x i8], ptr %base, <vscale x 4 x i64> %t2
  %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t4, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; Ensure the resulting store is "vscale x 4" wide, despite the offset giving the
; impression the scatter must be split due to its offset.
; scatter_f16(base, index(offset, 8 * sizeof(half)))
define void @scatter_f16_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
; CHECK-LABEL: scatter_f16_index_offset_8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #16 // =0x10
; CHECK-NEXT:    index z1.s, #0, w8
; CHECK-NEXT:    add x8, x0, x1, lsl #4
; CHECK-NEXT:    st1h { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT:    ret
  %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %t2 = add <vscale x 4 x i64> %t1, %step
  %t3 = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %t2
  %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %t4, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

; stepvector is hidden further behind a GEP and two adds.
define void @scatter_f16_index_add_add(ptr %base, i64 %offset, i64 %offset2, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
; CHECK-LABEL: scatter_f16_index_add_add:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #16 // =0x10
; CHECK-NEXT:    add x9, x0, x2, lsl #4
; CHECK-NEXT:    index z1.s, #0, w8
; CHECK-NEXT:    add x8, x9, x1, lsl #4
; CHECK-NEXT:    st1h { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT:    ret
  %splat.offset.ins = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %splat.offset = shufflevector <vscale x 4 x i64> %splat.offset.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %splat.offset2.ins = insertelement <vscale x 4 x i64> undef, i64 %offset2, i32 0
  %splat.offset2 = shufflevector <vscale x 4 x i64> %splat.offset2.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %add1 = add <vscale x 4 x i64> %splat.offset, %step
  %add2 = add <vscale x 4 x i64> %add1, %splat.offset2
  %gep = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %add2
  %gep.bc = bitcast <vscale x 4 x ptr> %gep to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %gep.bc, i32 2, <vscale x 4 x i1> %pg)
  ret void
}
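; As above, both splatted offsets are folded into the scalar base
; ("add x9, x0, x2, lsl #4" then "add x8, x9, x1, lsl #4"), leaving only the
; constant 16-byte step (8 * sizeof(half)) in the vector index.
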
; stepvector is hidden further behind a GEP, two adds and a shift.
define void @scatter_f16_index_add_add_mul(ptr %base, i64 %offset, i64 %offset2, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
; CHECK-LABEL: scatter_f16_index_add_add_mul:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #128 // =0x80
; CHECK-NEXT:    add x9, x0, x2, lsl #7
; CHECK-NEXT:    index z1.s, #0, w8
; CHECK-NEXT:    add x8, x9, x1, lsl #7
; CHECK-NEXT:    st1h { z0.s }, p0, [x8, z1.s, sxtw]
; CHECK-NEXT:    ret
  %splat.offset.ins = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
  %splat.offset = shufflevector <vscale x 4 x i64> %splat.offset.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %splat.offset2.ins = insertelement <vscale x 4 x i64> undef, i64 %offset2, i32 0
  %splat.offset2 = shufflevector <vscale x 4 x i64> %splat.offset2.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
  %add1 = add <vscale x 4 x i64> %splat.offset, %step
  %add2 = add <vscale x 4 x i64> %add1, %splat.offset2
  %splat.const8.ins = insertelement <vscale x 4 x i64> undef, i64 8, i32 0
  %splat.const8 = shufflevector <vscale x 4 x i64> %splat.const8.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %mul = mul <vscale x 4 x i64> %add2, %splat.const8
  %gep = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %mul
  %gep.bc = bitcast <vscale x 4 x ptr> %gep to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %gep.bc, i32 2, <vscale x 4 x i1> %pg)
  ret void
}

define <vscale x 2 x i64> @masked_gather_nxv2i64_const_with_vec_offsets(<vscale x 2 x i64> %vector_offsets, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: masked_gather_nxv2i64_const_with_vec_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #8 // =0x8
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8, z0.d, lsl #3]
; CHECK-NEXT:    ret
  %ptrs = getelementptr i64, ptr inttoptr (i64 8 to ptr), <vscale x 2 x i64> %vector_offsets
  %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
  ret <vscale x 2 x i64> %data
}

define <vscale x 2 x i64> @masked_gather_nxv2i64_null_with_vec_plus_scalar_offsets(<vscale x 2 x i64> %vector_offsets, i64 %scalar_offset, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: masked_gather_nxv2i64_null_with_vec_plus_scalar_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl x8, x0, #3
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8, z0.d, lsl #3]
; CHECK-NEXT:    ret
  %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 %scalar_offset, i64 0
  %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
  %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
  %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
  ret <vscale x 2 x i64> %data
}

define <vscale x 2 x i64> @masked_gather_nxv2i64_null_with__vec_plus_imm_offsets(<vscale x 2 x i64> %vector_offsets, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: masked_gather_nxv2i64_null_with__vec_plus_imm_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #8 // =0x8
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8, z0.d, lsl #3]
; CHECK-NEXT:    ret
  %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 1, i64 0
  %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
  %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
  %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
  ret <vscale x 2 x i64> %data
}

define <vscale x 4 x i32> @masked_gather_nxv4i32_s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv4i32_s8_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    sxtb z0.s, p1/m, z0.s
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
; CHECK-NEXT:    ret
  %offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.sext
  %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  ret <vscale x 4 x i32> %data
}

define <vscale x 4 x i32> @masked_gather_nxv4i32_u8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv4i32_u8_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.s, z0.s, #0xff
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
; CHECK-NEXT:    ret
  %offsets.zext = zext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.zext
  %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  ret <vscale x 4 x i32> %data
}
define <vscale x 4 x i32> @masked_gather_nxv4i32_u32s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
; CHECK-LABEL: masked_gather_nxv4i32_u32s8_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    sxtb z0.s, p1/m, z0.s
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
; CHECK-NEXT:    ret
  %offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
  %offsets.sext.zext = zext <vscale x 4 x i32> %offsets.sext to <vscale x 4 x i64>
  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i64> %offsets.sext.zext
  %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  ret <vscale x 4 x i32> %data
}

define void @masked_scatter_nxv2i64_const_with_vec_offsets(<vscale x 2 x i64> %vector_offsets, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %data) #0 {
; CHECK-LABEL: masked_scatter_nxv2i64_const_with_vec_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #8 // =0x8
; CHECK-NEXT:    st1d { z1.d }, p0, [x8, z0.d, lsl #3]
; CHECK-NEXT:    ret
  %ptrs = getelementptr i64, ptr inttoptr (i64 8 to ptr), <vscale x 2 x i64> %vector_offsets
  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg)
  ret void
}

define void @masked_scatter_nxv2i64_null_with_vec_plus_scalar_offsets(<vscale x 2 x i64> %vector_offsets, i64 %scalar_offset, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %data) #0 {
; CHECK-LABEL: masked_scatter_nxv2i64_null_with_vec_plus_scalar_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl x8, x0, #3
; CHECK-NEXT:    st1d { z1.d }, p0, [x8, z0.d, lsl #3]
; CHECK-NEXT:    ret
  %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 %scalar_offset, i64 0
  %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
  %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg)
  ret void
}

define void @masked_scatter_nxv2i64_null_with__vec_plus_imm_offsets(<vscale x 2 x i64> %vector_offsets, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %data) #0 {
; CHECK-LABEL: masked_scatter_nxv2i64_null_with__vec_plus_imm_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #8 // =0x8
; CHECK-NEXT:    st1d { z1.d }, p0, [x8, z0.d, lsl #3]
; CHECK-NEXT:    ret
  %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 1, i64 0
  %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
  %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg)
  ret void
}

define void @masked_scatter_nxv4i32_s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
; CHECK-LABEL: masked_scatter_nxv4i32_s8_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    sxtb z0.s, p1/m, z0.s
; CHECK-NEXT:    st1w { z1.s }, p0, [x0, z0.s, sxtw #2]
; CHECK-NEXT:    ret
  %offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.sext
  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask)
  ret void
}

define void @masked_scatter_nxv4i32_u8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
; CHECK-LABEL: masked_scatter_nxv4i32_u8_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.s, z0.s, #0xff
; CHECK-NEXT:    st1w { z1.s }, p0, [x0, z0.s, uxtw #2]
; CHECK-NEXT:    ret
  %offsets.zext = zext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.zext
  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask)
  ret void
}

define void @masked_scatter_nxv4i32_u32s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
; CHECK-LABEL: masked_scatter_nxv4i32_u32s8_offsets:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    sxtb z0.s, p1/m, z0.s
; CHECK-NEXT:    st1w { z1.s }, p0, [x0, z0.s, uxtw #2]
; CHECK-NEXT:    ret
  %offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
  %offsets.sext.zext = zext <vscale x 4 x i32> %offsets.sext to <vscale x 4 x i64>
  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i64> %offsets.sext.zext
  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask)
  ret void
}
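; For the *_s8_offsets / *_u8_offsets tests above, the i8 offsets only need to
; be widened in a vector register (sxtb for sign-extension, "and ..., #0xff"
; for zero-extension) before being consumed directly by the scaled
; [x0, z0.s, sxtw|uxtw #2] addressing mode, so no gather/scatter splitting is
; required.
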
attributes #0 = { "target-features"="+sve" vscale_range(1, 16) }

declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)

declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
declare void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
declare void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
declare void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)

declare <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()