; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; ST1H, ST1W, ST1D: base + 64-bit scaled offset
;   e.g. st1h { z0.d }, p0, [x0, z0.d, lsl #1]
;

define void @sst1h_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: sst1h_index:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, lsl #1]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.scatter.index.nxv2i16(<vscale x 2 x i16> %data_trunc,
                                                        <vscale x 2 x i1> %pg,
                                                        ptr %base,
                                                        <vscale x 2 x i64> %offsets)
  ret void
}

define void @sst1w_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: sst1w_index:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, lsl #2]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.scatter.index.nxv2i32(<vscale x 2 x i32> %data_trunc,
                                                        <vscale x 2 x i1> %pg,
                                                        ptr %base,
                                                        <vscale x 2 x i64> %offsets)
  ret void
}

define void @sst1d_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: sst1d_index:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, lsl #3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.index.nxv2i64(<vscale x 2 x i64> %data,
                                                        <vscale x 2 x i1> %pg,
                                                        ptr %base,
                                                        <vscale x 2 x i64> %offsets)
  ret void
}

define void @sst1d_index_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
; CHECK-LABEL: sst1d_index_double:
; CHECK:       // %bb.0:
; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, lsl #3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.scatter.index.nxv2f64(<vscale x 2 x double> %data,
                                                        <vscale x 2 x i1> %pg,
                                                        ptr %base,
                                                        <vscale x 2 x i64> %offsets)
  ret void
}

declare void @llvm.aarch64.sve.st1.scatter.index.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
declare void @llvm.aarch64.sve.st1.scatter.index.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
declare void @llvm.aarch64.sve.st1.scatter.index.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
declare void @llvm.aarch64.sve.st1.scatter.index.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)