; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 < %s | FileCheck %s

; Merge

define <vscale x 8 x i16> @test_pmov_to_vector_i16(<vscale x 8 x i16> %zn, <vscale x 8 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    mov w0, #1 // =0x1
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv8i16
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv8i16(<vscale x 8 x i16> %zn, <vscale x 8 x i1> %pn, i32 1)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @test_pmov_to_vector_i32(<vscale x 4 x i32> %zn, <vscale x 4 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    mov w0, #3 // =0x3
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv4i32
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv4i32(<vscale x 4 x i32> %zn, <vscale x 4 x i1> %pn, i32 3)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @test_pmov_to_vector_i64(<vscale x 2 x i64> %zn, <vscale x 2 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_i64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    mov w0, #7 // =0x7
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv2i64
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv2i64(<vscale x 2 x i64> %zn, <vscale x 2 x i1> %pn, i32 7)
  ret <vscale x 2 x i64> %res
}

; Zero

define <vscale x 16 x i8> @test_pmov_to_vector_zero_i8(<vscale x 16 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv16i8
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv16i8(<vscale x 16 x i1> %pn)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @test_pmov_to_vector_zero_i16(<vscale x 8 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv8i16
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv8i16(<vscale x 8 x i1> %pn)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @test_pmov_to_vector_zero_i32(<vscale x 4 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv4i32
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv4i32(<vscale x 4 x i1> %pn)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @test_pmov_to_vector_zero_i64(<vscale x 2 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv2i64
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv2i64(<vscale x 2 x i1> %pn)
  ret <vscale x 2 x i64> %res
}

declare <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv16i8(<vscale x 16 x i1>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv8i16(<vscale x 8 x i1>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv4i32(<vscale x 4 x i1>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv2i64(<vscale x 2 x i1>)