; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s

declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i64)
declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i64, i64 immarg)
declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, i8*, i64)
declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64, i64)

define i64 @test_vleff_nxv8i8(<vscale x 8 x i8> *%p, i64 %vl) {
; CHECK-LABEL: name: test_vleff_nxv8i8
; CHECK: bb.0.entry:
; CHECK-NEXT:   liveins: $x10, $x11
; CHECK-NEXT: {{  $}}
; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
; CHECK-NEXT:   PseudoRET implicit $x10
entry:
  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8>* %p, i64 %vl)
  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
  ret i64 %1
}

define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %merge, <vscale x 8 x i8> *%p, i64 %vl) {
; CHECK-LABEL: name: test_vleff_nxv8i8_tu
; CHECK: bb.0.entry:
; CHECK-NEXT:   liveins: $v8, $x10, $x11
; CHECK-NEXT: {{  $}}
; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
; CHECK-NEXT:   PseudoRET implicit $x10
entry:
  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %merge, <vscale x 8 x i8>* %p, i64 %vl)
  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
  ret i64 %1
}

define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, <vscale x 8 x i1> %m, i64 %vl) {
; CHECK-LABEL: name: test_vleff_nxv8i8_mask
; CHECK: bb.0.entry:
; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
; CHECK-NEXT: {{  $}}
; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
; CHECK-NEXT:   $v0 = COPY [[COPY1]]
; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_MASK1]]
; CHECK-NEXT:   PseudoRET implicit $x10
entry:
  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, <vscale x 8 x i1> %m, i64 %vl, i64 0)
  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
  ret i64 %1
}

define i64 @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) {
; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
; CHECK: bb.0.entry:
; CHECK-NEXT:   liveins: $x10, $x11
; CHECK-NEXT: {{  $}}
; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT:   [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; CHECK-NEXT:   [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
; CHECK-NEXT:   [[DEF2:%[0-9]+]]:vr = IMPLICIT_DEF
; CHECK-NEXT:   [[DEF3:%[0-9]+]]:vr = IMPLICIT_DEF
; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1 = REG_SEQUENCE [[DEF]], %subreg.sub_vrm1_0, [[DEF2]], %subreg.sub_vrm1_1
; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
; CHECK-NEXT:   PseudoRET implicit $x10
entry:
  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, i8* %base, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
  ret i64 %1
}

define i64 @test_vlseg2ff_nxv8i8_tu(<vscale x 8 x i8> %val, i8* %base, i64 %vl, i64* %outvl) {
; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_tu
; CHECK: bb.0.entry:
; CHECK-NEXT:   liveins: $v8, $x10, $x11
; CHECK-NEXT: {{  $}}
; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1 = REG_SEQUENCE [[COPY2]], %subreg.sub_vrm1_0, [[COPY2]], %subreg.sub_vrm1_1
; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
; CHECK-NEXT:   PseudoRET implicit $x10
entry:
  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
  ret i64 %1
}

define i64 @test_vlseg2ff_nxv8i8_mask(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64* %outvl) {
; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_mask
; CHECK: bb.0.entry:
; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
; CHECK-NEXT: {{  $}}
; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v8
; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY3]], %subreg.sub_vrm1_0, [[COPY3]], %subreg.sub_vrm1_1
; CHECK-NEXT:   $v0 = COPY [[COPY1]]
; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK1]]
; CHECK-NEXT:   PseudoRET implicit $x10
entry:
  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0)
  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
  ret i64 %1
}