; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s

declare <16 x i8> @llvm.loongarch.lsx.vsrlr.b(<16 x i8>, <16 x i8>)

define <16 x i8> @lsx_vsrlr_b(<16 x i8> %va, <16 x i8> %vb) nounwind {
; CHECK-LABEL: lsx_vsrlr_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsrlr.b $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vsrlr.b(<16 x i8> %va, <16 x i8> %vb)
  ret <16 x i8> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vsrlr.h(<8 x i16>, <8 x i16>)

define <8 x i16> @lsx_vsrlr_h(<8 x i16> %va, <8 x i16> %vb) nounwind {
; CHECK-LABEL: lsx_vsrlr_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsrlr.h $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vsrlr.h(<8 x i16> %va, <8 x i16> %vb)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vsrlr.w(<4 x i32>, <4 x i32>)

define <4 x i32> @lsx_vsrlr_w(<4 x i32> %va, <4 x i32> %vb) nounwind {
; CHECK-LABEL: lsx_vsrlr_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsrlr.w $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vsrlr.w(<4 x i32> %va, <4 x i32> %vb)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vsrlr.d(<2 x i64>, <2 x i64>)

define <2 x i64> @lsx_vsrlr_d(<2 x i64> %va, <2 x i64> %vb) nounwind {
; CHECK-LABEL: lsx_vsrlr_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsrlr.d $vr0, $vr0, $vr1
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vsrlr.d(<2 x i64> %va, <2 x i64> %vb)
  ret <2 x i64> %res
}

declare <16 x i8> @llvm.loongarch.lsx.vsrlri.b(<16 x i8>, i32)

define <16 x i8> @lsx_vsrlri_b(<16 x i8> %va) nounwind {
; CHECK-LABEL: lsx_vsrlri_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsrlri.b $vr0, $vr0, 1
; CHECK-NEXT:    ret
entry:
  %res = call <16 x i8> @llvm.loongarch.lsx.vsrlri.b(<16 x i8> %va, i32 1)
  ret <16 x i8> %res
}

declare <8 x i16> @llvm.loongarch.lsx.vsrlri.h(<8 x i16>, i32)

define <8 x i16> @lsx_vsrlri_h(<8 x i16> %va) nounwind {
; CHECK-LABEL: lsx_vsrlri_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsrlri.h $vr0, $vr0, 1
; CHECK-NEXT:    ret
entry:
  %res = call <8 x i16> @llvm.loongarch.lsx.vsrlri.h(<8 x i16> %va, i32 1)
  ret <8 x i16> %res
}

declare <4 x i32> @llvm.loongarch.lsx.vsrlri.w(<4 x i32>, i32)

define <4 x i32> @lsx_vsrlri_w(<4 x i32> %va) nounwind {
; CHECK-LABEL: lsx_vsrlri_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsrlri.w $vr0, $vr0, 1
; CHECK-NEXT:    ret
entry:
  %res = call <4 x i32> @llvm.loongarch.lsx.vsrlri.w(<4 x i32> %va, i32 1)
  ret <4 x i32> %res
}

declare <2 x i64> @llvm.loongarch.lsx.vsrlri.d(<2 x i64>, i32)

define <2 x i64> @lsx_vsrlri_d(<2 x i64> %va) nounwind {
; CHECK-LABEL: lsx_vsrlri_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsrlri.d $vr0, $vr0, 1
; CHECK-NEXT:    ret
entry:
  %res = call <2 x i64> @llvm.loongarch.lsx.vsrlri.d(<2 x i64> %va, i32 1)
  ret <2 x i64> %res
}