; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s

define <8 x i8> @test_vshr_n_s8(<8 x i8> %a) {
; CHECK-LABEL: test_vshr_n_s8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sshr v0.8b, v0.8b, #3
; CHECK-NEXT:    ret
  %vshr_n = ashr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <8 x i8> %vshr_n
}

define <4 x i16> @test_vshr_n_s16(<4 x i16> %a) {
; CHECK-LABEL: test_vshr_n_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sshr v0.4h, v0.4h, #3
; CHECK-NEXT:    ret
  %vshr_n = ashr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
  ret <4 x i16> %vshr_n
}

define <2 x i32> @test_vshr_n_s32(<2 x i32> %a) {
; CHECK-LABEL: test_vshr_n_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sshr v0.2s, v0.2s, #3
; CHECK-NEXT:    ret
  %vshr_n = ashr <2 x i32> %a, <i32 3, i32 3>
  ret <2 x i32> %vshr_n
}

define <16 x i8> @test_vshrq_n_s8(<16 x i8> %a) {
; CHECK-LABEL: test_vshrq_n_s8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sshr v0.16b, v0.16b, #3
; CHECK-NEXT:    ret
  %vshr_n = ashr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <16 x i8> %vshr_n
}

define <8 x i16> @test_vshrq_n_s16(<8 x i16> %a) {
; CHECK-LABEL: test_vshrq_n_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sshr v0.8h, v0.8h, #3
; CHECK-NEXT:    ret
  %vshr_n = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  ret <8 x i16> %vshr_n
}

define <4 x i32> @test_vshrq_n_s32(<4 x i32> %a) {
; CHECK-LABEL: test_vshrq_n_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sshr v0.4s, v0.4s, #3
; CHECK-NEXT:    ret
  %vshr_n = ashr <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %vshr_n
}

define <2 x i64> @test_vshrq_n_s64(<2 x i64> %a) {
; CHECK-LABEL: test_vshrq_n_s64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sshr v0.2d, v0.2d, #3
; CHECK-NEXT:    ret
  %vshr_n = ashr <2 x i64> %a, <i64 3, i64 3>
  ret <2 x i64> %vshr_n
}

define <8 x i8> @test_vshr_n_u8(<8 x i8> %a) {
; CHECK-LABEL: test_vshr_n_u8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v0.8b, v0.8b, #3
; CHECK-NEXT:    ret
  %vshr_n = lshr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <8 x i8> %vshr_n
}

define <4 x i16> @test_vshr_n_u16(<4 x i16> %a) {
; CHECK-LABEL: test_vshr_n_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v0.4h, v0.4h, #3
; CHECK-NEXT:    ret
  %vshr_n = lshr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
  ret <4 x i16> %vshr_n
}

define <2 x i32> @test_vshr_n_u32(<2 x i32> %a) {
; CHECK-LABEL: test_vshr_n_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v0.2s, v0.2s, #3
; CHECK-NEXT:    ret
  %vshr_n = lshr <2 x i32> %a, <i32 3, i32 3>
  ret <2 x i32> %vshr_n
}

define <16 x i8> @test_vshrq_n_u8(<16 x i8> %a) {
; CHECK-LABEL: test_vshrq_n_u8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v0.16b, v0.16b, #3
; CHECK-NEXT:    ret
  %vshr_n = lshr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <16 x i8> %vshr_n
}

define <8 x i16> @test_vshrq_n_u16(<8 x i16> %a) {
; CHECK-LABEL: test_vshrq_n_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v0.8h, v0.8h, #3
; CHECK-NEXT:    ret
  %vshr_n = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  ret <8 x i16> %vshr_n
}

define <4 x i32> @test_vshrq_n_u32(<4 x i32> %a) {
; CHECK-LABEL: test_vshrq_n_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v0.4s, v0.4s, #3
; CHECK-NEXT:    ret
  %vshr_n = lshr <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %vshr_n
}

define <2 x i64> @test_vshrq_n_u64(<2 x i64> %a) {
; CHECK-LABEL: test_vshrq_n_u64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v0.2d, v0.2d, #3
; CHECK-NEXT:    ret
  %vshr_n = lshr <2 x i64> %a, <i64 3, i64 3>
  ret <2 x i64> %vshr_n
}
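
; Shift-right-accumulate: a constant ashr/lshr of %b followed by an add with %a
; should fold into a single ssra/usra (shift right and accumulate) instruction.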
define <8 x i8> @test_vsra_n_s8(<8 x i8> %a, <8 x i8> %b) {
; CHECK-LABEL: test_vsra_n_s8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ssra v0.8b, v1.8b, #3
; CHECK-NEXT:    ret
  %vsra_n = ashr <8 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  %1 = add <8 x i8> %vsra_n, %a
  ret <8 x i8> %1
}

define <4 x i16> @test_vsra_n_s16(<4 x i16> %a, <4 x i16> %b) {
; CHECK-LABEL: test_vsra_n_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ssra v0.4h, v1.4h, #3
; CHECK-NEXT:    ret
  %vsra_n = ashr <4 x i16> %b, <i16 3, i16 3, i16 3, i16 3>
  %1 = add <4 x i16> %vsra_n, %a
  ret <4 x i16> %1
}

define <2 x i32> @test_vsra_n_s32(<2 x i32> %a, <2 x i32> %b) {
; CHECK-LABEL: test_vsra_n_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ssra v0.2s, v1.2s, #3
; CHECK-NEXT:    ret
  %vsra_n = ashr <2 x i32> %b, <i32 3, i32 3>
  %1 = add <2 x i32> %vsra_n, %a
  ret <2 x i32> %1
}

define <16 x i8> @test_vsraq_n_s8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test_vsraq_n_s8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ssra v0.16b, v1.16b, #3
; CHECK-NEXT:    ret
  %vsra_n = ashr <16 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  %1 = add <16 x i8> %vsra_n, %a
  ret <16 x i8> %1
}

define <8 x i16> @test_vsraq_n_s16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vsraq_n_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ssra v0.8h, v1.8h, #3
; CHECK-NEXT:    ret
  %vsra_n = ashr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  %1 = add <8 x i16> %vsra_n, %a
  ret <8 x i16> %1
}

define <4 x i32> @test_vsraq_n_s32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vsraq_n_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ssra v0.4s, v1.4s, #3
; CHECK-NEXT:    ret
  %vsra_n = ashr <4 x i32> %b, <i32 3, i32 3, i32 3, i32 3>
  %1 = add <4 x i32> %vsra_n, %a
  ret <4 x i32> %1
}

define <2 x i64> @test_vsraq_n_s64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vsraq_n_s64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ssra v0.2d, v1.2d, #3
; CHECK-NEXT:    ret
  %vsra_n = ashr <2 x i64> %b, <i64 3, i64 3>
  %1 = add <2 x i64> %vsra_n, %a
  ret <2 x i64> %1
}

define <8 x i8> @test_vsra_n_u8(<8 x i8> %a, <8 x i8> %b) {
; CHECK-LABEL: test_vsra_n_u8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    usra v0.8b, v1.8b, #3
; CHECK-NEXT:    ret
  %vsra_n = lshr <8 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  %1 = add <8 x i8> %vsra_n, %a
  ret <8 x i8> %1
}

define <4 x i16> @test_vsra_n_u16(<4 x i16> %a, <4 x i16> %b) {
; CHECK-LABEL: test_vsra_n_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    usra v0.4h, v1.4h, #3
; CHECK-NEXT:    ret
  %vsra_n = lshr <4 x i16> %b, <i16 3, i16 3, i16 3, i16 3>
  %1 = add <4 x i16> %vsra_n, %a
  ret <4 x i16> %1
}

define <2 x i32> @test_vsra_n_u32(<2 x i32> %a, <2 x i32> %b) {
; CHECK-LABEL: test_vsra_n_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    usra v0.2s, v1.2s, #3
; CHECK-NEXT:    ret
  %vsra_n = lshr <2 x i32> %b, <i32 3, i32 3>
  %1 = add <2 x i32> %vsra_n, %a
  ret <2 x i32> %1
}

define <16 x i8> @test_vsraq_n_u8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: test_vsraq_n_u8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    usra v0.16b, v1.16b, #3
; CHECK-NEXT:    ret
  %vsra_n = lshr <16 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  %1 = add <16 x i8> %vsra_n, %a
  ret <16 x i8> %1
}

define <8 x i16> @test_vsraq_n_u16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vsraq_n_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    usra v0.8h, v1.8h, #3
; CHECK-NEXT:    ret
  %vsra_n = lshr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  %1 = add <8 x i16> %vsra_n, %a
  ret <8 x i16> %1
}

define <4 x i32> @test_vsraq_n_u32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vsraq_n_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    usra v0.4s, v1.4s, #3
; CHECK-NEXT:    ret
  %vsra_n = lshr <4 x i32> %b, <i32 3, i32 3, i32 3, i32 3>
  %1 = add <4 x i32> %vsra_n, %a
  ret <4 x i32> %1
}

define <2 x i64> @test_vsraq_n_u64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vsraq_n_u64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    usra v0.2d, v1.2d, #3
; CHECK-NEXT:    ret
  %vsra_n = lshr <2 x i64> %b, <i64 3, i64 3>
  %1 = add <2 x i64> %vsra_n, %a
  ret <2 x i64> %1
}
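
; Narrowing shifts: a constant right shift followed by a trunc to the
; half-width element type should select shrn; the _high variants insert the
; narrowed result into the top half of the destination via shrn2.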
define <8 x i8> @test_vshrn_n_s16(<8 x i16> %a) {
; CHECK-LABEL: test_vshrn_n_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    shrn v0.8b, v0.8h, #3
; CHECK-NEXT:    ret
  %1 = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
  ret <8 x i8> %vshrn_n
}

define <4 x i16> @test_vshrn_n_s32(<4 x i32> %a) {
; CHECK-LABEL: test_vshrn_n_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    shrn v0.4h, v0.4s, #9
; CHECK-NEXT:    ret
  %1 = ashr <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
  %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
  ret <4 x i16> %vshrn_n
}

define <2 x i32> @test_vshrn_n_s64(<2 x i64> %a) {
; CHECK-LABEL: test_vshrn_n_s64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    shrn v0.2s, v0.2d, #19
; CHECK-NEXT:    ret
  %1 = ashr <2 x i64> %a, <i64 19, i64 19>
  %vshrn_n = trunc <2 x i64> %1 to <2 x i32>
  ret <2 x i32> %vshrn_n
}

define <8 x i8> @test_vshrn_n_u16(<8 x i16> %a) {
; CHECK-LABEL: test_vshrn_n_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    shrn v0.8b, v0.8h, #3
; CHECK-NEXT:    ret
  %1 = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
  ret <8 x i8> %vshrn_n
}

define <4 x i16> @test_vshrn_n_u32(<4 x i32> %a) {
; CHECK-LABEL: test_vshrn_n_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    shrn v0.4h, v0.4s, #9
; CHECK-NEXT:    ret
  %1 = lshr <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
  %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
  ret <4 x i16> %vshrn_n
}

define <2 x i32> @test_vshrn_n_u64(<2 x i64> %a) {
; CHECK-LABEL: test_vshrn_n_u64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    shrn v0.2s, v0.2d, #19
; CHECK-NEXT:    ret
  %1 = lshr <2 x i64> %a, <i64 19, i64 19>
  %vshrn_n = trunc <2 x i64> %1 to <2 x i32>
  ret <2 x i32> %vshrn_n
}

define <16 x i8> @test_vshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vshrn_high_n_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    shrn2 v0.16b, v1.8h, #3
; CHECK-NEXT:    ret
  %1 = ashr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
  %2 = bitcast <8 x i8> %a to <1 x i64>
  %3 = bitcast <8 x i8> %vshrn_n to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
  %4 = bitcast <2 x i64> %shuffle.i to <16 x i8>
  ret <16 x i8> %4
}

define <8 x i16> @test_vshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vshrn_high_n_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    shrn2 v0.8h, v1.4s, #9
; CHECK-NEXT:    ret
  %1 = ashr <4 x i32> %b, <i32 9, i32 9, i32 9, i32 9>
  %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
  %2 = bitcast <4 x i16> %a to <1 x i64>
  %3 = bitcast <4 x i16> %vshrn_n to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
  %4 = bitcast <2 x i64> %shuffle.i to <8 x i16>
  ret <8 x i16> %4
}

define <4 x i32> @test_vshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vshrn_high_n_s64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    shrn2 v0.4s, v1.2d, #19
; CHECK-NEXT:    ret
  %1 = bitcast <2 x i32> %a to <1 x i64>
  %2 = ashr <2 x i64> %b, <i64 19, i64 19>
  %vshrn_n = trunc <2 x i64> %2 to <2 x i32>
  %3 = bitcast <2 x i32> %vshrn_n to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
  %4 = bitcast <2 x i64> %shuffle.i to <4 x i32>
  ret <4 x i32> %4
}

define <16 x i8> @test_vshrn_high_n_u16(<8 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vshrn_high_n_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    shrn2 v0.16b, v1.8h, #3
; CHECK-NEXT:    ret
  %1 = lshr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
  %2 = bitcast <8 x i8> %a to <1 x i64>
  %3 = bitcast <8 x i8> %vshrn_n to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
  %4 = bitcast <2 x i64> %shuffle.i to <16 x i8>
  ret <16 x i8> %4
}

define <8 x i16> @test_vshrn_high_n_u32(<4 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vshrn_high_n_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    shrn2 v0.8h, v1.4s, #9
; CHECK-NEXT:    ret
  %1 = lshr <4 x i32> %b, <i32 9, i32 9, i32 9, i32 9>
  %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
  %2 = bitcast <4 x i16> %a to <1 x i64>
  %3 = bitcast <4 x i16> %vshrn_n to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
  %4 = bitcast <2 x i64> %shuffle.i to <8 x i16>
  ret <8 x i16> %4
}

define <4 x i32> @test_vshrn_high_n_u64(<2 x i32> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vshrn_high_n_u64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    shrn2 v0.4s, v1.2d, #19
; CHECK-NEXT:    ret
  %1 = bitcast <2 x i32> %a to <1 x i64>
  %2 = lshr <2 x i64> %b, <i64 19, i64 19>
  %vshrn_n = trunc <2 x i64> %2 to <2 x i32>
  %3 = bitcast <2 x i32> %vshrn_n to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
  %4 = bitcast <2 x i64> %shuffle.i to <4 x i32>
  ret <4 x i32> %4
}
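
; Saturating and rounding narrowing shifts are expressed through
; llvm.aarch64.neon.* intrinsics; these tests check that concatenating the
; intrinsic result with an existing low half selects the top-half ("2")
; instruction forms.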
define <16 x i8> @test_vqshrun_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vqshrun_high_n_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    sqshrun2 v0.16b, v1.8h, #3
; CHECK-NEXT:    ret
  %vqshrun = tail call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> %b, i32 3)
  %1 = bitcast <8 x i8> %a to <1 x i64>
  %2 = bitcast <8 x i8> %vqshrun to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
  ret <16 x i8> %3
}

define <8 x i16> @test_vqshrun_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vqshrun_high_n_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    sqshrun2 v0.8h, v1.4s, #9
; CHECK-NEXT:    ret
  %vqshrun = tail call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> %b, i32 9)
  %1 = bitcast <4 x i16> %a to <1 x i64>
  %2 = bitcast <4 x i16> %vqshrun to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
  ret <8 x i16> %3
}

define <4 x i32> @test_vqshrun_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vqshrun_high_n_s64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    sqshrun2 v0.4s, v1.2d, #19
; CHECK-NEXT:    ret
  %1 = bitcast <2 x i32> %a to <1 x i64>
  %vqshrun = tail call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> %b, i32 19)
  %2 = bitcast <2 x i32> %vqshrun to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
  ret <4 x i32> %3
}

define <16 x i8> @test_vrshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vrshrn_high_n_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    rshrn2 v0.16b, v1.8h, #3
; CHECK-NEXT:    ret
  %vrshrn = tail call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %b, i32 3)
  %1 = bitcast <8 x i8> %a to <1 x i64>
  %2 = bitcast <8 x i8> %vrshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
  ret <16 x i8> %3
}

define <8 x i16> @test_vrshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vrshrn_high_n_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    rshrn2 v0.8h, v1.4s, #9
; CHECK-NEXT:    ret
  %vrshrn = tail call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> %b, i32 9)
  %1 = bitcast <4 x i16> %a to <1 x i64>
  %2 = bitcast <4 x i16> %vrshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
  ret <8 x i16> %3
}

define <4 x i32> @test_vrshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vrshrn_high_n_s64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    rshrn2 v0.4s, v1.2d, #19
; CHECK-NEXT:    ret
  %1 = bitcast <2 x i32> %a to <1 x i64>
  %vrshrn = tail call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> %b, i32 19)
  %2 = bitcast <2 x i32> %vrshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
  ret <4 x i32> %3
}
define <16 x i8> @test_vqrshrun_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vqrshrun_high_n_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    sqrshrun2 v0.16b, v1.8h, #3
; CHECK-NEXT:    ret
  %vqrshrun = tail call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %b, i32 3)
  %1 = bitcast <8 x i8> %a to <1 x i64>
  %2 = bitcast <8 x i8> %vqrshrun to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
  ret <16 x i8> %3
}

define <8 x i16> @test_vqrshrun_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vqrshrun_high_n_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    sqrshrun2 v0.8h, v1.4s, #9
; CHECK-NEXT:    ret
  %vqrshrun = tail call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %b, i32 9)
  %1 = bitcast <4 x i16> %a to <1 x i64>
  %2 = bitcast <4 x i16> %vqrshrun to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
  ret <8 x i16> %3
}

define <4 x i32> @test_vqrshrun_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vqrshrun_high_n_s64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    sqrshrun2 v0.4s, v1.2d, #19
; CHECK-NEXT:    ret
  %1 = bitcast <2 x i32> %a to <1 x i64>
  %vqrshrun = tail call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %b, i32 19)
  %2 = bitcast <2 x i32> %vqrshrun to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
  ret <4 x i32> %3
}

define <16 x i8> @test_vqshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vqshrn_high_n_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    sqshrn2 v0.16b, v1.8h, #3
; CHECK-NEXT:    ret
  %vqshrn = tail call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> %b, i32 3)
  %1 = bitcast <8 x i8> %a to <1 x i64>
  %2 = bitcast <8 x i8> %vqshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
  ret <16 x i8> %3
}

define <8 x i16> @test_vqshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vqshrn_high_n_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    sqshrn2 v0.8h, v1.4s, #9
; CHECK-NEXT:    ret
  %vqshrn = tail call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> %b, i32 9)
  %1 = bitcast <4 x i16> %a to <1 x i64>
  %2 = bitcast <4 x i16> %vqshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
  ret <8 x i16> %3
}

define <4 x i32> @test_vqshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vqshrn_high_n_s64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    sqshrn2 v0.4s, v1.2d, #19
; CHECK-NEXT:    ret
  %1 = bitcast <2 x i32> %a to <1 x i64>
  %vqshrn = tail call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> %b, i32 19)
  %2 = bitcast <2 x i32> %vqshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
  ret <4 x i32> %3
}
define <16 x i8> @test_vqshrn_high_n_u16(<8 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vqshrn_high_n_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    uqshrn2 v0.16b, v1.8h, #3
; CHECK-NEXT:    ret
  %vqshrn = tail call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> %b, i32 3)
  %1 = bitcast <8 x i8> %a to <1 x i64>
  %2 = bitcast <8 x i8> %vqshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
  ret <16 x i8> %3
}

define <8 x i16> @test_vqshrn_high_n_u32(<4 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vqshrn_high_n_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    uqshrn2 v0.8h, v1.4s, #9
; CHECK-NEXT:    ret
  %vqshrn = tail call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> %b, i32 9)
  %1 = bitcast <4 x i16> %a to <1 x i64>
  %2 = bitcast <4 x i16> %vqshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
  ret <8 x i16> %3
}

define <4 x i32> @test_vqshrn_high_n_u64(<2 x i32> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vqshrn_high_n_u64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    uqshrn2 v0.4s, v1.2d, #19
; CHECK-NEXT:    ret
  %1 = bitcast <2 x i32> %a to <1 x i64>
  %vqshrn = tail call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> %b, i32 19)
  %2 = bitcast <2 x i32> %vqshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
  ret <4 x i32> %3
}

define <16 x i8> @test_vqrshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vqrshrn_high_n_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    sqrshrn2 v0.16b, v1.8h, #3
; CHECK-NEXT:    ret
  %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %b, i32 3)
  %1 = bitcast <8 x i8> %a to <1 x i64>
  %2 = bitcast <8 x i8> %vqrshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
  ret <16 x i8> %3
}

define <8 x i16> @test_vqrshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vqrshrn_high_n_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    sqrshrn2 v0.8h, v1.4s, #9
; CHECK-NEXT:    ret
  %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %b, i32 9)
  %1 = bitcast <4 x i16> %a to <1 x i64>
  %2 = bitcast <4 x i16> %vqrshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
  ret <8 x i16> %3
}

define <4 x i32> @test_vqrshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vqrshrn_high_n_s64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    sqrshrn2 v0.4s, v1.2d, #19
; CHECK-NEXT:    ret
  %1 = bitcast <2 x i32> %a to <1 x i64>
  %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %b, i32 19)
  %2 = bitcast <2 x i32> %vqrshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
  ret <4 x i32> %3
}
define <16 x i8> @test_vqrshrn_high_n_u16(<8 x i8> %a, <8 x i16> %b) {
; CHECK-LABEL: test_vqrshrn_high_n_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    uqrshrn2 v0.16b, v1.8h, #3
; CHECK-NEXT:    ret
  %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> %b, i32 3)
  %1 = bitcast <8 x i8> %a to <1 x i64>
  %2 = bitcast <8 x i8> %vqrshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
  ret <16 x i8> %3
}

define <8 x i16> @test_vqrshrn_high_n_u32(<4 x i16> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vqrshrn_high_n_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    uqrshrn2 v0.8h, v1.4s, #9
; CHECK-NEXT:    ret
  %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> %b, i32 9)
  %1 = bitcast <4 x i16> %a to <1 x i64>
  %2 = bitcast <4 x i16> %vqrshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
  ret <8 x i16> %3
}

define <4 x i32> @test_vqrshrn_high_n_u64(<2 x i32> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vqrshrn_high_n_u64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    uqrshrn2 v0.4s, v1.2d, #19
; CHECK-NEXT:    ret
  %1 = bitcast <2 x i32> %a to <1 x i64>
  %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> %b, i32 19)
  %2 = bitcast <2 x i32> %vqrshrn to <1 x i64>
  %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
  %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
  ret <4 x i32> %3
}

declare <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16>, i32)
declare <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32>, i32)
declare <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64>, i32)

declare <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16>, i32)
declare <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32>, i32)
declare <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64>, i32)

declare <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16>, i32)
declare <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32>, i32)
declare <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64>, i32)

declare <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16>, i32)
declare <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32>, i32)
declare <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64>, i32)

declare <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16>, i32)
declare <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32>, i32)
declare <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64>, i32)

declare <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16>, i32)
declare <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32>, i32)
declare <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64>, i32)

declare <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16>, i32)
declare <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32>, i32)
declare <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64>, i32)

declare <2 x float> @llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32>, i32)
declare <4 x float> @llvm.aarch64.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32>, i32)
declare <2 x double> @llvm.aarch64.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64>, i32)

declare <2 x float> @llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32>, i32)
declare <4 x float> @llvm.aarch64.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32)
declare <2 x double> @llvm.aarch64.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64>, i32)

declare <2 x i32> @llvm.aarch64.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float>, i32)
declare <4 x i32> @llvm.aarch64.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float>, i32)
declare <2 x i64> @llvm.aarch64.neon.vcvtfp2fxs.v2i64.v2f64(<2 x double>, i32)

declare <2 x i32> @llvm.aarch64.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float>, i32)
declare <4 x i32> @llvm.aarch64.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float>, i32)
declare <2 x i64> @llvm.aarch64.neon.vcvtfp2fxu.v2i64.v2f64(<2 x double>, i32)
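
; Fixed-point conversions on 64-bit scalar-in-vector types should use the
; scalar fcvtzs/fcvtzu/scvtf/ucvtf forms with a fractional-bits immediate.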
define <1 x i64> @test_vcvt_n_s64_f64(<1 x double> %a) {
; CHECK-LABEL: test_vcvt_n_s64_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs d0, d0, #64
; CHECK-NEXT:    ret
  %1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64(<1 x double> %a, i32 64)
  ret <1 x i64> %1
}

define <1 x i64> @test_vcvt_n_u64_f64(<1 x double> %a) {
; CHECK-LABEL: test_vcvt_n_u64_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu d0, d0, #64
; CHECK-NEXT:    ret
  %1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64(<1 x double> %a, i32 64)
  ret <1 x i64> %1
}

define <1 x double> @test_vcvt_n_f64_s64(<1 x i64> %a) {
; CHECK-LABEL: test_vcvt_n_f64_s64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf d0, d0, #64
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64(<1 x i64> %a, i32 64)
  ret <1 x double> %1
}

define <1 x double> @test_vcvt_n_f64_u64(<1 x i64> %a) {
; CHECK-LABEL: test_vcvt_n_f64_u64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf d0, d0, #64
; CHECK-NEXT:    ret
  %1 = tail call <1 x double> @llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64(<1 x i64> %a, i32 64)
  ret <1 x double> %1
}

declare <1 x i64> @llvm.aarch64.neon.vcvtfp2fxs.v1i64.v1f64(<1 x double>, i32)
declare <1 x i64> @llvm.aarch64.neon.vcvtfp2fxu.v1i64.v1f64(<1 x double>, i32)
declare <1 x double> @llvm.aarch64.neon.vcvtfxs2fp.v1f64.v1i64(<1 x i64>, i32)
declare <1 x double> @llvm.aarch64.neon.vcvtfxu2fp.v1f64.v1i64(<1 x i64>, i32)