# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=SSE
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=AVX
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=AVX

--- |
  define void @test_xor_v32i8() {
    %ret = xor <32 x i8> undef, undef
    ret void
  }

  define void @test_xor_v16i16() {
    %ret = xor <16 x i16> undef, undef
    ret void
  }

  define void @test_xor_v8i32() {
    %ret = xor <8 x i32> undef, undef
    ret void
  }

  define void @test_xor_v4i64() {
    %ret = xor <4 x i64> undef, undef
    ret void
  }
...
---
name:            test_xor_v32i8
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; SSE-LABEL: name: test_xor_v32i8
    ; SSE: liveins: $ymm0, $ymm1
    ; SSE-NEXT: {{  $}}
    ; SSE-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; SSE-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; SSE-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
    ; SSE-NEXT: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
    ; SSE-NEXT: [[XOR:%[0-9]+]]:_(<16 x s8>) = G_XOR [[UV]], [[UV2]]
    ; SSE-NEXT: [[XOR1:%[0-9]+]]:_(<16 x s8>) = G_XOR [[UV1]], [[UV3]]
    ; SSE-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[XOR]](<16 x s8>), [[XOR1]](<16 x s8>)
    ; SSE-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
    ; SSE-NEXT: RET 0
    ; AVX-LABEL: name: test_xor_v32i8
    ; AVX: liveins: $ymm0, $ymm1
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; AVX-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; AVX-NEXT: [[XOR:%[0-9]+]]:_(<32 x s8>) = G_XOR [[DEF]], [[DEF1]]
    ; AVX-NEXT: $ymm0 = COPY [[XOR]](<32 x s8>)
    ; AVX-NEXT: RET 0
    %0(<32 x s8>) = IMPLICIT_DEF
    %1(<32 x s8>) = IMPLICIT_DEF
    %2(<32 x s8>) = G_XOR %0, %1
    $ymm0 = COPY %2
    RET 0

...
---
name:            test_xor_v16i16
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; SSE-LABEL: name: test_xor_v16i16
    ; SSE: liveins: $ymm0, $ymm1
    ; SSE-NEXT: {{  $}}
    ; SSE-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; SSE-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; SSE-NEXT: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
    ; SSE-NEXT: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
    ; SSE-NEXT: [[XOR:%[0-9]+]]:_(<8 x s16>) = G_XOR [[UV]], [[UV2]]
    ; SSE-NEXT: [[XOR1:%[0-9]+]]:_(<8 x s16>) = G_XOR [[UV1]], [[UV3]]
    ; SSE-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[XOR]](<8 x s16>), [[XOR1]](<8 x s16>)
    ; SSE-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
    ; SSE-NEXT: RET 0
    ; AVX-LABEL: name: test_xor_v16i16
    ; AVX: liveins: $ymm0, $ymm1
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; AVX-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; AVX-NEXT: [[XOR:%[0-9]+]]:_(<16 x s16>) = G_XOR [[DEF]], [[DEF1]]
    ; AVX-NEXT: $ymm0 = COPY [[XOR]](<16 x s16>)
    ; AVX-NEXT: RET 0
    %0(<16 x s16>) = IMPLICIT_DEF
    %1(<16 x s16>) = IMPLICIT_DEF
    %2(<16 x s16>) = G_XOR %0, %1
    $ymm0 = COPY %2
    RET 0

...
---
name:            test_xor_v8i32
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; SSE-LABEL: name: test_xor_v8i32
    ; SSE: liveins: $ymm0, $ymm1
    ; SSE-NEXT: {{  $}}
    ; SSE-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; SSE-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; SSE-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
    ; SSE-NEXT: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
    ; SSE-NEXT: [[XOR:%[0-9]+]]:_(<4 x s32>) = G_XOR [[UV]], [[UV2]]
    ; SSE-NEXT: [[XOR1:%[0-9]+]]:_(<4 x s32>) = G_XOR [[UV1]], [[UV3]]
    ; SSE-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[XOR]](<4 x s32>), [[XOR1]](<4 x s32>)
    ; SSE-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
    ; SSE-NEXT: RET 0
    ; AVX-LABEL: name: test_xor_v8i32
    ; AVX: liveins: $ymm0, $ymm1
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; AVX-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; AVX-NEXT: [[XOR:%[0-9]+]]:_(<8 x s32>) = G_XOR [[DEF]], [[DEF1]]
    ; AVX-NEXT: $ymm0 = COPY [[XOR]](<8 x s32>)
    ; AVX-NEXT: RET 0
    %0(<8 x s32>) = IMPLICIT_DEF
    %1(<8 x s32>) = IMPLICIT_DEF
    %2(<8 x s32>) = G_XOR %0, %1
    $ymm0 = COPY %2
    RET 0

...
---
name:            test_xor_v4i64
alignment:       16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; SSE-LABEL: name: test_xor_v4i64
    ; SSE: liveins: $ymm0, $ymm1
    ; SSE-NEXT: {{  $}}
    ; SSE-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; SSE-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; SSE-NEXT: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
    ; SSE-NEXT: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
    ; SSE-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[UV]], [[UV2]]
    ; SSE-NEXT: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[UV1]], [[UV3]]
    ; SSE-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[XOR]](<2 x s64>), [[XOR1]](<2 x s64>)
    ; SSE-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
    ; SSE-NEXT: RET 0
    ; AVX-LABEL: name: test_xor_v4i64
    ; AVX: liveins: $ymm0, $ymm1
    ; AVX-NEXT: {{  $}}
    ; AVX-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; AVX-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; AVX-NEXT: [[XOR:%[0-9]+]]:_(<4 x s64>) = G_XOR [[DEF]], [[DEF1]]
    ; AVX-NEXT: $ymm0 = COPY [[XOR]](<4 x s64>)
    ; AVX-NEXT: RET 0
    %0(<4 x s64>) = IMPLICIT_DEF
    %1(<4 x s64>) = IMPLICIT_DEF
    %2(<4 x s64>) = G_XOR %0, %1
    $ymm0 = COPY %2
    RET 0

...