; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=memcpyopt -S -verify-memoryssa | FileCheck %s

; Check that a call featuring a scalable-vector byval argument fed by a memcpy
; doesn't crash the compiler. It previously assumed the byval type's size could
; be represented as a known constant amount.
define void @byval_caller(ptr %P) {
; CHECK-LABEL: @byval_caller(
; CHECK-NEXT:    [[A:%.*]] = alloca i8, align 1
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[A]], ptr align 4 [[P:%.*]], i64 8, i1 false)
; CHECK-NEXT:    call void @byval_callee(ptr byval(<vscale x 1 x i8>) align 1 [[A]])
; CHECK-NEXT:    ret void
;
  %a = alloca i8
  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %a, ptr align 4 %P, i64 8, i1 false)
  call void @byval_callee(ptr align 1 byval(<vscale x 1 x i8>) %a)
  ret void
}

declare void @llvm.memcpy.p0.p0.i64(ptr align 4, ptr align 4, i64, i1)
declare void @byval_callee(ptr align 1 byval(<vscale x 1 x i8>))

; Check that two overlapping scalable-vector stores (at a constant offset from
; one another) do not crash the compiler when it checks whether they can be
; merged into a single memset. There was previously an assumption that the
; stored values' sizes could be represented as a known constant amount. A
; fixed-size sketch of the merge follows these three tests.
define void @merge_stores_both_scalable(ptr %ptr) {
; CHECK-LABEL: @merge_stores_both_scalable(
; CHECK-NEXT:    store <vscale x 1 x i8> zeroinitializer, ptr [[PTR:%.*]], align 1
; CHECK-NEXT:    [[PTR_NEXT:%.*]] = getelementptr i8, ptr [[PTR]], i64 1
; CHECK-NEXT:    store <vscale x 1 x i8> zeroinitializer, ptr [[PTR_NEXT]], align 1
; CHECK-NEXT:    ret void
;
  store <vscale x 1 x i8> zeroinitializer, ptr %ptr
  %ptr.next = getelementptr i8, ptr %ptr, i64 1
  store <vscale x 1 x i8> zeroinitializer, ptr %ptr.next
  ret void
}

; As above, but where the base store is scalable and the subsequent store is not.
define void @merge_stores_first_scalable(ptr %ptr) {
; CHECK-LABEL: @merge_stores_first_scalable(
; CHECK-NEXT:    store <vscale x 1 x i8> zeroinitializer, ptr [[PTR:%.*]], align 1
; CHECK-NEXT:    [[PTR_NEXT:%.*]] = getelementptr i8, ptr [[PTR]], i64 1
; CHECK-NEXT:    store i8 0, ptr [[PTR_NEXT]], align 1
; CHECK-NEXT:    ret void
;
  store <vscale x 1 x i8> zeroinitializer, ptr %ptr
  %ptr.next = getelementptr i8, ptr %ptr, i64 1
  store i8 zeroinitializer, ptr %ptr.next
  ret void
}

; As above, but where the base store is not scalable and the subsequent store is.
define void @merge_stores_second_scalable(ptr %ptr) {
; CHECK-LABEL: @merge_stores_second_scalable(
; CHECK-NEXT:    store i8 0, ptr [[PTR:%.*]], align 1
; CHECK-NEXT:    [[PTR_NEXT:%.*]] = getelementptr i8, ptr [[PTR]], i64 1
; CHECK-NEXT:    store <vscale x 1 x i8> zeroinitializer, ptr [[PTR_NEXT]], align 1
; CHECK-NEXT:    ret void
;
  store i8 zeroinitializer, ptr %ptr
  %ptr.next = getelementptr i8, ptr %ptr, i64 1
  store <vscale x 1 x i8> zeroinitializer, ptr %ptr.next
  ret void
}
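
; For contrast, a minimal fixed-size sketch of the transform guarded above
; (hypothetical input, not exercised by this file): with constant store sizes,
; MemCpyOpt may fold adjacent zero stores into one memset, e.g.
;
;   store i8 0, ptr %p
;   %p.next = getelementptr i8, ptr %p, i64 1
;   store i8 0, ptr %p.next
; =>
;   call void @llvm.memset.p0.i64(ptr align 1 %p, i8 0, i64 2, i1 false)
;
; With scalable stores the total size is not a compile-time constant, so the
; transform must bail out rather than assert.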

; Check that the call-slot optimization doesn't crash when encountering scalable types.
define void @callslotoptzn(<vscale x 4 x float> %val, ptr %out) {
; CHECK-LABEL: @callslotoptzn(
; CHECK-NEXT:    [[ALLOC:%.*]] = alloca <vscale x 4 x float>, align 16
; CHECK-NEXT:    [[IDX:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
; CHECK-NEXT:    [[STRIDE:%.*]] = getelementptr inbounds float, ptr [[ALLOC]], <vscale x 4 x i32> [[IDX]]
; CHECK-NEXT:    call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VAL:%.*]], <vscale x 4 x ptr> [[STRIDE]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
; CHECK-NEXT:    [[LI:%.*]] = load <vscale x 4 x float>, ptr [[ALLOC]], align 4
; CHECK-NEXT:    store <vscale x 4 x float> [[LI]], ptr [[OUT:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %alloc = alloca <vscale x 4 x float>, align 16
  %idx = tail call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
  %stride = getelementptr inbounds float, ptr %alloc, <vscale x 4 x i32> %idx
  call void @llvm.masked.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float> %val, <vscale x 4 x ptr> %stride, i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
  %li = load <vscale x 4 x float>, ptr %alloc, align 4
  store <vscale x 4 x float> %li, ptr %out, align 4
  ret void
}

declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
declare void @llvm.masked.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
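
; For reference, a minimal fixed-size sketch of the call-slot transform guarded
; by @callslotoptzn (hypothetical input and callee, not exercised by this
; file): when a call fills a local temporary whose entire contents are then
; copied to a destination, e.g.
;
;   %tmp = alloca [16 x i8]
;   call void @writer(ptr %tmp)
;   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %tmp, i64 16, i1 false)
;
; MemCpyOpt may rewrite the call to write directly into %dst and delete the
; memcpy. That rewrite needs the copied size as a known constant, which is why
; the scalable load/store pair above must be left alone.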