; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=hwasan -S | FileCheck %s

target triple = "aarch64--linux-android10000"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

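; Power-of-two accesses of up to 16 bytes are instrumented with the outlined
; @llvm.hwasan.check.memaccess.shortgranules check. The i32 immediate is the
; access info, which (per the encoding in HWAddressSanitizer.cpp) carries
; log2(access size in bytes) in its low bits and the is-write flag in bit 4:
; the 4-, 8- and 16-byte loads below use 2, 3 and 4.
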
define void @load.v1i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @load.v1i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[P:%.*]], i32 2)
; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i32>, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  load <1 x i32>, ptr %p
  ret void
}

define void @load.v2i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @load.v2i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[P:%.*]], i32 3)
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[P]], align 8
; CHECK-NEXT:    ret void
;
  load <2 x i32>, ptr %p
  ret void
}

define void @load.v4i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @load.v4i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[P:%.*]], i32 4)
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P]], align 16
; CHECK-NEXT:    ret void
;
  load <4 x i32>, ptr %p
  ret void
}

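; Accesses wider than 16 bytes do not fit the short-granule check, so the
; pass falls back to the @__hwasan_loadN runtime call, which takes the
; address and the access size in bytes.
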
define void @load.v8i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @load.v8i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 32)
; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr [[P]], align 32
; CHECK-NEXT:    ret void
;
  load <8 x i32>, ptr %p
  ret void
}

define void @load.v16i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @load.v16i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 64)
; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, ptr [[P]], align 64
; CHECK-NEXT:    ret void
;
  load <16 x i32>, ptr %p
  ret void
}

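; Stores use the same access-info encoding with the write bit (bit 4) set,
; i.e. 16 + log2(size): the 4-, 8- and 16-byte stores below use 18, 19 and 20.
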
define void @store.v1i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @store.v1i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[P:%.*]], i32 18)
; CHECK-NEXT:    store <1 x i32> zeroinitializer, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  store <1 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.v2i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @store.v2i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[P:%.*]], i32 19)
; CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[P]], align 8
; CHECK-NEXT:    ret void
;
  store <2 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.v4i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @store.v4i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[P:%.*]], i32 20)
; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[P]], align 16
; CHECK-NEXT:    ret void
;
  store <4 x i32> zeroinitializer, ptr %p
  ret void
}

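; As with loads, stores wider than 16 bytes fall back to the
; @__hwasan_storeN runtime call.
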
define void @store.v8i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @store.v8i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 32)
; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[P]], align 32
; CHECK-NEXT:    ret void
;
  store <8 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.v16i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @store.v16i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 64)
; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[P]], align 64
; CHECK-NEXT:    ret void
;
  store <16 x i32> zeroinitializer, ptr %p
  ret void
}

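; Scalable vectors have no compile-time size, so the access size is computed
; at run time as vscale * minimum-size-in-bits / 8 (via @llvm.vscale.i64) and
; passed to @__hwasan_loadN.
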
define void @load.nxv1i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @load.nxv1i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 32
; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 [[TMP4]])
; CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 1 x i32>, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  load <vscale x 1 x i32>, ptr %p
  ret void
}

define void @load.nxv2i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @load.nxv2i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 64
; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 [[TMP4]])
; CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 2 x i32>, ptr [[P]], align 8
; CHECK-NEXT:    ret void
;
  load <vscale x 2 x i32>, ptr %p
  ret void
}

define void @load.nxv4i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @load.nxv4i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 128
; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 [[TMP4]])
; CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 4 x i32>, ptr [[P]], align 16
; CHECK-NEXT:    ret void
;
  load <vscale x 4 x i32>, ptr %p
  ret void
}

define void @load.nxv8i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @load.nxv8i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 256
; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 [[TMP4]])
; CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 8 x i32>, ptr [[P]], align 32
; CHECK-NEXT:    ret void
;
  load <vscale x 8 x i32>, ptr %p
  ret void
}

define void @load.nxv16i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @load.nxv16i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 512
; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 [[TMP4]])
; CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i32>, ptr [[P]], align 64
; CHECK-NEXT:    ret void
;
  load <vscale x 16 x i32>, ptr %p
  ret void
}

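; Scalable stores compute the size the same way and call @__hwasan_storeN.
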
define void @store.nxv1i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @store.nxv1i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 32
; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 [[TMP4]])
; CHECK-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  store <vscale x 1 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.nxv2i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @store.nxv2i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 64
; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 [[TMP4]])
; CHECK-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[P]], align 8
; CHECK-NEXT:    ret void
;
  store <vscale x 2 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.nxv4i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @store.nxv4i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 128
; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 [[TMP4]])
; CHECK-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[P]], align 16
; CHECK-NEXT:    ret void
;
  store <vscale x 4 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.nxv8i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @store.nxv8i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 256
; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 [[TMP4]])
; CHECK-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[P]], align 32
; CHECK-NEXT:    ret void
;
  store <vscale x 8 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.nxv16i32(ptr %p) sanitize_hwaddress {
; CHECK-LABEL: @store.nxv16i32(
; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 512
; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 [[TMP4]])
; CHECK-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[P]], align 64
; CHECK-NEXT:    ret void
;
  store <vscale x 16 x i32> zeroinitializer, ptr %p
  ret void
}