; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s | \
; RUN: FileCheck %s --check-prefix=LA64

;; TODO: Testing for LA32 architecture will be added later.
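
;; i8/i16 min/max have no native LoongArch atomic instruction, so they are
;; expanded below into masked 32-bit ll.w/sc.w loops (the signed variants
;; additionally sign-extend the loaded field before comparing), while the
;; i32/i64 operations lower directly to ammax_db.*/ammin_db.* instructions.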
define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB0_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB0_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB0_3: # in Loop: Header=BB0_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB0_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    slli.d $a3, $a0, 3
; LA64-NEXT:    sll.w $a2, $a2, $a3
; LA64-NEXT:    addi.w $a2, $a2, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a3
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a2
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB1_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB1_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a2
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB1_3: # in Loop: Header=BB1_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB1_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a3
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB4_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB4_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB4_3: # in Loop: Header=BB4_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB4_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    slli.d $a3, $a0, 3
; LA64-NEXT:    sll.w $a2, $a2, $a3
; LA64-NEXT:    addi.w $a2, $a2, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a3
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a2
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB5_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB5_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a2
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB5_3: # in Loop: Header=BB5_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB5_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a3
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a4, $a2, 24
; LA64-NEXT:    xori $a4, $a4, 56
; LA64-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a3
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a4
; LA64-NEXT:    sra.w $a7, $a7, $a4
; LA64-NEXT:    bge $a7, $a1, .LBB8_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB8_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a3
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB8_3: # in Loop: Header=BB8_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB8_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 48
; LA64-NEXT:    sub.d $a3, $a4, $a3
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    addi.w $a4, $a4, 0
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB9_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB9_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB9_3: # in Loop: Header=BB9_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB9_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i8_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a4, $a2, 24
; LA64-NEXT:    xori $a4, $a4, 56
; LA64-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a3
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a4
; LA64-NEXT:    sra.w $a7, $a7, $a4
; LA64-NEXT:    bge $a1, $a7, .LBB12_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB12_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a3
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB12_3: # in Loop: Header=BB12_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB12_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i8 %b acquire
  ret i8 %1
}

define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i16_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 48
; LA64-NEXT:    sub.d $a3, $a4, $a3
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    addi.w $a4, $a4, 0
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB13_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB13_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB13_3: # in Loop: Header=BB13_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB13_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i16 %b acquire
  ret i16 %1
}

define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i32_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i32 %b acquire
  ret i32 %1
}

define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i64_acquire:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i64 %b acquire
  ret i64 %1
}

define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB16_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB16_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB16_3: # in Loop: Header=BB16_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB16_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i8 %b release
  ret i8 %1
}

define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i16_release:
; LA64:       # %bb.0:
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    slli.d $a3, $a0, 3
; LA64-NEXT:    sll.w $a2, $a2, $a3
; LA64-NEXT:    addi.w $a2, $a2, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a3
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a2
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB17_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB17_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a2
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB17_3: # in Loop: Header=BB17_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB17_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a3
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i16 %b release
  ret i16 %1
}

define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i32_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i32 %b release
  ret i32 %1
}

define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i64_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i64 %b release
  ret i64 %1
}

define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB20_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB20_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB20_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB20_3: # in Loop: Header=BB20_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB20_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i8 %b release
  ret i8 %1
}

define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i16_release:
; LA64:       # %bb.0:
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    slli.d $a3, $a0, 3
; LA64-NEXT:    sll.w $a2, $a2, $a3
; LA64-NEXT:    addi.w $a2, $a2, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a3
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a2
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB21_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB21_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a2
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB21_3: # in Loop: Header=BB21_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB21_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a3
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i16 %b release
  ret i16 %1
}

define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i32_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i32 %b release
  ret i32 %1
}

define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i64_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i64 %b release
  ret i64 %1
}

define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a4, $a2, 24
; LA64-NEXT:    xori $a4, $a4, 56
; LA64-NEXT:  .LBB24_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a3
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a4
; LA64-NEXT:    sra.w $a7, $a7, $a4
; LA64-NEXT:    bge $a7, $a1, .LBB24_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB24_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a3
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB24_3: # in Loop: Header=BB24_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB24_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i8 %b release
  ret i8 %1
}

define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i16_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 48
; LA64-NEXT:    sub.d $a3, $a4, $a3
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    addi.w $a4, $a4, 0
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB25_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB25_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB25_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB25_3: # in Loop: Header=BB25_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB25_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i16 %b release
  ret i16 %1
}

define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i32_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i32 %b release
  ret i32 %1
}

define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i64_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i64 %b release
  ret i64 %1
}

define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i8_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a4, $a2, 24
; LA64-NEXT:    xori $a4, $a4, 56
; LA64-NEXT:  .LBB28_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a3
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a4
; LA64-NEXT:    sra.w $a7, $a7, $a4
; LA64-NEXT:    bge $a1, $a7, .LBB28_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB28_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a3
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB28_3: # in Loop: Header=BB28_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB28_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i8 %b release
  ret i8 %1
}

define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i16_release:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 48
; LA64-NEXT:    sub.d $a3, $a4, $a3
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    addi.w $a4, $a4, 0
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB29_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB29_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB29_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB29_3: # in Loop: Header=BB29_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB29_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i16 %b release
  ret i16 %1
}

define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i32_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i32 %b release
  ret i32 %1
}

define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i64_release:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i64 %b release
  ret i64 %1
}

define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i8_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB32_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB32_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB32_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB32_3: # in Loop: Header=BB32_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB32_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i8 %b acq_rel
  ret i8 %1
}

define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i16_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    slli.d $a3, $a0, 3
; LA64-NEXT:    sll.w $a2, $a2, $a3
; LA64-NEXT:    addi.w $a2, $a2, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a3
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB33_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a2
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB33_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB33_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a2
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB33_3: # in Loop: Header=BB33_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB33_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a3
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i16 %b acq_rel
  ret i16 %1
}

define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i32_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i32 %b acq_rel
  ret i32 %1
}

define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i64_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i64 %b acq_rel
  ret i64 %1
}

define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i8_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB36_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB36_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB36_3: # in Loop: Header=BB36_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB36_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i8 %b acq_rel
  ret i8 %1
}

define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i16_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    slli.d $a3, $a0, 3
; LA64-NEXT:    sll.w $a2, $a2, $a3
; LA64-NEXT:    addi.w $a2, $a2, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a3
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a2
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB37_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB37_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a2
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB37_3: # in Loop: Header=BB37_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB37_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a3
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i16 %b acq_rel
  ret i16 %1
}

define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i32_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i32 %b acq_rel
  ret i32 %1
}

define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i64_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i64 %b acq_rel
  ret i64 %1
}

define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i8_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a4, $a2, 24
; LA64-NEXT:    xori $a4, $a4, 56
; LA64-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a3
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a4
; LA64-NEXT:    sra.w $a7, $a7, $a4
; LA64-NEXT:    bge $a7, $a1, .LBB40_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB40_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a3
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB40_3: # in Loop: Header=BB40_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB40_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i8 %b acq_rel
  ret i8 %1
}

define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i16_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 48
; LA64-NEXT:    sub.d $a3, $a4, $a3
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    addi.w $a4, $a4, 0
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB41_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB41_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB41_3: # in Loop: Header=BB41_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB41_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i16 %b acq_rel
  ret i16 %1
}

define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i32_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i32 %b acq_rel
  ret i32 %1
}

define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i64_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i64 %b acq_rel
  ret i64 %1
}

define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i8_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a4, $a2, 24
; LA64-NEXT:    xori $a4, $a4, 56
; LA64-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a3
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a4
; LA64-NEXT:    sra.w $a7, $a7, $a4
; LA64-NEXT:    bge $a1, $a7, .LBB44_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB44_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a3
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB44_3: # in Loop: Header=BB44_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB44_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i8 %b acq_rel
  ret i8 %1
}

define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i16_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 48
; LA64-NEXT:    sub.d $a3, $a4, $a3
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    addi.w $a4, $a4, 0
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB45_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB45_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB45_3: # in Loop: Header=BB45_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB45_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i16 %b acq_rel
  ret i16 %1
}

define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i32_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i32 %b acq_rel
  ret i32 %1
}

define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i64_acq_rel:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i64 %b acq_rel
  ret i64 %1
}

define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i8_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB48_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB48_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB48_3: # in Loop: Header=BB48_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB48_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i8 %b seq_cst
  ret i8 %1
}

define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i16_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    slli.d $a3, $a0, 3
; LA64-NEXT:    sll.w $a2, $a2, $a3
; LA64-NEXT:    addi.w $a2, $a2, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a3
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a2
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB49_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB49_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a2
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB49_3: # in Loop: Header=BB49_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB49_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a3
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i16 %b seq_cst
  ret i16 %1
}

define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i32_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i32 %b seq_cst
  ret i32 %1
}

define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i64_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i64 %b seq_cst
  ret i64 %1
}

define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i8_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB52_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB52_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB52_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB52_3: # in Loop: Header=BB52_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB52_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i8 %b seq_cst
  ret i8 %1
}

define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i16_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    slli.d $a3, $a0, 3
; LA64-NEXT:    sll.w $a2, $a2, $a3
; LA64-NEXT:    addi.w $a2, $a2, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a3
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB53_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a2
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB53_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB53_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a2
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB53_3: # in Loop: Header=BB53_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB53_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a3
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i16 %b seq_cst
  ret i16 %1
}

define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i32_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i32 %b seq_cst
  ret i32 %1
}

define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i64_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i64 %b seq_cst
  ret i64 %1
}

define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i8_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a4, $a2, 24
; LA64-NEXT:    xori $a4, $a4, 56
; LA64-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a3
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a4
; LA64-NEXT:    sra.w $a7, $a7, $a4
; LA64-NEXT:    bge $a7, $a1, .LBB56_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB56_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a3
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB56_3: # in Loop: Header=BB56_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB56_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i8 %b seq_cst
  ret i8 %1
}

define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i16_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 48
; LA64-NEXT:    sub.d $a3, $a4, $a3
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    addi.w $a4, $a4, 0
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB57_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB57_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB57_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB57_3: # in Loop: Header=BB57_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB57_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i16 %b seq_cst
  ret i16 %1
}

define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i32_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i32 %b seq_cst
  ret i32 %1
}

define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i64_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i64 %b seq_cst
  ret i64 %1
}

define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i8_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a4, $a2, 24
; LA64-NEXT:    xori $a4, $a4, 56
; LA64-NEXT:  .LBB60_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a3
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a4
; LA64-NEXT:    sra.w $a7, $a7, $a4
; LA64-NEXT:    bge $a1, $a7, .LBB60_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB60_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a3
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB60_3: # in Loop: Header=BB60_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB60_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i8 %b seq_cst
  ret i8 %1
}

define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i16_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 48
; LA64-NEXT:    sub.d $a3, $a4, $a3
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    addi.w $a4, $a4, 0
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB61_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB61_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB61_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB61_3: # in Loop: Header=BB61_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB61_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i16 %b seq_cst
  ret i16 %1
}

define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i32_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i32 %b seq_cst
  ret i32 %1
}

define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i64_seq_cst:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i64 %b seq_cst
  ret i64 %1
}

define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i8_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB64_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB64_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB64_3: # in Loop: Header=BB64_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB64_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i8 %b monotonic
  ret i8 %1
}

define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i16_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    slli.d $a3, $a0, 3
; LA64-NEXT:    sll.w $a2, $a2, $a3
; LA64-NEXT:    addi.w $a2, $a2, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a3
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB65_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a2
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a6, $a1, .LBB65_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB65_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a2
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB65_3: # in Loop: Header=BB65_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB65_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a3
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i16 %b monotonic
  ret i16 %1
}

define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i32_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i32 %b monotonic
  ret i32 %1
}

define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umax_i64_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umax ptr %a, i64 %b monotonic
  ret i64 %1
}

define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i8_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB68_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a3
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB68_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB68_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a3
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB68_3: # in Loop: Header=BB68_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB68_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i8 %b monotonic
  ret i8 %1
}

define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i16_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    lu12i.w $a2, 15
; LA64-NEXT:    ori $a2, $a2, 4095
; LA64-NEXT:    slli.d $a3, $a0, 3
; LA64-NEXT:    sll.w $a2, $a2, $a3
; LA64-NEXT:    addi.w $a2, $a2, 0
; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT:    sll.w $a1, $a1, $a3
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB69_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a4, $a0, 0
; LA64-NEXT:    and $a6, $a4, $a2
; LA64-NEXT:    move $a5, $a4
; LA64-NEXT:    bgeu $a1, $a6, .LBB69_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB69_1 Depth=1
; LA64-NEXT:    xor $a5, $a4, $a1
; LA64-NEXT:    and $a5, $a5, $a2
; LA64-NEXT:    xor $a5, $a4, $a5
; LA64-NEXT:  .LBB69_3: # in Loop: Header=BB69_1 Depth=1
; LA64-NEXT:    sc.w $a5, $a0, 0
; LA64-NEXT:    beqz $a5, .LBB69_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a4, $a3
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i16 %b monotonic
  ret i16 %1
}

define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i32_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.wu $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i32 %b monotonic
  ret i32 %1
}

define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_umin_i64_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.du $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw umin ptr %a, i64 %b monotonic
  ret i64 %1
}

define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i8_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a4, $a2, 24
; LA64-NEXT:    xori $a4, $a4, 56
; LA64-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a3
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a4
; LA64-NEXT:    sra.w $a7, $a7, $a4
; LA64-NEXT:    bge $a7, $a1, .LBB72_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB72_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a3
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB72_3: # in Loop: Header=BB72_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB72_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i8 %b monotonic
  ret i8 %1
}

define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i16_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 48
; LA64-NEXT:    sub.d $a3, $a4, $a3
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    addi.w $a4, $a4, 0
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a7, $a1, .LBB73_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB73_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB73_3: # in Loop: Header=BB73_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB73_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i16 %b monotonic
  ret i16 %1
}

define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i32_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i32 %b monotonic
  ret i32 %1
}

define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_max_i64_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammax_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw max ptr %a, i64 %b monotonic
  ret i64 %1
}

define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i8_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    sll.w $a3, $a3, $a2
; LA64-NEXT:    addi.w $a3, $a3, 0
; LA64-NEXT:    ext.w.b $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:    andi $a4, $a2, 24
; LA64-NEXT:    xori $a4, $a4, 56
; LA64-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a3
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a4
; LA64-NEXT:    sra.w $a7, $a7, $a4
; LA64-NEXT:    bge $a1, $a7, .LBB76_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB76_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a3
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB76_3: # in Loop: Header=BB76_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB76_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i8 %b monotonic
  ret i8 %1
}

define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i16_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    slli.d $a2, $a0, 3
; LA64-NEXT:    andi $a3, $a2, 24
; LA64-NEXT:    ori $a4, $zero, 48
; LA64-NEXT:    sub.d $a3, $a4, $a3
; LA64-NEXT:    lu12i.w $a4, 15
; LA64-NEXT:    ori $a4, $a4, 4095
; LA64-NEXT:    sll.w $a4, $a4, $a2
; LA64-NEXT:    addi.w $a4, $a4, 0
; LA64-NEXT:    ext.w.h $a1, $a1
; LA64-NEXT:    sll.w $a1, $a1, $a2
; LA64-NEXT:    addi.w $a1, $a1, 0
; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
; LA64-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT:    ll.w $a5, $a0, 0
; LA64-NEXT:    and $a7, $a5, $a4
; LA64-NEXT:    move $a6, $a5
; LA64-NEXT:    sll.w $a7, $a7, $a3
; LA64-NEXT:    sra.w $a7, $a7, $a3
; LA64-NEXT:    bge $a1, $a7, .LBB77_3
; LA64-NEXT:  # %bb.2: # in Loop: Header=BB77_1 Depth=1
; LA64-NEXT:    xor $a6, $a5, $a1
; LA64-NEXT:    and $a6, $a6, $a4
; LA64-NEXT:    xor $a6, $a5, $a6
; LA64-NEXT:  .LBB77_3: # in Loop: Header=BB77_1 Depth=1
; LA64-NEXT:    sc.w $a6, $a0, 0
; LA64-NEXT:    beqz $a6, .LBB77_1
; LA64-NEXT:  # %bb.4:
; LA64-NEXT:    srl.w $a0, $a5, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i16 %b monotonic
  ret i16 %1
}

define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i32_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.w $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i32 %b monotonic
  ret i32 %1
}

define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA64-LABEL: atomicrmw_min_i64_monotonic:
; LA64:       # %bb.0:
; LA64-NEXT:    ammin_db.d $a2, $a1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    ret
  %1 = atomicrmw min ptr %a, i64 %b monotonic
  ret i64 %1
}