//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts. May rewrite
  // the instruction in place.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB,
                    MachineRegisterInfo &MRI);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI, MachineRegisterInfo &MRI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB,
                  MachineRegisterInfo &MRI, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSExtInreg(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB,
                    MachineRegisterInfo &MRI) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB,
                       MachineRegisterInfo &MRI) const;
  bool selectIntrinsicWithSideEffects(MachineInstr &MI, MachineIRBuilder &MIB,
                                      MachineRegisterInfo &MRI) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
                           MachineRegisterInfo &MRI) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root) const;
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                 int OpIdx) const;

  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name
  // of the Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

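// ComplexPattern matcher for shift amounts. Shifts only read the low
// log2(ShiftWidth) bits of the amount, so operations on the amount that are
// redundant modulo the shift width can be folded away. A sketch of the
// matches below (not an exhaustive list), assuming ShiftWidth == 32:
//   (and X, 31)  --> X
//   (add X, 32)  --> X
//   (sub 32, X)  --> (neg X)
//   (sub 31, X)  --> (not X)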
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;
  MachineRegisterInfo &MRI = MF->getRegInfo();

  Register RootReg = Root.getReg();
  Register ShAmtReg = RootReg;
  const LLT ShiftLLT = MRI.getType(RootReg);
  unsigned ShiftWidth = ShiftLLT.getSizeInBits();
  assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, MRI, m_GZExt(m_Reg(ZExtSrcReg)))) {
    ShAmtReg = ZExtSrcReg;
  }

  APInt AndMask;
  Register AndSrcReg;
  if (mi_match(ShAmtReg, MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask, so try restoring
      // any bits that are known zero.
      KnownBits Known = KB->getKnownBits(ShAmtReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  }

  APInt Imm;
  Register Reg;
  if (mi_match(ShAmtReg, MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
      // to avoid the ADD.
      ShAmtReg = Reg;
  } else if (mi_match(ShAmtReg, MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
      ShAmtReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by
      // ~X to generate a NOT instead of a SUB of a constant.
      ShAmtReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}

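// ComplexPattern matcher for the non-shifted operand of Zba's shift-and-add
// instructions (sh1add/sh2add/sh3add), which already shift their first
// operand left by ShAmt. The shapes matched below re-express a shift+mask
// as a single SRLI/SRLIW feeding the SHXADD. For example (a sketch derived
// from the second case below), with ShAmt == 2, (and (lshr y, c2), mask)
// where mask has c2 leading and two trailing zeros becomes (SRLI y, c2 + 2)
// fed into SH2ADD.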
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  MachineFunction &MF = *Root.getParent()->getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) in which mask has no leading zeros and
      // c3 trailing zeros, we can use an SRLI by c3 - c2 followed by a SHXADD.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
      // c3 trailing zeros, we can use an SRLI by c2 + c3 followed by a SHXADD.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
    // c3 trailing zeros. If c2 + c3 == ShAmt, we can emit SRLIW + SHXADD.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
      // c3 trailing zeros. If c3 - c2 == ShAmt, we can emit SRLIW + SHXADD.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

    if (Cond) {
      Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}

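// ComplexPattern matcher for the shifted operand of Zba's sh1add.uw/
// sh2add.uw/sh3add.uw instructions, which zero-extend the low 32 bits of
// their first operand before shifting it left by ShAmt. A single shape is
// handled here: (and (shl x, c2), mask), where the mask keeps exactly the
// bits the .uw form would extract, becomes (SLLI x, c2 - ShAmt) fed into
// the SHXADD_UW.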
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  MachineFunction &MF = *Root.getParent()->getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask) in which mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros, we can use an SLLI by
  // c2 - ShAmt followed by a SHXADD_UW with ShAmt as the shift amount.
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}

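// ComplexPattern matcher for reg+simm12 addressing. Folds a frame index, or
// a base plus a constant offset that fits in 12 signed bits, into the
// base/offset operands of a load or store; anything else is rendered as
// (Root, 0). For example, a G_FRAME_INDEX root renders as the frame index
// with offset 0.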
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  MachineFunction &MF = *Root.getParent()->getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}

/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate
/// CC. CC must be an ICMP predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

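// Derive the (CC, LHS, RHS) triple for a conditional branch from CondReg.
// If CondReg is not defined by a G_ICMP, fall back to (CondReg != x0). A
// few canonicalizations make the result cheaper to encode, for example:
//   (icmp sgt X, -1) --> (icmp sge X, x0)   ; compare against zero
//   (icmp sgt X, Y)  --> (icmp slt Y, X)    ; swap to a supported CC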
static void getOperandsForBranch(Register CondReg, MachineRegisterInfo &MRI,
                                 RISCVCC::CondCode &CC, Register &LHS,
                                 Register &RHS) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp, do some canonicalizations.

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches, but they can
    // be handled by changing the direction of the CC and swapping LHS and
    // RHS.
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
  return;
}

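// Top-level selection entry point. Runs preISelLower first, then the
// TableGen-erated selectImpl, and finally falls back to the hand-written
// cases below for opcodes the imported patterns cannot handle.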
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB, MRI);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI.getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI, MRI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
    return selectCopy(MI, MRI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    APInt Imm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(DstReg).getSizeInBits();
    if (Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;

      unsigned Opcode = Size == 64 ? RISCV::FMV_D_X : RISCV::FMV_W_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");
      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, MRI, GV->isDSOLocal(),
                      GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), MRI, CC, LHS, RHS);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRJT: {
    // FIXME: Move to legalization?
    const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
    unsigned EntrySize = MJTI->getEntrySize(MF.getDataLayout());
    assert((EntrySize == 4 || (Subtarget->is64Bit() && EntrySize == 8)) &&
           "Unsupported jump-table entry size");
    assert(
        (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_Custom32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_BlockAddress) &&
        "Unexpected jump-table entry kind");

    auto SLL =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {MI.getOperand(2)})
            .addImm(Log2_32(EntrySize));
    if (!SLL.constrainAllUses(TII, TRI, RBI))
      return false;

    // TODO: Use SHXADD. Moving to legalization would fix this automatically.
    auto ADD = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                              {MI.getOperand(0), SLL.getReg(0)});
    if (!ADD.constrainAllUses(TII, TRI, RBI))
      return false;

    unsigned LdOpc = EntrySize == 8 ? RISCV::LD : RISCV::LW;
    auto Dest =
        MIB.buildInstr(LdOpc, {&RISCV::GPRRegClass}, {ADD.getReg(0)})
            .addImm(0)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo::getJumpTable(MF), MachineMemOperand::MOLoad,
                EntrySize, Align(MJTI->getEntryAlignment(MF.getDataLayout()))));
    if (!Dest.constrainAllUses(TII, TRI, RBI))
      return false;

    // If the Kind is EK_LabelDifference32, the table stores an offset from
    // the location of the table. Add the table address to get an absolute
    // address.
    if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32) {
      Dest = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                            {Dest.getReg(0), MI.getOperand(0)});
      if (!Dest.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    auto Branch =
        MIB.buildInstr(RISCV::PseudoBRIND, {}, {Dest.getReg(0)}).addImm(0);
    if (!Branch.constrainAllUses(TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SEXT_INREG:
    return selectSExtInreg(MI, MIB);
  case TargetOpcode::G_FRAME_INDEX: {
    // TODO: We may want to replace this code with the SelectionDAG patterns,
    // which fail to get imported because they use FrameAddrRegImm, which is
    // a ComplexPattern.
    MI.setDesc(TII.get(RISCV::ADDI));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB, MRI);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB, MRI);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI, MIB, MRI);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB, MRI);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(MI, MIB, MRI);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB, MRI);
  default:
    return false;
  }
}

bool RISCVInstructionSelector::selectMergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_MERGE_VALUES);

  // Build an F64 pair from operands.
  if (MI.getNumOperands() != 3)
    return false;
  Register Dst = MI.getOperand(0).getReg();
  Register Lo = MI.getOperand(1).getReg();
  Register Hi = MI.getOperand(2).getReg();
  if (!isRegInFprb(Dst, MRI) || !isRegInGprb(Lo, MRI) || !isRegInGprb(Hi, MRI))
    return false;
  MI.setDesc(TII.get(RISCV::BuildPairF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  // Split an F64 Src into two s32 parts.
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src, MRI) || !isRegInGprb(Lo, MRI) || !isRegInGprb(Hi, MRI))
    return false;
  MI.setDesc(TII.get(RISCV::SplitF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB,
                                                 MachineRegisterInfo &MRI) {
  Register PtrReg = Op.getReg();
  assert(MRI.getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI.setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

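// Rewrite pointer-typed generic instructions into their integer equivalents
// so the imported TableGen patterns can select them. For example,
// (G_PTR_ADD p, off) becomes (G_ADD (G_PTRTOINT p), off) with an sXLen
// result type.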
void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB, MRI);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI.setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB, MRI);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI.setType(DstReg, sXLen);
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderImm(MachineInstrBuilder &MIB,
                                         const MachineInstr &MI,
                                         int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal);
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  // TODO: Non-GPR register classes.
  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg,
                                           MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg,
                                           MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI,
                                          MachineRegisterInfo &MRI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), *RBI.getRegBank(DstReg, MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), *RBI.getRegBank(DstReg, MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

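// Materialize Imm into DstReg using the instruction sequence computed by
// RISCVMatInt, chaining each instruction through a fresh virtual register.
// As a rough illustration (the exact sequence is decided by RISCVMatInt,
// not here), 0x12345678 on RV32 would arrive as LUI 0x12345 followed by
// ADDI 0x678.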
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  MachineRegisterInfo &MRI = *MIB.getMRI();

  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, MRI);
    return true;
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI.createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}

bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB,
                                          MachineRegisterInfo &MRI,
                                          bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI.getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled, they
  // should be accessed via the GOT, since the tagged address of a global is
  // incompatible with existing code models. This also applies to non-pic
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    reportGISelFailure(const_cast<MachineFunction &>(*MF), *TPC, *MORE,
                       getName(), "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
      return false;

    auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
                      .addDisp(DispMO, 0, RISCVII::MO_LO);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF),
          MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
              MachineMemOperand::MOInvariant,
          DefTy, Align(DefTy.getSizeInBits() / 8));

      auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                        .addDisp(DispMO, 0)
                        .addMemOperand(MemOp);

      if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
        return false;

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }

  return false;
}

bool RISCVInstructionSelector::selectSExtInreg(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  if (!STI.isRV64())
    return false;

  const MachineOperand &Size = MI.getOperand(2);
  // Only Size == 32 (i.e. shift by 32 bits) is acceptable at this point.
  if (!Size.isImm() || Size.getImm() != 32)
    return false;

  const MachineOperand &Src = MI.getOperand(1);
  const MachineOperand &Dst = MI.getOperand(0);
  // addiw rd, rs, 0 (i.e. sext.w rd, rs)
  MachineInstr *NewMI =
      MIB.buildInstr(RISCV::ADDIW, {Dst.getReg()}, {Src.getReg()}).addImm(0U);

  if (!constrainSelectedInstRegOperands(*NewMI, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}

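// Select G_SELECT into a Select_*_Using_CC_GPR pseudo, reusing the branch
// canonicalization above for the condition. The pseudo carries
// (LHS, RHS, CC, TrueV, FalseV) and is expanded into a branch sequence
// after selection.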
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), MRI, CC, LHS, RHS);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI.getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}

// Convert an FCMP predicate to one of the supported F or D instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 32 || Size == 64) && "Unsupported size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
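// For example, (fcmp ogt a, b) becomes (FLT b, a) by swapping, and
// (fcmp une a, b) becomes (XORI (FEQ a, b), 1) by inverting.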
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}

// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB,
                                               MachineRegisterInfo &MRI) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI.getType(LHS).getSizeInBits();
  assert((Size == 32 || Size == 64) && "Unexpected size");

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    // FIXME: If LHS and RHS are the same we can use a single FEQ.
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
         "Unexpected opcode");
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(MI).getIntrinsicID();

  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::trap:
    MIB.buildInstr(RISCV::UNIMP, {}, {});
    break;
  case Intrinsic::debugtrap:
    MIB.buildInstr(RISCV::EBREAK, {}, {});
    break;
  }

  MI.eraseFromParent();
  return true;
}

void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      MIB.buildInstr(RISCV::FENCE, {}, {})
          .addImm(RISCVFenceField::R | RISCVFenceField::W)
          .addImm(RISCVFenceField::R | RISCVFenceField::W);
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  }
  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}

namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                               RISCVSubtarget &Subtarget,
                               RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm