radeon/llvm: Remove AMDILMachinePeephole pass

This commit is contained in:
Tom Stellard 2012-05-23 14:41:02 -04:00
parent e9d8901a80
commit 3059c075a7
4 changed files with 0 additions and 177 deletions

View file

@@ -95,10 +95,6 @@ FunctionPass*
FunctionPass*
createAMDILPeepholeOpt(TargetMachine &TM AMDIL_OPT_LEVEL_DECL);
/// Pre regalloc passes.
FunctionPass*
createAMDILMachinePeephole(TargetMachine &TM AMDIL_OPT_LEVEL_DECL);
/// Pre emit passes.
FunctionPass*
createAMDILCFGPreparationPass(TargetMachine &TM AMDIL_OPT_LEVEL_DECL);

View file

@@ -1,170 +0,0 @@
//===-- AMDILMachinePeephole.cpp - AMDIL Machine Peephole Pass -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
#include "AMDIL.h"
#include "AMDILInstrInfo.h"
#include "AMDILSubtarget.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
namespace
{
// Late machine-level peephole pass for the AMDIL backend. It runs over
// MachineInstrs to (a) rewrite local/region atomics into global atomics on
// devices that emulate those memory spaces in software, (b) mask the shift
// amount of i8/i16 unsigned shifts on devices without native byte/short ops,
// and (c) bracket volatile loads/stores with FENCE instructions.
class AMDILMachinePeephole : public MachineFunctionPass
{
public:
// Pass identification; its address uniquely identifies the pass.
static char ID;
AMDILMachinePeephole(TargetMachine &tm AMDIL_OPT_LEVEL_DECL);
//virtual ~AMDILMachinePeephole();
// Human-readable name shown by the pass manager.
virtual const char*
getPassName() const;
// Main entry point: returns true if MF was modified.
virtual bool
runOnMachineFunction(MachineFunction &MF);
private:
// Inserts a FENCE before and after the instruction at MIB (used around
// volatile memory operations) and advances MIB past the trailing fence.
void insertFence(MachineBasicBlock::iterator &MIB);
// Target machine; source of subtarget and instruction info.
TargetMachine &TM;
// Debug-trace flag; currently always false (no tracing emitted).
bool mDebug;
}; // AMDILMachinePeephole
char AMDILMachinePeephole::ID = 0;
} // anonymous namespace
namespace llvm
{
/// Factory hook used by the target's pass configuration to instantiate
/// the AMDIL machine peephole pass.
FunctionPass*
createAMDILMachinePeephole(TargetMachine &tm AMDIL_OPT_LEVEL_DECL)
{
  AMDILMachinePeephole *peephole =
      new AMDILMachinePeephole(tm AMDIL_OPT_LEVEL_VAR);
  return peephole;
}
} // llvm namespace
// Constructor: records the target machine and leaves debug tracing off.
AMDILMachinePeephole::AMDILMachinePeephole(TargetMachine &tm AMDIL_OPT_LEVEL_DECL)
  : MachineFunctionPass(ID), TM(tm)
{
  mDebug = false; // no trace output by default
}
/// Walk every instruction in MF and apply three peepholes:
///  1. Local/region atomics on devices that emulate those address spaces in
///     software are rewritten into the equivalent global atomic (for local
///     memory, the address is first rebased via T2 into the global segment).
///  2. Volatile loads/stores are bracketed with FENCE instructions.
///  3. Unsigned i8/i16 shifts on devices without native byte/short ops get
///     their source masked (0xFF / 0xFFFF) so high garbage bits don't leak
///     into the shift result.
/// Returns true iff any instruction was inserted or rewritten.
bool
AMDILMachinePeephole::runOnMachineFunction(MachineFunction &MF)
{
  bool Changed = false;
  const AMDILSubtarget *STM = &TM.getSubtarget<AMDILSubtarget>();
  const AMDILInstrInfo *AMDILII =
      static_cast<const AMDILInstrInfo *>(TM.getInstrInfo());
  for (MachineFunction::iterator MBB = MF.begin(), MBE = MF.end();
       MBB != MBE; ++MBB) {
    MachineBasicBlock *mb = MBB;
    for (MachineBasicBlock::iterator MIB = mb->begin(), MIE = mb->end();
         MIB != MIE; ++MIB) {
      MachineInstr *mi = MIB;
      const char *name = TM.getInstrInfo()->getName(mi->getOpcode());
      switch (mi->getOpcode()) {
      default:
        if (AMDILII->isAtomicInst(mi)) {
          // If we don't support the hardware accelerated address spaces,
          // then the atomic needs to be transformed to the global atomic.
          if (strstr(name, "_L_")
              && STM->device()->usesSoftware(AMDILDeviceInfo::LocalMem)) {
            // Rebase the local address into the global segment (T2 holds
            // the software local-memory base), then retarget the opcode
            // from the ATOM_L_* range to the parallel ATOM_G_* range.
            BuildMI(*mb, MIB, mi->getDebugLoc(),
                    TM.getInstrInfo()->get(AMDIL::ADD_i32), AMDIL::R1011)
                .addReg(mi->getOperand(1).getReg())
                .addReg(AMDIL::T2);
            mi->getOperand(1).setReg(AMDIL::R1011);
            mi->setDesc(
                TM.getInstrInfo()->get(
                    (mi->getOpcode() - AMDIL::ATOM_L_ADD) + AMDIL::ATOM_G_ADD));
            Changed = true;
          } else if (strstr(name, "_R_")
                     && STM->device()->usesSoftware(AMDILDeviceInfo::RegionMem)) {
            // Software-emulated region memory is unimplemented; the opcode
            // retarget below keeps release builds limping along.
            assert(!"Software region memory is not supported!");
            mi->setDesc(
                TM.getInstrInfo()->get(
                    (mi->getOpcode() - AMDIL::ATOM_R_ADD) + AMDIL::ATOM_G_ADD));
            Changed = true;
          }
        } else if ((AMDILII->isLoadInst(mi) || AMDILII->isStoreInst(mi))
                   && AMDILII->isVolatileInst(mi)) {
          insertFence(MIB);
          Changed = true;
        }
        continue; // default case handled; (unreachable break removed)
      case AMDIL::USHR_i16:
      case AMDIL::USHR_v2i16:
      case AMDIL::USHR_v4i16:
      case AMDIL::USHRVEC_i16:
      case AMDIL::USHRVEC_v2i16:
      case AMDIL::USHRVEC_v4i16:
        // Devices without native 16-bit ops emulate shorts in 32-bit regs;
        // mask the source to 16 bits before the unsigned shift.
        if (TM.getSubtarget<AMDILSubtarget>()
                .device()->usesSoftware(AMDILDeviceInfo::ShortOps)) {
          unsigned lReg = MF.getRegInfo()
                              .createVirtualRegister(&AMDIL::GPRI32RegClass);
          unsigned Reg = MF.getRegInfo()
                             .createVirtualRegister(&AMDIL::GPRV4I32RegClass);
          BuildMI(*mb, MIB, mi->getDebugLoc(),
                  TM.getInstrInfo()->get(AMDIL::LOADCONST_i32),
                  lReg).addImm(0xFFFF);
          BuildMI(*mb, MIB, mi->getDebugLoc(),
                  TM.getInstrInfo()->get(AMDIL::BINARY_AND_v4i32),
                  Reg)
              .addReg(mi->getOperand(1).getReg())
              .addReg(lReg);
          mi->getOperand(1).setReg(Reg);
          Changed = true;
        }
        break;
      case AMDIL::USHR_i8:
      case AMDIL::USHR_v2i8:
      case AMDIL::USHR_v4i8:
      case AMDIL::USHRVEC_i8:
      case AMDIL::USHRVEC_v2i8:
      case AMDIL::USHRVEC_v4i8:
        // Same as above for 8-bit types: mask the source to 8 bits.
        if (TM.getSubtarget<AMDILSubtarget>()
                .device()->usesSoftware(AMDILDeviceInfo::ByteOps)) {
          unsigned lReg = MF.getRegInfo()
                              .createVirtualRegister(&AMDIL::GPRI32RegClass);
          unsigned Reg = MF.getRegInfo()
                             .createVirtualRegister(&AMDIL::GPRV4I32RegClass);
          BuildMI(*mb, MIB, mi->getDebugLoc(),
                  TM.getInstrInfo()->get(AMDIL::LOADCONST_i32),
                  lReg).addImm(0xFF);
          BuildMI(*mb, MIB, mi->getDebugLoc(),
                  TM.getInstrInfo()->get(AMDIL::BINARY_AND_v4i32),
                  Reg)
              .addReg(mi->getOperand(1).getReg())
              .addReg(lReg);
          mi->getOperand(1).setReg(Reg);
          Changed = true;
        }
        break;
      }
    }
  }
  // Bug fix: the original always returned false even after modifying MF,
  // misinforming the pass manager that nothing changed.
  return Changed;
}
/// Name reported to the pass manager (e.g. in -debug-pass output).
const char* AMDILMachinePeephole::getPassName() const
{
  return "AMDIL Generic Machine Peephole Optimization Pass";
}
/// Bracket the memory instruction at MIB with FENCE instructions, one
/// inserted immediately before it and one immediately after. MIB is advanced
/// to the trailing fence so the caller's iteration does not revisit it.
void
AMDILMachinePeephole::insertFence(MachineBasicBlock::iterator &MIB)
{
  MachineInstr *inst = MIB;
  MachineBasicBlock *block = inst->getParent();
  MachineFunction *func = block->getParent();

  // Fence ahead of the volatile access.
  MachineInstr *preFence = BuildMI(*func,
                                   inst->getDebugLoc(),
                                   TM.getInstrInfo()->get(AMDIL::FENCE)).addReg(1);
  block->insert(MIB, preFence);

  // Fence behind it; move the caller's iterator onto the new instruction.
  MachineInstr *postFence = BuildMI(*func,
                                    inst->getDebugLoc(),
                                    TM.getInstrInfo()->get(AMDIL::FENCE)).addReg(1);
  MIB = block->insertAfter(MIB, postFence);
}

View file

@@ -161,8 +161,6 @@ bool AMDILPassConfig::addPreRegAlloc()
if (TM->getOptLevel() == CodeGenOpt::None) {
llvm::RegisterScheduler::setDefault(&llvm::createSourceListDAGScheduler);
}
PM->add(createAMDILMachinePeephole(*TM));
return false;
}

View file

@@ -28,7 +28,6 @@ CPP_SOURCES := \
AMDILIntrinsicInfo.cpp \
AMDILISelDAGToDAG.cpp \
AMDILISelLowering.cpp \
AMDILMachinePeephole.cpp \
AMDILNIDevice.cpp \
AMDILPeepholeOptimizer.cpp \
AMDILRegisterInfo.cpp \