style: fix missing spaces in control statements
Result of running 'hg m5style --skip-all --fix-control -a'.
parent dc8018a5c3
commit 5592798865
85 changed files with 249 additions and 249 deletions
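
The change is purely mechanical: judging from the hunks below, the --fix-control pass inserts a space between a control keyword (if, for, while, do/while, switch) and its opening parenthesis. As a rough illustration, here is a minimal, hypothetical C++ sketch (not taken from the gem5 tree) of code already formatted to that convention.

#include <cstddef>
#include <vector>

// Hypothetical example: control statements written the way the style
// checker expects, i.e. "if (", "for (", "while (" rather than "if(" etc.
static int countNonZero(const std::vector<int> &values)
{
    int count = 0;
    for (std::size_t i = 0; i < values.size(); ++i) {
        if (values[i] != 0)
            ++count;
    }
    return count;
}
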
@@ -73,7 +73,7 @@ AlphaLiveProcess::argsInit(int intSize, int pageSize)
std::vector<auxv_t> auxv;

ElfObject * elfObject = dynamic_cast<ElfObject *>(objFile);
-if(elfObject)
+if (elfObject)
{
// modern glibc uses a bunch of auxiliary vectors to set up
// TLS as well as do a bunch of other stuff

@@ -145,7 +145,7 @@ MacroMemOp::MacroMemOp(const char *mnem, ExtMachInst machInst,
// 32-bit memory operation
// Find register for operation
unsigned reg_idx;
-while(!bits(regs, reg)) reg++;
+while (!bits(regs, reg)) reg++;
replaceBits(regs, reg, 0);
reg_idx = force_user ? intRegInMode(MODE_USER, reg) : reg;

@@ -1149,7 +1149,7 @@ VldMultOp64::VldMultOp64(const char *mnem, ExtMachInst machInst,
TLB::AllowUnaligned;

int i = 0;
-for(; i < numMemMicroops - 1; ++i) {
+for (; i < numMemMicroops - 1; ++i) {
microOps[uopIdx++] = new MicroNeonLoad64(
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
baseIsSP, 16 /* accSize */, eSize);

@ -1231,7 +1231,7 @@ VstMultOp64::VstMultOp64(const char *mnem, ExtMachInst machInst,
|
|||
microOps = new StaticInstPtr[numMicroops];
|
||||
unsigned uopIdx = 0;
|
||||
|
||||
for(int i = 0; i < numMarshalMicroops; ++i) {
|
||||
for (int i = 0; i < numMarshalMicroops; ++i) {
|
||||
switch (numRegs) {
|
||||
case 1: microOps[uopIdx++] = new MicroIntNeon64_1Reg(
|
||||
machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
|
||||
|
@ -1257,7 +1257,7 @@ VstMultOp64::VstMultOp64(const char *mnem, ExtMachInst machInst,
|
|||
TLB::AllowUnaligned;
|
||||
|
||||
int i = 0;
|
||||
for(; i < numMemMicroops - 1; ++i) {
|
||||
for (; i < numMemMicroops - 1; ++i) {
|
||||
microOps[uopIdx++] = new MicroNeonStore64(
|
||||
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
|
||||
baseIsSP, 16 /* accSize */, eSize);
|
||||
|
@ -1347,7 +1347,7 @@ VldSingleOp64::VldSingleOp64(const char *mnem, ExtMachInst machInst,
|
|||
}
|
||||
}
|
||||
|
||||
for(int i = 0; i < numMarshalMicroops; ++i) {
|
||||
for (int i = 0; i < numMarshalMicroops; ++i) {
|
||||
microOps[uopIdx++] = new MicroUnpackNeon64(
|
||||
machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
|
||||
numStructElems, index, i /* step */, replicate);
|
||||
|
@ -1394,7 +1394,7 @@ VstSingleOp64::VstSingleOp64(const char *mnem, ExtMachInst machInst,
|
|||
microOps = new StaticInstPtr[numMicroops];
|
||||
unsigned uopIdx = 0;
|
||||
|
||||
for(int i = 0; i < numMarshalMicroops; ++i) {
|
||||
for (int i = 0; i < numMarshalMicroops; ++i) {
|
||||
microOps[uopIdx++] = new MicroPackNeon64(
|
||||
machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
|
||||
numStructElems, index, i /* step */, replicate);
|
||||
|
@ -1404,7 +1404,7 @@ VstSingleOp64::VstSingleOp64(const char *mnem, ExtMachInst machInst,
|
|||
TLB::AllowUnaligned;
|
||||
|
||||
int i = 0;
|
||||
for(; i < numMemMicroops - 1; ++i) {
|
||||
for (; i < numMemMicroops - 1; ++i) {
|
||||
microOps[uopIdx++] = new MicroNeonStore64(
|
||||
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
|
||||
baseIsSP, 16 /* accsize */, eSize);
|
||||
|
|
|
@ -551,7 +551,7 @@ fpMulX(T a, T b)
|
|||
bool zero1 = (std::fpclassify(a) == FP_ZERO);
|
||||
bool zero2 = (std::fpclassify(b) == FP_ZERO);
|
||||
if ((inf1 && zero2) || (zero1 && inf2)) {
|
||||
if(sign1 ^ sign2)
|
||||
if (sign1 ^ sign2)
|
||||
return (T)(-2.0);
|
||||
else
|
||||
return (T)(2.0);
|
||||
|
@ -685,7 +685,7 @@ fpRSqrts(T a, T b)
|
|||
}
|
||||
aXb = a*b;
|
||||
fpClassAxB = std::fpclassify(aXb);
|
||||
if(fpClassAxB == FP_SUBNORMAL) {
|
||||
if (fpClassAxB == FP_SUBNORMAL) {
|
||||
feraiseexcept(FeUnderflow);
|
||||
return 1.5;
|
||||
}
|
||||
|
@ -707,7 +707,7 @@ fpRecps(T a, T b)
|
|||
}
|
||||
aXb = a*b;
|
||||
fpClassAxB = std::fpclassify(aXb);
|
||||
if(fpClassAxB == FP_SUBNORMAL) {
|
||||
if (fpClassAxB == FP_SUBNORMAL) {
|
||||
feraiseexcept(FeUnderflow);
|
||||
return 2.0;
|
||||
}
|
||||
|
@ -729,7 +729,7 @@ fpRSqrtsS(float a, float b)
|
|||
}
|
||||
aXb = a*b;
|
||||
fpClassAxB = std::fpclassify(aXb);
|
||||
if(fpClassAxB == FP_SUBNORMAL) {
|
||||
if (fpClassAxB == FP_SUBNORMAL) {
|
||||
feraiseexcept(FeUnderflow);
|
||||
return 1.5;
|
||||
}
|
||||
|
@ -750,7 +750,7 @@ fpRecpsS(float a, float b)
|
|||
}
|
||||
aXb = a*b;
|
||||
fpClassAxB = std::fpclassify(aXb);
|
||||
if(fpClassAxB == FP_SUBNORMAL) {
|
||||
if (fpClassAxB == FP_SUBNORMAL) {
|
||||
feraiseexcept(FeUnderflow);
|
||||
return 2.0;
|
||||
}
|
||||
|
|
|
@ -399,7 +399,7 @@ ArmKvmCPU::decodeCoProcReg(uint64_t id) const
|
|||
default:
|
||||
return NUM_MISCREGS;
|
||||
}
|
||||
} else if(is_reg64) {
|
||||
} else if (is_reg64) {
|
||||
return NUM_MISCREGS;
|
||||
} else {
|
||||
warn("Unhandled register length, register (0x%x) ignored.\n");
|
||||
|
|
|
@ -83,7 +83,7 @@ LinuxArmSystem::LinuxArmSystem(Params *p)
|
|||
// newer kernels use __loop_udelay and __loop_const_udelay symbols
|
||||
uDelaySkipEvent = addKernelFuncEvent<UDelayEvent>(
|
||||
"__loop_udelay", "__udelay", 1000, 0);
|
||||
if(!uDelaySkipEvent)
|
||||
if (!uDelaySkipEvent)
|
||||
uDelaySkipEvent = addKernelFuncEventOrPanic<UDelayEvent>(
|
||||
"__udelay", "__udelay", 1000, 0);
|
||||
|
||||
|
@ -91,7 +91,7 @@ LinuxArmSystem::LinuxArmSystem(Params *p)
|
|||
// time. Constant comes from code.
|
||||
constUDelaySkipEvent = addKernelFuncEvent<UDelayEvent>(
|
||||
"__loop_const_udelay", "__const_udelay", 1000, 107374);
|
||||
if(!constUDelaySkipEvent)
|
||||
if (!constUDelaySkipEvent)
|
||||
constUDelaySkipEvent = addKernelFuncEventOrPanic<UDelayEvent>(
|
||||
"__const_udelay", "__const_udelay", 1000, 107374);
|
||||
|
||||
|
|
|
@ -148,7 +148,7 @@ TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
|
|||
// than rangeMRU
|
||||
if (x > rangeMRU && !functional) {
|
||||
TlbEntry tmp_entry = table[x];
|
||||
for(int i = x; i > 0; i--)
|
||||
for (int i = x; i > 0; i--)
|
||||
table[i] = table[i - 1];
|
||||
table[0] = tmp_entry;
|
||||
retval = &table[0];
|
||||
|
@ -394,7 +394,7 @@ TLB::serialize(CheckpointOut &cp) const
|
|||
|
||||
int num_entries = size;
|
||||
SERIALIZE_SCALAR(num_entries);
|
||||
for(int i = 0; i < size; i++)
|
||||
for (int i = 0; i < size; i++)
|
||||
table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
|
||||
}
|
||||
|
||||
|
@ -410,7 +410,7 @@ TLB::unserialize(CheckpointIn &cp)
|
|||
|
||||
int num_entries;
|
||||
UNSERIALIZE_SCALAR(num_entries);
|
||||
for(int i = 0; i < min(size, num_entries); i++)
|
||||
for (int i = 0; i < min(size, num_entries); i++)
|
||||
table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
|
||||
}
|
||||
|
||||
|
|
|
@ -45,7 +45,7 @@ GenericTLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode)
|
|||
Process * p = tc->getProcessPtr();
|
||||
|
||||
Fault fault = p->pTable->translate(req);
|
||||
if(fault != NoFault)
|
||||
if (fault != NoFault)
|
||||
return fault;
|
||||
|
||||
return NoFault;
|
||||
|
|
|
@ -150,7 +150,7 @@ ISA::params() const
|
|||
void
|
||||
ISA::clear()
|
||||
{
|
||||
for(int i = 0; i < NumMiscRegs; i++) {
|
||||
for (int i = 0; i < NumMiscRegs; i++) {
|
||||
for (int j = 0; j < miscRegFile[i].size(); j++)
|
||||
miscRegFile[i][j] = 0;
|
||||
|
||||
|
|
|
@ -76,7 +76,7 @@ writeOutField(PortProxy& proxy, Addr addr, T val)
|
|||
proxy.writeBlob(addr, (uint8_t *)(&guestVal), sizeof(T));
|
||||
|
||||
uint8_t checkSum = 0;
|
||||
while(guestVal) {
|
||||
while (guestVal) {
|
||||
checkSum += guestVal;
|
||||
guestVal >>= 8;
|
||||
}
|
||||
|
|
|
@ -142,7 +142,7 @@ namespace X86ISA {
|
|||
funcNum);
|
||||
return false;
|
||||
}
|
||||
} else if(family == 0x0000) {
|
||||
} else if (family == 0x0000) {
|
||||
// The standard functions
|
||||
switch (funcNum) {
|
||||
case VendorAndLargestStdFunc:
|
||||
|
|
|
@ -386,7 +386,7 @@ Decoder::processOpcode(ByteTable &immTable, ByteTable &modrmTable,
|
|||
//Figure out the effective address size. This can be overriden to
|
||||
//a fixed value at the decoder level.
|
||||
int logAddrSize;
|
||||
if(emi.legacy.addr)
|
||||
if (emi.legacy.addr)
|
||||
logAddrSize = altAddr;
|
||||
else
|
||||
logAddrSize = defAddr;
|
||||
|
@ -410,7 +410,7 @@ Decoder::processOpcode(ByteTable &immTable, ByteTable &modrmTable,
|
|||
if (modrmTable[opcode]) {
|
||||
nextState = ModRMState;
|
||||
} else {
|
||||
if(immediateSize) {
|
||||
if (immediateSize) {
|
||||
nextState = ImmediateState;
|
||||
} else {
|
||||
instDone = true;
|
||||
|
@ -439,7 +439,7 @@ Decoder::processExtendedOpcode(ByteTable &immTable)
|
|||
//Figure out the effective address size. This can be overriden to
|
||||
//a fixed value at the decoder level.
|
||||
int logAddrSize;
|
||||
if(emi.legacy.addr)
|
||||
if (emi.legacy.addr)
|
||||
logAddrSize = altAddr;
|
||||
else
|
||||
logAddrSize = defAddr;
|
||||
|
@ -509,9 +509,9 @@ Decoder::doModRMState(uint8_t nextByte)
|
|||
if (modRM.rm == 4 && modRM.mod != 3) {
|
||||
// && in 32/64 bit mode)
|
||||
nextState = SIBState;
|
||||
} else if(displacementSize) {
|
||||
} else if (displacementSize) {
|
||||
nextState = DisplacementState;
|
||||
} else if(immediateSize) {
|
||||
} else if (immediateSize) {
|
||||
nextState = ImmediateState;
|
||||
} else {
|
||||
instDone = true;
|
||||
|
@ -537,7 +537,7 @@ Decoder::doSIBState(uint8_t nextByte)
|
|||
displacementSize = 4;
|
||||
if (displacementSize) {
|
||||
nextState = DisplacementState;
|
||||
} else if(immediateSize) {
|
||||
} else if (immediateSize) {
|
||||
nextState = ImmediateState;
|
||||
} else {
|
||||
instDone = true;
|
||||
|
@ -560,7 +560,7 @@ Decoder::doDisplacementState()
|
|||
DPRINTF(Decoder, "Collecting %d byte displacement, got %d bytes.\n",
|
||||
displacementSize, immediateCollected);
|
||||
|
||||
if(displacementSize == immediateCollected) {
|
||||
if (displacementSize == immediateCollected) {
|
||||
//Reset this for other immediates.
|
||||
immediateCollected = 0;
|
||||
//Sign extend the displacement
|
||||
|
@ -580,7 +580,7 @@ Decoder::doDisplacementState()
|
|||
}
|
||||
DPRINTF(Decoder, "Collected displacement %#x.\n",
|
||||
emi.displacement);
|
||||
if(immediateSize) {
|
||||
if (immediateSize) {
|
||||
nextState = ImmediateState;
|
||||
} else {
|
||||
instDone = true;
|
||||
|
@ -608,7 +608,7 @@ Decoder::doImmediateState()
|
|||
DPRINTF(Decoder, "Collecting %d byte immediate, got %d bytes.\n",
|
||||
immediateSize, immediateCollected);
|
||||
|
||||
if(immediateSize == immediateCollected)
|
||||
if (immediateSize == immediateCollected)
|
||||
{
|
||||
//Reset this for other immediates.
|
||||
immediateCollected = 0;
|
||||
|
|
|
@ -49,7 +49,7 @@ namespace X86ISA
|
|||
std::stringstream response;
|
||||
|
||||
printMnemonic(response, instMnem, mnemonic);
|
||||
if(flags[IsLoad])
|
||||
if (flags[IsLoad])
|
||||
printDestReg(response, 0, dataSize);
|
||||
else
|
||||
printSrcReg(response, 2, dataSize);
|
||||
|
|
|
@ -52,27 +52,27 @@ namespace X86ISA
|
|||
{
|
||||
DPRINTF(X86, "flagMask = %#x\n", flagMask);
|
||||
uint64_t flags = oldFlags & ~flagMask;
|
||||
if(flagMask & (ECFBit | CFBit))
|
||||
if (flagMask & (ECFBit | CFBit))
|
||||
{
|
||||
if(findCarry(dataSize*8, _dest, _src1, _src2))
|
||||
if (findCarry(dataSize*8, _dest, _src1, _src2))
|
||||
flags |= (flagMask & (ECFBit | CFBit));
|
||||
if(subtract)
|
||||
if (subtract)
|
||||
flags ^= (flagMask & (ECFBit | CFBit));
|
||||
}
|
||||
if(flagMask & PFBit && !findParity(8, _dest))
|
||||
if (flagMask & PFBit && !findParity(8, _dest))
|
||||
flags |= PFBit;
|
||||
if(flagMask & AFBit)
|
||||
if (flagMask & AFBit)
|
||||
{
|
||||
if(findCarry(4, _dest, _src1, _src2))
|
||||
if (findCarry(4, _dest, _src1, _src2))
|
||||
flags |= AFBit;
|
||||
if(subtract)
|
||||
if (subtract)
|
||||
flags ^= AFBit;
|
||||
}
|
||||
if(flagMask & (EZFBit | ZFBit) && findZero(dataSize*8, _dest))
|
||||
if (flagMask & (EZFBit | ZFBit) && findZero(dataSize*8, _dest))
|
||||
flags |= (flagMask & (EZFBit | ZFBit));
|
||||
if(flagMask & SFBit && findNegative(dataSize*8, _dest))
|
||||
if (flagMask & SFBit && findNegative(dataSize*8, _dest))
|
||||
flags |= SFBit;
|
||||
if(flagMask & OFBit && findOverflow(dataSize*8, _dest, _src1, _src2))
|
||||
if (flagMask & OFBit && findOverflow(dataSize*8, _dest, _src1, _src2))
|
||||
flags |= OFBit;
|
||||
return flags;
|
||||
}
|
||||
|
|
|
@ -107,14 +107,14 @@ namespace X86ISA
|
|||
void
|
||||
X86StaticInst::printSrcReg(std::ostream &os, int reg, int size) const
|
||||
{
|
||||
if(_numSrcRegs > reg)
|
||||
if (_numSrcRegs > reg)
|
||||
printReg(os, _srcRegIdx[reg], size);
|
||||
}
|
||||
|
||||
void
|
||||
X86StaticInst::printDestReg(std::ostream &os, int reg, int size) const
|
||||
{
|
||||
if(_numDestRegs > reg)
|
||||
if (_numDestRegs > reg)
|
||||
printReg(os, _destRegIdx[reg], size);
|
||||
}
|
||||
|
||||
|
@ -139,9 +139,9 @@ namespace X86ISA
|
|||
bool fold = rel_reg & IntFoldBit;
|
||||
rel_reg &= ~IntFoldBit;
|
||||
|
||||
if(fold)
|
||||
if (fold)
|
||||
suffix = "h";
|
||||
else if(rel_reg < 8 && size == 1)
|
||||
else if (rel_reg < 8 && size == 1)
|
||||
suffix = "l";
|
||||
|
||||
switch (rel_reg) {
|
||||
|
@ -247,14 +247,14 @@ namespace X86ISA
|
|||
} else {
|
||||
if (scale != 0 && index != ZeroReg)
|
||||
{
|
||||
if(scale != 1)
|
||||
if (scale != 1)
|
||||
ccprintf(os, "%d*", scale);
|
||||
printReg(os, index, addressSize);
|
||||
someAddr = true;
|
||||
}
|
||||
if (base != ZeroReg)
|
||||
{
|
||||
if(someAddr)
|
||||
if (someAddr)
|
||||
os << " + ";
|
||||
printReg(os, base, addressSize);
|
||||
someAddr = true;
|
||||
|
@ -262,7 +262,7 @@ namespace X86ISA
|
|||
}
|
||||
if (disp != 0)
|
||||
{
|
||||
if(someAddr)
|
||||
if (someAddr)
|
||||
os << " + ";
|
||||
ccprintf(os, "%#x", disp);
|
||||
someAddr = true;
|
||||
|
|
|
@ -91,7 +91,7 @@ namespace X86ISA
|
|||
inline uint64_t merge(uint64_t into, uint64_t val, int size) const
|
||||
{
|
||||
X86IntReg reg = into;
|
||||
if(_destRegIdx[0] & IntFoldBit)
|
||||
if (_destRegIdx[0] & IntFoldBit)
|
||||
{
|
||||
reg.H = val;
|
||||
return reg;
|
||||
|
@ -122,7 +122,7 @@ namespace X86ISA
|
|||
{
|
||||
X86IntReg reg = from;
|
||||
DPRINTF(X86, "Picking with size %d\n", size);
|
||||
if(_srcRegIdx[idx] & IntFoldBit)
|
||||
if (_srcRegIdx[idx] & IntFoldBit)
|
||||
return reg.H;
|
||||
switch(size)
|
||||
{
|
||||
|
@ -143,7 +143,7 @@ namespace X86ISA
|
|||
{
|
||||
X86IntReg reg = from;
|
||||
DPRINTF(X86, "Picking with size %d\n", size);
|
||||
if(_srcRegIdx[idx] & IntFoldBit)
|
||||
if (_srcRegIdx[idx] & IntFoldBit)
|
||||
return reg.SH;
|
||||
switch(size)
|
||||
{
|
||||
|
|
|
@ -105,9 +105,9 @@ X86NativeTrace::X86NativeTrace(const Params *p)
|
|||
bool
|
||||
X86NativeTrace::checkRcxReg(const char * name, uint64_t &mVal, uint64_t &nVal)
|
||||
{
|
||||
if(!checkRcx)
|
||||
if (!checkRcx)
|
||||
checkRcx = (mVal != oldRcxVal || nVal != oldRealRcxVal);
|
||||
if(checkRcx)
|
||||
if (checkRcx)
|
||||
return checkReg(name, mVal, nVal);
|
||||
return true;
|
||||
}
|
||||
|
@ -115,9 +115,9 @@ X86NativeTrace::checkRcxReg(const char * name, uint64_t &mVal, uint64_t &nVal)
|
|||
bool
|
||||
X86NativeTrace::checkR11Reg(const char * name, uint64_t &mVal, uint64_t &nVal)
|
||||
{
|
||||
if(!checkR11)
|
||||
if (!checkR11)
|
||||
checkR11 = (mVal != oldR11Val || nVal != oldRealR11Val);
|
||||
if(checkR11)
|
||||
if (checkR11)
|
||||
return checkReg(name, mVal, nVal);
|
||||
return true;
|
||||
}
|
||||
|
@ -142,7 +142,7 @@ X86NativeTrace::check(NativeTraceRecord *record)
|
|||
nState.update(this);
|
||||
mState.update(record->getThread());
|
||||
|
||||
if(record->getStaticInst()->isSyscall())
|
||||
if (record->getStaticInst()->isSyscall())
|
||||
{
|
||||
checkRcx = false;
|
||||
checkR11 = false;
|
||||
|
|
|
@ -239,7 +239,7 @@ Walker::WalkerState::startWalk()
|
|||
nextState = Ready;
|
||||
if (write)
|
||||
walker->port.sendAtomic(write);
|
||||
} while(read);
|
||||
} while (read);
|
||||
state = Ready;
|
||||
nextState = Waiting;
|
||||
}
|
||||
|
@ -263,7 +263,7 @@ Walker::WalkerState::startFunctional(Addr &addr, unsigned &logBytes)
|
|||
assert(fault == NoFault || read == NULL);
|
||||
state = nextState;
|
||||
nextState = Ready;
|
||||
} while(read);
|
||||
} while (read);
|
||||
logBytes = entry.logBytes;
|
||||
addr = entry.paddr;
|
||||
|
||||
|
|
|
@ -552,7 +552,7 @@ X86_64LiveProcess::initState()
|
|||
dataAttr.system = 1;
|
||||
|
||||
//Initialize the segment registers.
|
||||
for(int seg = 0; seg < NUM_SEGMENTREGS; seg++) {
|
||||
for (int seg = 0; seg < NUM_SEGMENTREGS; seg++) {
|
||||
tc->setMiscRegNoEffect(MISCREG_SEG_BASE(seg), 0);
|
||||
tc->setMiscRegNoEffect(MISCREG_SEG_EFF_BASE(seg), 0);
|
||||
tc->setMiscRegNoEffect(MISCREG_SEG_ATTR(seg), dataAttr);
|
||||
|
@ -663,7 +663,7 @@ I386LiveProcess::initState()
|
|||
dataAttr.system = 1;
|
||||
|
||||
//Initialize the segment registers.
|
||||
for(int seg = 0; seg < NUM_SEGMENTREGS; seg++) {
|
||||
for (int seg = 0; seg < NUM_SEGMENTREGS; seg++) {
|
||||
tc->setMiscRegNoEffect(MISCREG_SEG_BASE(seg), 0);
|
||||
tc->setMiscRegNoEffect(MISCREG_SEG_EFF_BASE(seg), 0);
|
||||
tc->setMiscRegNoEffect(MISCREG_SEG_ATTR(seg), dataAttr);
|
||||
|
@ -735,7 +735,7 @@ X86LiveProcess::argsInit(int pageSize,
|
|||
std::vector<auxv_t> auxv = extraAuxvs;
|
||||
|
||||
string filename;
|
||||
if(argv.size() < 1)
|
||||
if (argv.size() < 1)
|
||||
filename = "";
|
||||
else
|
||||
filename = argv[0];
|
||||
|
|
|
@ -258,31 +258,31 @@ namespace X86ISA
|
|||
inline static bool
|
||||
operator == (const ExtMachInst &emi1, const ExtMachInst &emi2)
|
||||
{
|
||||
if(emi1.legacy != emi2.legacy)
|
||||
if (emi1.legacy != emi2.legacy)
|
||||
return false;
|
||||
if(emi1.rex != emi2.rex)
|
||||
if (emi1.rex != emi2.rex)
|
||||
return false;
|
||||
if(emi1.opcode.type != emi2.opcode.type)
|
||||
if (emi1.opcode.type != emi2.opcode.type)
|
||||
return false;
|
||||
if(emi1.opcode.op != emi2.opcode.op)
|
||||
if (emi1.opcode.op != emi2.opcode.op)
|
||||
return false;
|
||||
if(emi1.modRM != emi2.modRM)
|
||||
if (emi1.modRM != emi2.modRM)
|
||||
return false;
|
||||
if(emi1.sib != emi2.sib)
|
||||
if (emi1.sib != emi2.sib)
|
||||
return false;
|
||||
if(emi1.immediate != emi2.immediate)
|
||||
if (emi1.immediate != emi2.immediate)
|
||||
return false;
|
||||
if(emi1.displacement != emi2.displacement)
|
||||
if (emi1.displacement != emi2.displacement)
|
||||
return false;
|
||||
if(emi1.mode != emi2.mode)
|
||||
if (emi1.mode != emi2.mode)
|
||||
return false;
|
||||
if(emi1.opSize != emi2.opSize)
|
||||
if (emi1.opSize != emi2.opSize)
|
||||
return false;
|
||||
if(emi1.addrSize != emi2.addrSize)
|
||||
if (emi1.addrSize != emi2.addrSize)
|
||||
return false;
|
||||
if(emi1.stackSize != emi2.stackSize)
|
||||
if (emi1.stackSize != emi2.stackSize)
|
||||
return false;
|
||||
if(emi1.dispSize != emi2.dispSize)
|
||||
if (emi1.dispSize != emi2.dispSize)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -53,7 +53,7 @@ ssize_t atomic_write(int fd, const void *s, size_t n);
|
|||
do { \
|
||||
static const char msg[] = m; \
|
||||
atomic_write(fd, msg, sizeof(msg) - 1); \
|
||||
} while(0)
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* Statically allocate a string and write it to STDERR.
|
||||
|
|
|
@ -1205,7 +1205,7 @@ CPA::serialize(CheckpointOut &cp) const
|
|||
}
|
||||
|
||||
// qData (vector<AnnotateList>)
|
||||
for(x = 0; x < qData.size(); x++) {
|
||||
for (x = 0; x < qData.size(); x++) {
|
||||
if (!qData[x].size())
|
||||
continue;
|
||||
y = 0;
|
||||
|
|
|
@ -47,7 +47,7 @@ int m5_fegetround()
|
|||
{
|
||||
int x;
|
||||
int rm = fegetround();
|
||||
for(x = 0; x < 4; x++)
|
||||
for (x = 0; x < 4; x++)
|
||||
if (m5_round_ops[x] == rm)
|
||||
return x;
|
||||
abort();
|
||||
|
|
|
@ -179,7 +179,7 @@ ElfObject::tryFile(const string &fname, size_t len, uint8_t *data)
|
|||
// 2 == solaris, 3 == freebsd
|
||||
data = elf_rawdata(section, NULL);
|
||||
assert(data->d_buf);
|
||||
if(ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
|
||||
if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
|
||||
osAbi = htole(((uint32_t*)data->d_buf)[4]);
|
||||
else
|
||||
osAbi = htobe(((uint32_t*)data->d_buf)[4]);
|
||||
|
@ -218,7 +218,7 @@ ElfObject::tryFile(const string &fname, size_t len, uint8_t *data)
|
|||
result->_programHeaderCount = ehdr.e_phnum;
|
||||
//Record the size of each entry
|
||||
result->_programHeaderSize = ehdr.e_phentsize;
|
||||
if(result->_programHeaderCount) //If there is a program header table
|
||||
if (result->_programHeaderCount) //If there is a program header table
|
||||
{
|
||||
//Figure out the virtual address of the header table in the
|
||||
//final memory image. We use the program headers themselves
|
||||
|
@ -226,11 +226,11 @@ ElfObject::tryFile(const string &fname, size_t len, uint8_t *data)
|
|||
GElf_Phdr phdr;
|
||||
uint64_t e_phoff = ehdr.e_phoff;
|
||||
result->_programHeaderTable = 0;
|
||||
for(int hdrnum = 0; hdrnum < result->_programHeaderCount; hdrnum++)
|
||||
for (int hdrnum = 0; hdrnum < result->_programHeaderCount; hdrnum++)
|
||||
{
|
||||
gelf_getphdr(elf, hdrnum, &phdr);
|
||||
//Check if we've found the segment with the headers in it
|
||||
if(phdr.p_offset <= e_phoff &&
|
||||
if (phdr.p_offset <= e_phoff &&
|
||||
phdr.p_offset + phdr.p_filesz > e_phoff)
|
||||
{
|
||||
result->_programHeaderTable =
|
||||
|
|
|
@ -364,9 +364,9 @@ HistStor::add(HistStor *hs)
|
|||
squares += hs->squares;
|
||||
samples += hs->samples;
|
||||
|
||||
while(bucket_size > hs->bucket_size)
|
||||
while (bucket_size > hs->bucket_size)
|
||||
hs->grow_up();
|
||||
while(bucket_size < hs->bucket_size)
|
||||
while (bucket_size < hs->bucket_size)
|
||||
grow_up();
|
||||
|
||||
for (uint32_t i = 0; i < b_size; i++)
|
||||
|
|
|
@ -98,7 +98,7 @@ CPUProgressEvent::process()
|
|||
if (_repeatEvent)
|
||||
cpu->schedule(this, curTick() + _interval);
|
||||
|
||||
if(cpu->switchedOut()) {
|
||||
if (cpu->switchedOut()) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -288,7 +288,7 @@ BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
|
|||
assert(tid < numThreads);
|
||||
AddressMonitor &monitor = addressMonitor[tid];
|
||||
|
||||
if(monitor.gotWakeup == false) {
|
||||
if (monitor.gotWakeup == false) {
|
||||
int block_size = cacheLineSize();
|
||||
uint64_t mask = ~((uint64_t)(block_size - 1));
|
||||
|
||||
|
@@ -701,8 +701,8 @@ AddressMonitor::AddressMonitor() {

bool AddressMonitor::doMonitor(PacketPtr pkt) {
assert(pkt->req->hasPaddr());
-if(armed && waiting) {
-if(pAddr == pkt->getAddr()) {
+if (armed && waiting) {
+if (pAddr == pkt->getAddr()) {
DPRINTF(Mwait,"pAddr=0x%lx invalidated: waking up core\n",
pkt->getAddr());
waiting = false;

@ -242,5 +242,5 @@ PerfKvmCounter::read(void *buf, size_t size) const
|
|||
_buf += ret;
|
||||
break;
|
||||
}
|
||||
} while(_size);
|
||||
} while (_size);
|
||||
}
|
||||
|
|
|
@ -118,7 +118,7 @@ static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
|
|||
APPLY_IREG(r13, INTREG_R13); \
|
||||
APPLY_IREG(r14, INTREG_R14); \
|
||||
APPLY_IREG(r15, INTREG_R15); \
|
||||
} while(0)
|
||||
} while (0)
|
||||
|
||||
#define FOREACH_SREG() \
|
||||
do { \
|
||||
|
@ -129,7 +129,7 @@ static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
|
|||
APPLY_SREG(cr8, MISCREG_CR8); \
|
||||
APPLY_SREG(efer, MISCREG_EFER); \
|
||||
APPLY_SREG(apic_base, MISCREG_APIC_BASE); \
|
||||
} while(0)
|
||||
} while (0)
|
||||
|
||||
#define FOREACH_DREG() \
|
||||
do { \
|
||||
|
@ -139,7 +139,7 @@ static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
|
|||
APPLY_DREG(db[3], MISCREG_DR3); \
|
||||
APPLY_DREG(dr6, MISCREG_DR6); \
|
||||
APPLY_DREG(dr7, MISCREG_DR7); \
|
||||
} while(0)
|
||||
} while (0)
|
||||
|
||||
#define FOREACH_SEGMENT() \
|
||||
do { \
|
||||
|
@ -151,13 +151,13 @@ static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
|
|||
APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE); \
|
||||
APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE); \
|
||||
APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
|
||||
} while(0)
|
||||
} while (0)
|
||||
|
||||
#define FOREACH_DTABLE() \
|
||||
do { \
|
||||
APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE); \
|
||||
APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
|
||||
} while(0)
|
||||
} while (0)
|
||||
|
||||
template<typename STRUCT, typename ENTRY>
|
||||
static STRUCT *newVarStruct(size_t entries)
|
||||
|
|
|
@ -226,7 +226,7 @@ Decode::evaluate()
|
|||
execSeqNum++;
|
||||
|
||||
/* Correctly size the output before writing */
|
||||
if(output_index == 0) insts_out.resize(outputWidth);
|
||||
if (output_index == 0) insts_out.resize(outputWidth);
|
||||
/* Push into output */
|
||||
insts_out.insts[output_index] = output_inst;
|
||||
output_index++;
|
||||
|
|
|
@ -45,7 +45,7 @@ NativeTrace::NativeTrace(const Params *p)
|
|||
fatal("All listeners are disabled!");
|
||||
|
||||
int port = 8000;
|
||||
while(!native_listener.listen(port, true))
|
||||
while (!native_listener.listen(port, true))
|
||||
{
|
||||
DPRINTF(GDBMisc, "Can't bind port %d\n", port);
|
||||
port++;
|
||||
|
|
|
@ -90,7 +90,7 @@ class NativeTrace : public ExeTracer
|
|||
bool
|
||||
checkReg(const char * regName, T &val, T &realVal)
|
||||
{
|
||||
if(val != realVal)
|
||||
if (val != realVal)
|
||||
{
|
||||
DPRINTFN("Register %s should be %#x but is %#x.\n",
|
||||
regName, realVal, val);
|
||||
|
|
|
@ -498,7 +498,7 @@ InstructionQueue<Impl>::resetEntries()
|
|||
|
||||
if (iqPolicy == Partitioned) {
|
||||
maxEntries[tid] = numEntries / active_threads;
|
||||
} else if(iqPolicy == Threshold && active_threads == 1) {
|
||||
} else if (iqPolicy == Threshold && active_threads == 1) {
|
||||
maxEntries[tid] = numEntries;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -766,7 +766,7 @@ DefaultRename<Impl>::skidInsert(ThreadID tid)
|
|||
{
|
||||
typename InstQueue::iterator it;
|
||||
warn("Skidbuffer contents:\n");
|
||||
for(it = skidBuffer[tid].begin(); it != skidBuffer[tid].end(); it++)
|
||||
for (it = skidBuffer[tid].begin(); it != skidBuffer[tid].end(); it++)
|
||||
{
|
||||
warn("[tid:%u]: %s [sn:%i].\n", tid,
|
||||
(*it)->staticInst->disassemble(inst->instAddr()),
|
||||
|
|
|
@ -415,7 +415,7 @@ BPredUnit::squash(const InstSeqNum &squashed_sn, ThreadID tid)
|
|||
|
||||
RAS[tid].restore(pred_hist.front().RASIndex,
|
||||
pred_hist.front().RASTarget);
|
||||
} else if(pred_hist.front().wasCall && pred_hist.front().pushedRAS) {
|
||||
} else if (pred_hist.front().wasCall && pred_hist.front().pushedRAS) {
|
||||
// Was a call but predicated false. Pop RAS here
|
||||
DPRINTF(Branch, "[tid: %i] Squashing"
|
||||
" Call [sn:%i] PC: %s Popping RAS\n", tid,
|
||||
|
|
|
@ -139,7 +139,7 @@ AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
|
|||
|
||||
for (ThreadID tid = 0; tid < numThreads; tid++) {
|
||||
if (tid != sender) {
|
||||
if(getCpuAddrMonitor(tid)->doMonitor(pkt)) {
|
||||
if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
|
||||
wakeup(tid);
|
||||
}
|
||||
|
||||
|
@ -312,7 +312,7 @@ AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
|
|||
// X86 ISA: Snooping an invalidation for monitor/mwait
|
||||
AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
|
||||
for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
|
||||
if(cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
|
||||
if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
|
||||
cpu->wakeup(tid);
|
||||
}
|
||||
}
|
||||
|
@ -450,13 +450,13 @@ AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
|
|||
//across a cache line boundary.
|
||||
Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());
|
||||
|
||||
if(secondAddr > addr)
|
||||
if (secondAddr > addr)
|
||||
size = secondAddr - addr;
|
||||
|
||||
dcache_latency = 0;
|
||||
|
||||
req->taskId(taskId());
|
||||
while(1) {
|
||||
while (1) {
|
||||
req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());
|
||||
|
||||
// translate to physical address
|
||||
|
@ -600,7 +600,7 @@ AtomicSimpleCPU::tick()
|
|||
// like the I cache. It should be flushed, and when that works
|
||||
// this code should be uncommented.
|
||||
//Fetch more instruction memory if necessary
|
||||
//if(decoder.needMoreBytes())
|
||||
//if (decoder.needMoreBytes())
|
||||
//{
|
||||
icache_access = true;
|
||||
Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
|
||||
|
@ -657,7 +657,7 @@ AtomicSimpleCPU::tick()
|
|||
}
|
||||
|
||||
}
|
||||
if(fault != NoFault || !t_info.stayAtPC)
|
||||
if (fault != NoFault || !t_info.stayAtPC)
|
||||
advancePC(fault);
|
||||
}
|
||||
|
||||
|
|
|
@ -497,7 +497,7 @@ BaseSimpleCPU::preExecute()
|
|||
//Predecode, ie bundle up an ExtMachInst
|
||||
//If more fetch data is needed, pass it in.
|
||||
Addr fetchPC = (pcState.instAddr() & PCMask) + t_info.fetchOffset;
|
||||
//if(decoder->needMoreBytes())
|
||||
//if (decoder->needMoreBytes())
|
||||
decoder->moreBytes(pcState, fetchPC, inst);
|
||||
//else
|
||||
// decoder->process();
|
||||
|
|
|
@ -551,7 +551,7 @@ TimingSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
|
|||
{
|
||||
for (ThreadID tid = 0; tid < numThreads; tid++) {
|
||||
if (tid != sender) {
|
||||
if(getCpuAddrMonitor(tid)->doMonitor(pkt)) {
|
||||
if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
|
||||
wakeup(tid);
|
||||
}
|
||||
TheISA::handleLockedSnoop(threadInfo[tid]->thread, pkt,
|
||||
|
@ -885,7 +885,7 @@ void
|
|||
TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
|
||||
{
|
||||
for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
|
||||
if(cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
|
||||
if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
|
||||
cpu->wakeup(tid);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -216,14 +216,14 @@ TsunamiCChip::write(PacketPtr pkt)
|
|||
olddir = dir[number];
|
||||
dim[number] = pkt->get<uint64_t>();
|
||||
dir[number] = dim[number] & drir;
|
||||
for(int x = 0; x < Tsunami::Max_CPUs; x++)
|
||||
for (int x = 0; x < Tsunami::Max_CPUs; x++)
|
||||
{
|
||||
bitvector = ULL(1) << x;
|
||||
// Figure out which bits have changed
|
||||
if ((dim[number] & bitvector) != (olddim & bitvector))
|
||||
{
|
||||
// The bit is now set and it wasn't before (set)
|
||||
if((dim[number] & bitvector) && (dir[number] & bitvector))
|
||||
if ((dim[number] & bitvector) && (dir[number] & bitvector))
|
||||
{
|
||||
tsunami->intrctrl->post(number, TheISA::INTLEVEL_IRQ1, x);
|
||||
DPRINTF(Tsunami, "dim write resulting in posting dir"
|
||||
|
@ -278,7 +278,7 @@ TsunamiCChip::write(PacketPtr pkt)
|
|||
if (pkt->get<uint64_t>() & 0x10000000)
|
||||
supportedWrite = true;
|
||||
|
||||
if(!supportedWrite)
|
||||
if (!supportedWrite)
|
||||
panic("TSDEV_CC_MISC write not implemented\n");
|
||||
|
||||
break;
|
||||
|
@ -292,11 +292,11 @@ TsunamiCChip::write(PacketPtr pkt)
|
|||
case TSDEV_CC_DIM2:
|
||||
case TSDEV_CC_DIM3:
|
||||
int number;
|
||||
if(regnum == TSDEV_CC_DIM0)
|
||||
if (regnum == TSDEV_CC_DIM0)
|
||||
number = 0;
|
||||
else if(regnum == TSDEV_CC_DIM1)
|
||||
else if (regnum == TSDEV_CC_DIM1)
|
||||
number = 1;
|
||||
else if(regnum == TSDEV_CC_DIM2)
|
||||
else if (regnum == TSDEV_CC_DIM2)
|
||||
number = 2;
|
||||
else
|
||||
number = 3;
|
||||
|
@ -309,14 +309,14 @@ TsunamiCChip::write(PacketPtr pkt)
|
|||
olddir = dir[number];
|
||||
dim[number] = pkt->get<uint64_t>();
|
||||
dir[number] = dim[number] & drir;
|
||||
for(int x = 0; x < 64; x++)
|
||||
for (int x = 0; x < 64; x++)
|
||||
{
|
||||
bitvector = ULL(1) << x;
|
||||
// Figure out which bits have changed
|
||||
if ((dim[number] & bitvector) != (olddim & bitvector))
|
||||
{
|
||||
// The bit is now set and it wasn't before (set)
|
||||
if((dim[number] & bitvector) && (dir[number] & bitvector))
|
||||
if ((dim[number] & bitvector) && (dir[number] & bitvector))
|
||||
{
|
||||
tsunami->intrctrl->post(number, TheISA::INTLEVEL_IRQ1, x);
|
||||
DPRINTF(Tsunami, "posting dir interrupt to cpu 0\n");
|
||||
|
@@ -471,7 +471,7 @@ TsunamiCChip::postDRIR(uint32_t interrupt)
assert(size <= Tsunami::Max_CPUs);
drir |= bitvector;

-for(int i=0; i < size; i++) {
+for (int i=0; i < size; i++) {
dir[i] = dim[i] & drir;
if (dim[i] & bitvector) {
tsunami->intrctrl->post(i, TheISA::INTLEVEL_IRQ1, interrupt);

@ -491,7 +491,7 @@ TsunamiCChip::clearDRIR(uint32_t interrupt)
|
|||
if (drir & bitvector)
|
||||
{
|
||||
drir &= ~bitvector;
|
||||
for(int i=0; i < size; i++) {
|
||||
for (int i=0; i < size; i++) {
|
||||
if (dir[i] & bitvector) {
|
||||
tsunami->intrctrl->clear(i, TheISA::INTLEVEL_IRQ1, interrupt);
|
||||
DPRINTF(Tsunami, "clearing dir interrupt to cpu %d,"
|
||||
|
|
|
@ -98,7 +98,7 @@ FlashDevice::FlashDevice(const FlashDeviceParams* p):
|
|||
* bitwise AND with those two numbers results in an integer with all bits
|
||||
* cleared.
|
||||
*/
|
||||
if(numPlanes & planeMask)
|
||||
if (numPlanes & planeMask)
|
||||
fatal("Number of planes is not a power of 2 in flash device.\n");
|
||||
}
|
||||
|
||||
|
@ -245,7 +245,7 @@ FlashDevice::accessDevice(uint64_t address, uint32_t amount, Callback *event,
|
|||
DPRINTF(FlashDevice, "Plane %d is busy for %d ticks\n", count,
|
||||
time[count]);
|
||||
|
||||
if (time[count] != 0) {
|
||||
if (time[count] != 0) {
|
||||
|
||||
struct CallBackEntry cbe;
|
||||
/**
|
||||
|
|
|
@ -693,7 +693,7 @@ UFSHostDevice::UFSSCSIDevice::readFlash(uint8_t* readaddr, uint64_t offset,
|
|||
uint32_t size)
|
||||
{
|
||||
/** read from image, and get to memory */
|
||||
for(int count = 0; count < (size / SectorSize); count++)
|
||||
for (int count = 0; count < (size / SectorSize); count++)
|
||||
flashDisk->read(&(readaddr[SectorSize*count]), (offset /
|
||||
SectorSize) + count);
|
||||
}
|
||||
|
@ -707,7 +707,7 @@ UFSHostDevice::UFSSCSIDevice::writeFlash(uint8_t* writeaddr, uint64_t offset,
|
|||
uint32_t size)
|
||||
{
|
||||
/** Get from fifo and write to image*/
|
||||
for(int count = 0; count < (size / SectorSize); count++)
|
||||
for (int count = 0; count < (size / SectorSize); count++)
|
||||
flashDisk->write(&(writeaddr[SectorSize * count]),
|
||||
(offset / SectorSize) + count);
|
||||
}
|
||||
|
@ -745,7 +745,7 @@ UFSHostDevice::UFSHostDevice(const UFSHostDeviceParams* p) :
|
|||
memReadCallback = new MakeCallback<UFSHostDevice,
|
||||
&UFSHostDevice::readCallback>(this);
|
||||
|
||||
for(int count = 0; count < lunAvail; count++) {
|
||||
for (int count = 0; count < lunAvail; count++) {
|
||||
UFSDevice[count] = new UFSSCSIDevice(p, count, transferDoneCallback,
|
||||
memReadCallback);
|
||||
}
|
||||
|
@ -1672,7 +1672,7 @@ UFSHostDevice::LUNSignal()
|
|||
uint8_t this_lun = 0;
|
||||
|
||||
//while we haven't found the right lun, keep searching
|
||||
while((this_lun < lunAvail) && !UFSDevice[this_lun]->finishedCommand())
|
||||
while ((this_lun < lunAvail) && !UFSDevice[this_lun]->finishedCommand())
|
||||
++this_lun;
|
||||
|
||||
if (this_lun < lunAvail) {
|
||||
|
@ -1796,13 +1796,13 @@ UFSHostDevice::readDone()
|
|||
}
|
||||
|
||||
/**done, generate interrupt if we havent got one already*/
|
||||
if(!(UFSHCIMem.ORInterruptStatus & 0x01)) {
|
||||
if (!(UFSHCIMem.ORInterruptStatus & 0x01)) {
|
||||
UFSHCIMem.ORInterruptStatus |= UTPTransferREQCOMPL;
|
||||
generateInterrupt();
|
||||
}
|
||||
|
||||
|
||||
if(!readDoneEvent.empty()) {
|
||||
if (!readDoneEvent.empty()) {
|
||||
readDoneEvent.pop_front();
|
||||
}
|
||||
}
|
||||
|
@ -1884,7 +1884,7 @@ UFSHostDevice::writeDevice(Event* additional_action, bool toDisk, Addr
|
|||
if (toDisk) {
|
||||
++writePendingNum;
|
||||
|
||||
while(!writeDoneEvent.empty() && (writeDoneEvent.front().when()
|
||||
while (!writeDoneEvent.empty() && (writeDoneEvent.front().when()
|
||||
< curTick()))
|
||||
writeDoneEvent.pop_front();
|
||||
|
||||
|
@ -2243,7 +2243,7 @@ UFSHostDevice::readCallback()
|
|||
uint8_t this_lun = 0;
|
||||
|
||||
//while we haven't found the right lun, keep searching
|
||||
while((this_lun < lunAvail) && !UFSDevice[this_lun]->finishedRead())
|
||||
while ((this_lun < lunAvail) && !UFSDevice[this_lun]->finishedRead())
|
||||
++this_lun;
|
||||
|
||||
DPRINTF(UFSHostDevice, "Found LUN %d messages pending for clean: %d\n",
|
||||
|
|
|
@ -110,7 +110,7 @@ void
|
|||
Intel8254Timer::Counter::latchCount()
|
||||
{
|
||||
// behave like a real latch
|
||||
if(!latch_on) {
|
||||
if (!latch_on) {
|
||||
latch_on = true;
|
||||
read_byte = LSB;
|
||||
latched_count = currentCount();
|
||||
|
@ -207,7 +207,7 @@ Intel8254Timer::Counter::setRW(int rw_val)
|
|||
void
|
||||
Intel8254Timer::Counter::setMode(int mode_val)
|
||||
{
|
||||
if(mode_val != InitTc && mode_val != RateGen &&
|
||||
if (mode_val != InitTc && mode_val != RateGen &&
|
||||
mode_val != SquareWave)
|
||||
panic("PIT mode %#x is not implemented: \n", mode_val);
|
||||
|
||||
|
|
|
@ -207,14 +207,14 @@ MaltaCChip::write(PacketPtr pkt)
|
|||
olddir = dir[number];
|
||||
dim[number] = pkt->get<uint64_t>();
|
||||
dir[number] = dim[number] & drir;
|
||||
for(int x = 0; x < Malta::Max_CPUs; x++)
|
||||
for (int x = 0; x < Malta::Max_CPUs; x++)
|
||||
{
|
||||
bitvector = ULL(1) << x;
|
||||
// Figure out which bits have changed
|
||||
if ((dim[number] & bitvector) != (olddim & bitvector))
|
||||
{
|
||||
// The bit is now set and it wasn't before (set)
|
||||
if((dim[number] & bitvector) && (dir[number] & bitvector))
|
||||
if ((dim[number] & bitvector) && (dir[number] & bitvector))
|
||||
{
|
||||
malta->intrctrl->post(number, TheISA::INTLEVEL_IRQ1, x);
|
||||
DPRINTF(Malta, "dim write resulting in posting dir"
|
||||
|
@ -269,7 +269,7 @@ MaltaCChip::write(PacketPtr pkt)
|
|||
if (pkt->get<uint64_t>() & 0x10000000)
|
||||
supportedWrite = true;
|
||||
|
||||
if(!supportedWrite)
|
||||
if (!supportedWrite)
|
||||
panic("TSDEV_CC_MISC write not implemented\n");
|
||||
|
||||
break;
|
||||
|
@ -283,11 +283,11 @@ MaltaCChip::write(PacketPtr pkt)
|
|||
case TSDEV_CC_DIM2:
|
||||
case TSDEV_CC_DIM3:
|
||||
int number;
|
||||
if(regnum == TSDEV_CC_DIM0)
|
||||
if (regnum == TSDEV_CC_DIM0)
|
||||
number = 0;
|
||||
else if(regnum == TSDEV_CC_DIM1)
|
||||
else if (regnum == TSDEV_CC_DIM1)
|
||||
number = 1;
|
||||
else if(regnum == TSDEV_CC_DIM2)
|
||||
else if (regnum == TSDEV_CC_DIM2)
|
||||
number = 2;
|
||||
else
|
||||
number = 3;
|
||||
|
@ -300,14 +300,14 @@ MaltaCChip::write(PacketPtr pkt)
|
|||
olddir = dir[number];
|
||||
dim[number] = pkt->get<uint64_t>();
|
||||
dir[number] = dim[number] & drir;
|
||||
for(int x = 0; x < 64; x++)
|
||||
for (int x = 0; x < 64; x++)
|
||||
{
|
||||
bitvector = ULL(1) << x;
|
||||
// Figure out which bits have changed
|
||||
if ((dim[number] & bitvector) != (olddim & bitvector))
|
||||
{
|
||||
// The bit is now set and it wasn't before (set)
|
||||
if((dim[number] & bitvector) && (dir[number] & bitvector))
|
||||
if ((dim[number] & bitvector) && (dir[number] & bitvector))
|
||||
{
|
||||
malta->intrctrl->post(number, TheISA::INTLEVEL_IRQ1, x);
|
||||
DPRINTF(Malta, "posting dir interrupt to cpu 0\n");
|
||||
|
@ -476,7 +476,7 @@ MaltaCChip::postIntr(uint32_t interrupt)
|
|||
uint64_t size = sys->threadContexts.size();
|
||||
assert(size <= Malta::Max_CPUs);
|
||||
|
||||
for(int i=0; i < size; i++) {
|
||||
for (int i=0; i < size; i++) {
|
||||
//Note: Malta does not use index, but this was added to use the pre-existing implementation
|
||||
malta->intrctrl->post(i, interrupt, 0);
|
||||
DPRINTF(Malta, "posting interrupt to cpu %d,"
|
||||
|
@ -491,7 +491,7 @@ MaltaCChip::clearIntr(uint32_t interrupt)
|
|||
uint64_t size = sys->threadContexts.size();
|
||||
assert(size <= Malta::Max_CPUs);
|
||||
|
||||
for(int i=0; i < size; i++) {
|
||||
for (int i=0; i < size; i++) {
|
||||
//Note: Malta does not use index, but this was added to use the pre-existing implementation
|
||||
malta->intrctrl->clear(i, interrupt, 0);
|
||||
DPRINTF(Malta, "clearing interrupt to cpu %d,"
|
||||
|
|
|
@ -93,7 +93,7 @@ VirtDescriptor::updateChain()
|
|||
VirtDescriptor *desc(this);
|
||||
do {
|
||||
desc->update();
|
||||
} while((desc = desc->next()) != NULL && desc != this);
|
||||
} while ((desc = desc->next()) != NULL && desc != this);
|
||||
|
||||
if (desc == this)
|
||||
panic("Loop in descriptor chain!\n");
|
||||
|
@ -125,7 +125,7 @@ VirtDescriptor::dumpChain() const
|
|||
const VirtDescriptor *desc(this);
|
||||
do {
|
||||
desc->dump();
|
||||
} while((desc = desc->next()) != NULL);
|
||||
} while ((desc = desc->next()) != NULL);
|
||||
}
|
||||
|
||||
VirtDescriptor *
|
||||
|
@ -177,7 +177,7 @@ VirtDescriptor::chainRead(size_t offset, uint8_t *dst, size_t size) const
|
|||
} else {
|
||||
offset -= desc->size();
|
||||
}
|
||||
} while((desc = desc->next()) != NULL && desc->isIncoming() && size > 0);
|
||||
} while ((desc = desc->next()) != NULL && desc->isIncoming() && size > 0);
|
||||
|
||||
if (size != 0) {
|
||||
panic("Failed to read %i bytes from chain of %i bytes @ offset %i\n",
|
||||
|
@ -200,7 +200,7 @@ VirtDescriptor::chainWrite(size_t offset, const uint8_t *src, size_t size)
|
|||
} else {
|
||||
offset -= desc->size();
|
||||
}
|
||||
} while((desc = desc->next()) != NULL && size > 0);
|
||||
} while ((desc = desc->next()) != NULL && size > 0);
|
||||
|
||||
if (size != 0) {
|
||||
panic("Failed to write %i bytes into chain of %i bytes @ offset %i\n",
|
||||
|
@ -215,7 +215,7 @@ VirtDescriptor::chainSize() const
|
|||
const VirtDescriptor *desc(this);
|
||||
do {
|
||||
size += desc->size();
|
||||
} while((desc = desc->next()) != NULL);
|
||||
} while ((desc = desc->next()) != NULL);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
@ -315,7 +315,7 @@ VirtQueue::onNotify()
|
|||
|
||||
// Consume all pending descriptors from the input queue.
|
||||
VirtDescriptor *d;
|
||||
while((d = consumeDescriptor()) != NULL)
|
||||
while ((d = consumeDescriptor()) != NULL)
|
||||
onNotifyDescriptor(d);
|
||||
}
|
||||
|
||||
|
|
|
@@ -385,7 +385,7 @@ Bridge::BridgeMasterPort::checkFunctional(PacketPtr pkt)
bool found = false;
auto i = transmitList.begin();

-while(i != transmitList.end() && !found) {
+while (i != transmitList.end() && !found) {
if (pkt->checkFunctional((*i).pkt)) {
pkt->makeResponse();
found = true;

src/mem/cache/prefetch/stride.cc

@@ -114,7 +114,7 @@ StridePrefetcher::calculatePrefetch(const PacketPtr &pkt,
// Lookup pc-based information
StrideEntry *entry;

-if(pcTableHit(pc, is_secure, master_id, entry)) {
+if (pcTableHit(pc, is_secure, master_id, entry)) {
// Hit in table
int new_stride = pkt_addr - entry->lastAddr;
bool stride_match = (new_stride == entry->stride);

@ -723,7 +723,7 @@ DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
|
|||
|
||||
if (memSchedPolicy == Enums::fcfs) {
|
||||
// check if there is a packet going to a free rank
|
||||
for(auto i = queue.begin(); i != queue.end() ; ++i) {
|
||||
for (auto i = queue.begin(); i != queue.end() ; ++i) {
|
||||
DRAMPacket* dram_pkt = *i;
|
||||
if (ranks[dram_pkt->rank]->isAvailable()) {
|
||||
queue.erase(i);
|
||||
|
@ -911,7 +911,7 @@ DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
|
|||
bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);
|
||||
|
||||
// start by enforcing tRRD
|
||||
for(int i = 0; i < banksPerRank; i++) {
|
||||
for (int i = 0; i < banksPerRank; i++) {
|
||||
// next activate to any bank in this rank must not happen
|
||||
// before tRRD
|
||||
if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
|
||||
|
@ -956,7 +956,7 @@ DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
|
|||
DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
|
||||
"no earlier than %llu\n", activationLimit,
|
||||
rank_ref.actTicks.back() + tXAW);
|
||||
for(int j = 0; j < banksPerRank; j++)
|
||||
for (int j = 0; j < banksPerRank; j++)
|
||||
// next activate must not happen before end of window
|
||||
rank_ref.banks[j].actAllowedAt =
|
||||
std::max(rank_ref.actTicks.back() + tXAW,
|
||||
|
@ -1073,8 +1073,8 @@ DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
|
|||
// update the time for the next read/write burst for each
|
||||
// bank (add a max with tCCD/tCCD_L here)
|
||||
Tick cmd_dly;
|
||||
for(int j = 0; j < ranksPerChannel; j++) {
|
||||
for(int i = 0; i < banksPerRank; i++) {
|
||||
for (int j = 0; j < ranksPerChannel; j++) {
|
||||
for (int i = 0; i < banksPerRank; i++) {
|
||||
// next burst to same bank group in this rank must not happen
|
||||
// before tCCD_L. Different bank group timing requirement is
|
||||
// tBURST; Add tCS for different ranks
|
||||
|
@ -1454,7 +1454,7 @@ DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
|
|||
// bank in question
|
||||
vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
|
||||
for (const auto& p : queue) {
|
||||
if(p->rankRef.isAvailable())
|
||||
if (p->rankRef.isAvailable())
|
||||
got_waiting[p->bankId] = true;
|
||||
}
|
||||
|
||||
|
|
|
@ -373,7 +373,7 @@ PhysicalMemory::unserialize(CheckpointIn &cp)
|
|||
vector<ContextID> lal_cid;
|
||||
UNSERIALIZE_CONTAINER(lal_addr);
|
||||
UNSERIALIZE_CONTAINER(lal_cid);
|
||||
for(size_t i = 0; i < lal_addr.size(); ++i) {
|
||||
for (size_t i = 0; i < lal_addr.size(); ++i) {
|
||||
const auto& m = addrMap.find(lal_addr[i]);
|
||||
m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
|
||||
}
|
||||
|
|
|
@ -72,7 +72,7 @@ BaseMasterPort::~BaseMasterPort()
|
|||
BaseSlavePort&
|
||||
BaseMasterPort::getSlavePort() const
|
||||
{
|
||||
if(_baseSlavePort == NULL)
|
||||
if (_baseSlavePort == NULL)
|
||||
panic("Cannot getSlavePort on master port %s that is not connected\n",
|
||||
name());
|
||||
|
||||
|
@ -98,7 +98,7 @@ BaseSlavePort::~BaseSlavePort()
|
|||
BaseMasterPort&
|
||||
BaseSlavePort::getMasterPort() const
|
||||
{
|
||||
if(_baseMasterPort == NULL)
|
||||
if (_baseMasterPort == NULL)
|
||||
panic("Cannot getMasterPort on slave port %s that is not connected\n",
|
||||
name());
|
||||
|
||||
|
|
|
@ -146,7 +146,7 @@ BulkBloomFilter::isSet(Addr addr)
|
|||
|
||||
// check second section
|
||||
zero = false;
|
||||
for(int i = m_filter_size / 2; i < m_filter_size; ++i) {
|
||||
for (int i = m_filter_size / 2; i < m_filter_size; ++i) {
|
||||
// get intersection of signatures
|
||||
m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
|
||||
zero = zero || m_temp_filter[i];
|
||||
|
|
|
@ -419,7 +419,7 @@ H3BloomFilter::merge(AbstractBloomFilter *other_filter)
|
|||
{
|
||||
// assumes both filters are the same size!
|
||||
H3BloomFilter * temp = (H3BloomFilter*) other_filter;
|
||||
for(int i = 0; i < m_filter_size; ++i){
|
||||
for (int i = 0; i < m_filter_size; ++i){
|
||||
m_filter[i] |= (*temp)[i];
|
||||
}
|
||||
}
|
||||
|
@ -513,7 +513,7 @@ H3BloomFilter::hash_H3(uint64_t value, int index)
|
|||
int result = 0;
|
||||
|
||||
for (int i = 0; i < 64; i++) {
|
||||
if(val&mask) result ^= H3[i][index];
|
||||
if (val&mask) result ^= H3[i][index];
|
||||
val = val >> 1;
|
||||
}
|
||||
return result;
|
||||
|
|
|
@ -93,7 +93,7 @@ MultiBitSelBloomFilter::merge(AbstractBloomFilter *other_filter)
|
|||
{
|
||||
// assumes both filters are the same size!
|
||||
MultiBitSelBloomFilter * temp = (MultiBitSelBloomFilter*) other_filter;
|
||||
for(int i = 0; i < m_filter_size; ++i){
|
||||
for (int i = 0; i < m_filter_size; ++i){
|
||||
m_filter[i] |= (*temp)[i];
|
||||
}
|
||||
}
|
||||
|
|
|
@ -58,7 +58,7 @@ MultiGrainBloomFilter::clear()
|
|||
for (int i = 0; i < m_filter_size; i++) {
|
||||
m_filter[i] = 0;
|
||||
}
|
||||
for(int i=0; i < m_page_filter_size; ++i){
|
||||
for (int i=0; i < m_page_filter_size; ++i){
|
||||
m_page_filter[i] = 0;
|
||||
}
|
||||
}
|
||||
|
@ -125,7 +125,7 @@ MultiGrainBloomFilter::getTotalCount()
|
|||
count += m_filter[i];
|
||||
}
|
||||
|
||||
for(int i=0; i < m_page_filter_size; ++i) {
|
||||
for (int i=0; i < m_page_filter_size; ++i) {
|
||||
count += m_page_filter[i] = 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -73,7 +73,7 @@ NonCountingBloomFilter::merge(AbstractBloomFilter *other_filter)
|
|||
{
|
||||
// assumes both filters are the same size!
|
||||
NonCountingBloomFilter * temp = (NonCountingBloomFilter*) other_filter;
|
||||
for(int i = 0; i < m_filter_size; ++i){
|
||||
for (int i = 0; i < m_filter_size; ++i){
|
||||
m_filter[i] |= (*temp)[i];
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -263,7 +263,7 @@ MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
void
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
{
-while(!lt.empty()) {
+while (!lt.empty()) {
m_msg_counter++;
MsgPtr m = lt.front();
m->setLastEnqueueTime(schdTick);

@ -52,7 +52,7 @@ GarnetNetwork_d::GarnetNetwork_d(const Params *p)
|
|||
|
||||
m_vnet_type.resize(m_virtual_networks);
|
||||
|
||||
for(int i = 0 ; i < m_virtual_networks ; i++)
|
||||
for (int i = 0 ; i < m_virtual_networks ; i++)
|
||||
{
|
||||
if (m_vnet_type_names[i] == "response")
|
||||
m_vnet_type[i] = DATA_VNET_; // carries data (and ctrl) packets
|
||||
|
@ -94,7 +94,7 @@ GarnetNetwork_d::init()
|
|||
m_topology_ptr->createLinks(this);
|
||||
|
||||
// FaultModel: declare each router to the fault model
|
||||
if(isFaultModelEnabled()){
|
||||
if (isFaultModelEnabled()){
|
||||
for (vector<Router_d*>::const_iterator i= m_routers.begin();
|
||||
i != m_routers.end(); ++i) {
|
||||
Router_d* router = safe_cast<Router_d*>(*i);
|
||||
|
|
|
@ -185,7 +185,7 @@ NetworkInterface_d::calculateVC(int vnet)
|
|||
for (int i = 0; i < m_vc_per_vnet; i++) {
|
||||
int delta = m_vc_allocator[vnet];
|
||||
m_vc_allocator[vnet]++;
|
||||
if(m_vc_allocator[vnet] == m_vc_per_vnet)
|
||||
if (m_vc_allocator[vnet] == m_vc_per_vnet)
|
||||
m_vc_allocator[vnet] = 0;
|
||||
|
||||
if (m_out_vc_state[(vnet*m_vc_per_vnet) + delta]->isInState(
|
||||
|
|
|
@ -61,7 +61,7 @@ PerfectSwitch::init(SimpleNetwork *network_ptr)
|
|||
{
|
||||
m_network_ptr = network_ptr;
|
||||
|
||||
for(int i = 0;i < m_virtual_networks;++i) {
|
||||
for (int i = 0;i < m_virtual_networks;++i) {
|
||||
m_pending_message_count.push_back(0);
|
||||
}
|
||||
}
|
||||
|
@ -110,7 +110,7 @@ PerfectSwitch::operateVnet(int vnet)
|
|||
m_round_robin_start = 0;
|
||||
}
|
||||
|
||||
if(m_pending_message_count[vnet] > 0) {
|
||||
if (m_pending_message_count[vnet] > 0) {
|
||||
// for all input ports, use round robin scheduling
|
||||
for (int counter = 0; counter < m_in.size(); counter++) {
|
||||
// Round robin scheduling
|
||||
|
|
|
@ -64,9 +64,9 @@ AccessTraceForAddress::update(RubyRequestType type,
|
|||
{
|
||||
m_touched_by.add(cpu);
|
||||
m_total++;
|
||||
if(type == RubyRequestType_ATOMIC) {
|
||||
if (type == RubyRequestType_ATOMIC) {
|
||||
m_atomics++;
|
||||
} else if(type == RubyRequestType_LD){
|
||||
} else if (type == RubyRequestType_LD){
|
||||
m_loads++;
|
||||
} else if (type == RubyRequestType_ST){
|
||||
m_stores++;
|
||||
|
|
|
@ -156,7 +156,7 @@ AbstractController::wakeUpAllBuffers()
|
|||
std::vector<MsgVecType*> wokeUpMsgVecs;
|
||||
MsgBufType wokeUpMsgBufs;
|
||||
|
||||
if(m_waiting_buffers.size() > 0) {
|
||||
if (m_waiting_buffers.size() > 0) {
|
||||
for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
|
||||
buf_iter != m_waiting_buffers.end();
|
||||
++buf_iter) {
|
||||
|
|
|
@ -36,9 +36,9 @@ AbstractReplacementPolicy::AbstractReplacementPolicy(const Params * p)
|
|||
m_num_sets = p->size/p->block_size/p->assoc;
|
||||
m_assoc = p->assoc;
|
||||
m_last_ref_ptr = new Tick*[m_num_sets];
|
||||
for(unsigned i = 0; i < m_num_sets; i++){
|
||||
for (unsigned i = 0; i < m_num_sets; i++){
|
||||
m_last_ref_ptr[i] = new Tick[m_assoc];
|
||||
for(unsigned j = 0; j < m_assoc; j++){
|
||||
for (unsigned j = 0; j < m_assoc; j++){
|
||||
m_last_ref_ptr[i][j] = 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -73,7 +73,7 @@ BankedArray::reserve(int64_t idx)
|
|||
unsigned int bank = mapIndexToBank(idx);
|
||||
assert(bank < banks);
|
||||
|
||||
if(busyBanks[bank].endAccess >= curTick()) {
|
||||
if (busyBanks[bank].endAccess >= curTick()) {
|
||||
if (busyBanks[bank].startAccess == curTick() &&
|
||||
busyBanks[bank].idx == idx) {
|
||||
// this is the same reservation (can happen when
|
||||
|
|
|
@@ -323,7 +323,7 @@ CacheMemory::lookup(Addr address)
assert(address == makeLineAddress(address));
int64_t cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
-if(loc == -1) return NULL;
+if (loc == -1) return NULL;
return m_cache[cacheSet][loc];
}

@ -334,7 +334,7 @@ CacheMemory::lookup(Addr address) const
|
|||
assert(address == makeLineAddress(address));
|
||||
int64_t cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
if(loc == -1) return NULL;
|
||||
if (loc == -1) return NULL;
|
||||
return m_cache[cacheSet][loc];
|
||||
}
|
||||
|
||||
|
@ -345,7 +345,7 @@ CacheMemory::setMRU(Addr address)
|
|||
int64_t cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
|
||||
if(loc != -1)
|
||||
if (loc != -1)
|
||||
m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
|
||||
}
|
||||
|
||||
|
@ -363,7 +363,7 @@ CacheMemory::setMRU(Addr address, int occupancy)
|
|||
int64_t cacheSet = addressToCacheSet(address);
|
||||
int loc = findTagInSet(cacheSet, address);
|
||||
|
||||
if(loc != -1) {
|
||||
if (loc != -1) {
|
||||
if (m_replacementPolicy_ptr->useOccupancy()) {
|
||||
(static_cast<WeightedLRUPolicy*>(m_replacementPolicy_ptr))->
|
||||
touch(cacheSet, loc, curTick(), occupancy);
|
||||
|
@ -380,7 +380,7 @@ CacheMemory::getReplacementWeight(int64_t set, int64_t loc)
|
|||
assert(set < m_cache_num_sets);
|
||||
assert(loc < m_cache_assoc);
|
||||
int ret = 0;
|
||||
if(m_cache[set][loc] != NULL) {
|
||||
if (m_cache[set][loc] != NULL) {
|
||||
ret = m_cache[set][loc]->getNumValidBlocks();
|
||||
assert(ret >= 0);
|
||||
}
|
||||
|
|
|
@ -51,7 +51,7 @@ PseudoLRUPolicy::PseudoLRUPolicy(const Params * p)
|
|||
int tmp_assoc = m_effective_assoc;
|
||||
while (true) {
|
||||
tmp_assoc /= 2;
|
||||
if(!tmp_assoc) break;
|
||||
if (!tmp_assoc) break;
|
||||
m_num_levels++;
|
||||
}
|
||||
assert(m_num_levels < sizeof(unsigned int)*4);
|
||||
|
|
|
@ -639,7 +639,7 @@ DrainState
|
|||
RubyMemoryControl::drain()
|
||||
{
|
||||
DPRINTF(RubyMemory, "MemoryController drain\n");
|
||||
if(m_event.scheduled()) {
|
||||
if (m_event.scheduled()) {
|
||||
deschedule(m_event);
|
||||
}
|
||||
return DrainState::Drained;
|
||||
|
|
|
@ -110,7 +110,7 @@ template<class ENTRY>
|
|||
inline ENTRY*
|
||||
TBETable<ENTRY>::lookup(Addr address)
|
||||
{
|
||||
if(m_map.find(address) != m_map.end()) return &(m_map.find(address)->second);
|
||||
if (m_map.find(address) != m_map.end()) return &(m_map.find(address)->second);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
|
@@ -239,7 +239,7 @@ GPUCoalescer::getRequestStatus(PacketPtr pkt, RubyRequestType request_type)
return RequestStatus_BufferFull;
}

-if(m_controller->isBlocked(line_addr) &&
+if (m_controller->isBlocked(line_addr) &&
request_type != RubyRequestType_Locked_RMW_Write) {
return RequestStatus_Aliased;
}

@@ -519,7 +519,7 @@ GPUCoalescer::writeCallback(Addr address,
// Not valid for Network_test protocl
//
bool success = true;
-if(!m_usingNetworkTester)
+if (!m_usingNetworkTester)
success = handleLlsc(address, request);

if (request->m_type == RubyRequestType_Locked_RMW_Read) {

@@ -704,7 +704,7 @@ GPUCoalescer::makeRequest(PacketPtr pkt)
// This is a Kernel Begin leave handling to
// virtual xCoalescer::makeRequest
return RequestStatus_Issued;
-}else if(pkt->req->isRelease()) {
+}else if (pkt->req->isRelease()) {
// This is a Kernel End leave handling to
// virtual xCoalescer::makeRequest
// If we are here then we didn't call

@@ -917,7 +917,7 @@ GPUCoalescer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
std::pair<int,AtomicOpFunctor *> tmpAtomicOp(tmpOffset,
tmpPkt->getAtomicOp());
atomicOps.push_back(tmpAtomicOp);
-} else if(tmpPkt->isWrite()) {
+} else if (tmpPkt->isWrite()) {
dataBlock.setData(tmpPkt->getPtr<uint8_t>(),
tmpOffset, tmpSize);
}

@@ -1151,11 +1151,11 @@ GPUCoalescer::atomicCallback(Addr address,
void
GPUCoalescer::recordCPReadCallBack(MachineID myMachID, MachineID senderMachID)
{
-if(myMachID == senderMachID) {
+if (myMachID == senderMachID) {
CP_TCPLdHits++;
-} else if(machineIDToMachineType(senderMachID) == MachineType_TCP) {
+} else if (machineIDToMachineType(senderMachID) == MachineType_TCP) {
CP_TCPLdTransfers++;
-} else if(machineIDToMachineType(senderMachID) == MachineType_TCC) {
+} else if (machineIDToMachineType(senderMachID) == MachineType_TCC) {
CP_TCCLdHits++;
} else {
CP_LdMiss++;

@@ -1165,11 +1165,11 @@ GPUCoalescer::recordCPReadCallBack(MachineID myMachID, MachineID senderMachID)
void
GPUCoalescer::recordCPWriteCallBack(MachineID myMachID, MachineID senderMachID)
{
-if(myMachID == senderMachID) {
+if (myMachID == senderMachID) {
CP_TCPStHits++;
-} else if(machineIDToMachineType(senderMachID) == MachineType_TCP) {
+} else if (machineIDToMachineType(senderMachID) == MachineType_TCP) {
CP_TCPStTransfers++;
-} else if(machineIDToMachineType(senderMachID) == MachineType_TCC) {
+} else if (machineIDToMachineType(senderMachID) == MachineType_TCC) {
CP_TCCStHits++;
} else {
CP_StMiss++;

@@ -379,7 +379,7 @@ Sequencer::writeCallback(Addr address, DataBlock& data,
// Not valid for Network_test protocl
//
bool success = true;
-if(!m_usingNetworkTester)
+if (!m_usingNetworkTester)
success = handleLlsc(address, request);

if (request->m_type == RubyRequestType_Locked_RMW_Read) {

@@ -117,7 +117,7 @@ VIPERCoalescer::makeRequest(PacketPtr pkt)
// isKernel + isRelease
insertKernel(pkt->req->contextId(), pkt);
wbL1();
-if(m_outstanding_wb == 0) {
+if (m_outstanding_wb == 0) {
for (auto it = kernelEndList.begin(); it != kernelEndList.end(); it++) {
newKernelEnds.push_back(it->first);
}

@@ -261,7 +261,7 @@ VIPERCoalescer::invwbL1()
{
int size = m_dataCache_ptr->getNumBlocks();
// Walk the cache
-for(int i = 0; i < size; i++) {
+for (int i = 0; i < size; i++) {
Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
// Evict Read-only data
std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(

@@ -273,7 +273,7 @@ VIPERCoalescer::invwbL1()
m_outstanding_inv++;
}
// Walk the cache
-for(int i = 0; i< size; i++) {
+for (int i = 0; i< size; i++) {
Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
// Write dirty data back
std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(

@@ -39,9 +39,9 @@ WeightedLRUPolicy::WeightedLRUPolicy(const Params* p)
: AbstractReplacementPolicy(p), m_cache(p->cache)
{
m_last_occ_ptr = new int*[m_num_sets];
-for(unsigned i = 0; i < m_num_sets; i++){
+for (unsigned i = 0; i < m_num_sets; i++){
m_last_occ_ptr[i] = new int[m_assoc];
-for(unsigned j = 0; j < m_assoc; j++){
+for (unsigned j = 0; j < m_assoc; j++){
m_last_occ_ptr[i][j] = 0;
}
}

@@ -413,7 +413,7 @@ SerialLink::SerialLinkMasterPort::checkFunctional(PacketPtr pkt)
bool found = false;
auto i = transmitList.begin();

-while(i != transmitList.end() && !found) {
+while (i != transmitList.end() && !found) {
if (pkt->checkFunctional((*i).pkt)) {
pkt->makeResponse();
found = true;

@@ -216,13 +216,13 @@ StackDistCalc::getSum(Node* node, bool from_left, uint64_t sum_from_below,
++level;
// Variable stack_dist is updated only
// when arriving from Left.
-if(from_left) {
+if (from_left) {
stack_dist += node->sumRight;
}

// Recursively call the getSum operation till the
// root node is reached
-if(node->parent) {
+if (node->parent) {
stack_dist = getSum(node->parent, node->isLeftNode,
node->sumLeft + node->sumRight,
stack_dist, level);

@@ -119,7 +119,7 @@ connectPorts(SimObject *o1, const std::string &name1, int i1,
mo1 = dynamic_cast<MemObject*>(o1);
mo2 = dynamic_cast<MemObject*>(o2);

-if(mo1 == NULL || mo2 == NULL) {
+if (mo1 == NULL || mo2 == NULL) {
panic ("Error casting SimObjects %s and %s to MemObject", o1->name(),
o2->name());
}

@@ -48,7 +48,7 @@
do { \
static const char msg[] = m; \
atomic_write(STDERR_FILENO, msg, sizeof(msg) - 1); \
-} while(0)
+} while (0)

void
print_backtrace()

@@ -63,7 +63,7 @@ DVFSHandler::DVFSHandler(const Params *p)
{
// Check supplied list of domains for sanity and add them to the
// domain ID -> domain* hash
-for(auto dit = p->domains.begin(); dit != p->domains.end(); ++dit) {
+for (auto dit = p->domains.begin(); dit != p->domains.end(); ++dit) {
SrcClockDomain *d = *dit;
DomainID domain_id = d->domainID();

@@ -203,7 +203,7 @@ DVFSHandler::unserialize(CheckpointIn &cp)

UNSERIALIZE_SCALAR(enableHandler);

-if(temp != enableHandler) {
+if (temp != enableHandler) {
warn("DVFS: Forcing enable handler status to unserialized value of %d",
enableHandler);
}

@@ -175,7 +175,7 @@ objParamIn(CheckpointIn &cp, const std::string &name, SimObject * &param);
do { \
event.unserializeSection(cp, #event); \
eventQueue()->checkpointReschedule(&event); \
-} while(0)
+} while (0)

#define SERIALIZE_OBJ(obj) obj.serializeSection(cp, #obj)
#define UNSERIALIZE_OBJ(obj) obj.unserializeSection(cp, #obj)

@@ -335,7 +335,7 @@ futexFunc(SyscallDesc *desc, int callnum, LiveProcess *process,
int mem_val = *((int *)buf);
delete buf;

-if(val != mem_val) {
+if (val != mem_val) {
DPRINTF(SyscallVerbose, "sys_futex: FUTEX_WAKE, read: %d, "
"expected: %d\n", mem_val, val);
return -OS::TGT_EWOULDBLOCK;

@@ -365,7 +365,7 @@ futexFunc(SyscallDesc *desc, int callnum, LiveProcess *process,
tcWaitList->pop_front();
wokenUp++;
}
-if(tcWaitList->empty()) {
+if (tcWaitList->empty()) {
futex_map.erase(uaddr);
delete tcWaitList;
}

@@ -1502,7 +1502,7 @@ timeFunc(SyscallDesc *desc, int callnum, LiveProcess *process,

int index = 0;
Addr taddr = (Addr)process->getSyscallArg(tc, index);
-if(taddr != 0) {
+if (taddr != 0) {
typename OS::time_t t = sec;
t = TheISA::htog(t);
SETranslatingPortProxy &p = tc->getMemProxy();

@@ -71,7 +71,7 @@ __kernel void read_kernel(size_t code_size,
barrier(CLK_LOCAL_MEM_FENCE);


-if(get_local_id(0) == 0) {
+if (get_local_id(0) == 0) {
int _lcount = atomic_load(&lcount);
atomic_fetch_add((atomic_int *)chars_decoded, _lcount);
}

@@ -70,7 +70,7 @@ int
setupDataStructs()
{
msg = (char *)memalign(CACHE_LINE_SIZE, (grid_size + 1) * sizeof(char));
-if(msg == NULL) {
+if (msg == NULL) {
printf("%s:%d: error: %s\n", __FILE__, __LINE__,
"could not allocate host buffers\n");
exit(-1);

@@ -126,7 +126,7 @@ setupOpenCL()
delete platforms;
}

-if(NULL == platform) {
+if (NULL == platform) {
printf("NULL platform found so Exiting Application.\n");
return FAILURE;
}

@@ -44,7 +44,7 @@ int main( int argc, char** argv)

// invalidate flags in this cpu's cache
pthread_create(&threads[0], NULL, DoWork1, NULL);
-while(wait[0]);
+while (wait[0]);

// launch thread to invalidate address being monitored
pthread_create(&threads[0], NULL, DoWork2, NULL);

@@ -53,17 +53,17 @@ int main( int argc, char** argv)
int mwait_cnt = 0;
do {
pthread_mutex_lock (&mutex);
-if(flags[0] != 2) {
+if (flags[0] != 2) {
pthread_mutex_unlock (&mutex);
__builtin_ia32_mwait(0, 0);
} else {
pthread_mutex_unlock (&mutex);
}
mwait_cnt++;
-} while(flags[0] != 2 && mwait_cnt < NUM_TRIES);
+} while (flags[0] != 2 && mwait_cnt < NUM_TRIES);

// test may hang if mwait is not working
-if(flags[0]==2) {
+if (flags[0]==2) {
printf("mwait regression PASSED, flags[0] = %d\n", flags[0]);
} else {
printf("mwait regression FAILED, flags[0] = %d\n", flags[0]);

@@ -271,7 +271,7 @@ AMD64TraceChild::outputStartState(ostream & os)
highestInfo = cargv;
os << obuf;
sp += 8;
-} while(cargv);
+} while (cargv);

//Output the envp pointers
int envCount = 0;

@@ -282,7 +282,7 @@ AMD64TraceChild::outputStartState(ostream & os)
sp, envCount++, cenvp);
os << obuf;
sp += 8;
-} while(cenvp);
+} while (cenvp);
uint64_t auxType, auxVal;
do {
auxType = ptrace(PTRACE_PEEKDATA, pid, sp, 0);

@@ -292,7 +292,7 @@ AMD64TraceChild::outputStartState(ostream & os)
sprintf(obuf, "0x%016lx: Auxiliary vector = {0x%016lx, 0x%016lx}\n",
sp - 16, auxType, auxVal);
os << obuf;
-} while(auxType != 0 || auxVal != 0);
+} while (auxType != 0 || auxVal != 0);
//Print out the argument strings, environment strings, and file name.
string current;
uint64_t buf;

@@ -329,7 +329,7 @@ AMD64TraceChild::findSyscall()
for (int i = 0; i < sizeof(uint64_t); i++) {
unsigned char byte = buf & 0xFF;
if (!foundOpcode) {
-if(!(byte == 0x66 || //operand override
+if (!(byte == 0x66 || //operand override
byte == 0x67 || //address override
byte == 0x2E || //cs
byte == 0x3E || //ds

@@ -395,7 +395,7 @@ AMD64TraceChild::step()
do {
ptraceSingleStep();
newPC = getPC();
-} while(newPC == origPC);
+} while (newPC == origPC);
}
}

@@ -183,12 +183,12 @@ ARMTraceChild::outputStartState(ostream & os)
cargv = ptrace(PTRACE_PEEKDATA, pid, sp, 0);
sprintf(obuf, "0x%08x: argv[%d] = 0x%08x\n",
sp, argCount++, cargv);
-if(cargv)
-if(highestInfo < cargv)
+if (cargv)
+if (highestInfo < cargv)
highestInfo = cargv;
os << obuf;
sp += 4;
-} while(cargv);
+} while (cargv);

//Output the envp pointers
int envCount = 0;

@@ -199,7 +199,7 @@ ARMTraceChild::outputStartState(ostream & os)
sp, envCount++, cenvp);
os << obuf;
sp += 4;
-} while(cenvp);
+} while (cenvp);
uint32_t auxType, auxVal;
do {
auxType = ptrace(PTRACE_PEEKDATA, pid, sp, 0);

@@ -209,7 +209,7 @@ ARMTraceChild::outputStartState(ostream & os)
sprintf(obuf, "0x%08x: Auxiliary vector = {0x%08x, 0x%08x}\n",
sp - 8, auxType, auxVal);
os << obuf;
-} while(auxType != 0 || auxVal != 0);
+} while (auxType != 0 || auxVal != 0);
//Print out the argument strings, environment strings, and file name.
string current;
uint32_t buf;

@@ -231,7 +231,7 @@ ARMTraceChild::outputStartState(ostream & os)
}
sp += 4;
clearedInitialPadding = clearedInitialPadding || buf != 0;
-} while(!clearedInitialPadding || buf != 0 || sp <= highestInfo);
+} while (!clearedInitialPadding || buf != 0 || sp <= highestInfo);
return os;
}

@@ -230,7 +230,7 @@ SparcTraceChild::getTargets(uint32_t inst, uint64_t pc, uint64_t npc,
target1 = npc;
target2 = npc + 4;
return 2;
-} else if(ba) {
+} else if (ba) {
//This branches immediately to the effective address of the branch
//which we'll have to calculate.
uint64_t disp = 0;

@@ -249,7 +249,7 @@ SparcTraceChild::getTargets(uint32_t inst, uint64_t pc, uint64_t npc,
//smart enough to turn this into a shift.
disp *= 4;
target1 = pc + disp;
-} else if(bn)
+} else if (bn)
target1 = npc + 4;
else
target1 = npc;

@@ -416,7 +416,7 @@ SparcTraceChild::outputStartState(ostream & os)
sp, argCount++, cargv);
os << obuf;
sp += v8 ? 4 : 8;
-} while(cargv);
+} while (cargv);
//Output the envp pointers
int envCount = 0;
uint64_t cenvp;

@@ -249,7 +249,7 @@ SimControl::run()
{
GlobalSimLoopExitEvent *exit_event = NULL;

-if(sim_end == 0) {
+if (sim_end == 0) {
exit_event = simulate();
} else {
exit_event = simulate(sim_end);

@@ -47,7 +47,7 @@ MemoryManager::MemoryManager(): numberOfAllocations(0), numberOfFrees(0)

MemoryManager::~MemoryManager()
{
-for(gp* payload: freePayloads) {
+for (gp* payload: freePayloads) {
delete payload;
numberOfFrees++;
}

@@ -56,7 +56,7 @@ MemoryManager::~MemoryManager()
gp*
MemoryManager::allocate()
{
-if(freePayloads.empty()) {
+if (freePayloads.empty()) {
numberOfAllocations++;
return new gp(this);
} else {

@@ -191,7 +191,7 @@ sc_transactor::recvTimingReq(PacketPtr packet)
/* NOTE: normal tlm is blocking here. But in our case we return false
* and tell gem5 when a retry can be done. This is the main difference
* in the protocol:
-* if(requestInProgress)
+* if (requestInProgress)
* {
* wait(endRequestEvent);
* }

@@ -214,11 +214,11 @@ sc_transactor::recvTimingReq(PacketPtr packet)
tlm::tlm_sync_enum status;
status = iSocket->nb_transport_fw(*trans, phase, delay);
/* Check returned value: */
-if(status == tlm::TLM_ACCEPTED) {
+if (status == tlm::TLM_ACCEPTED) {
sc_assert(phase == tlm::BEGIN_REQ);
/* Accepted but is now blocking until END_REQ (exclusion rule)*/
blockingRequest = trans;
-} else if(status == tlm::TLM_UPDATED) {
+} else if (status == tlm::TLM_UPDATED) {
/* The Timing annotation must be honored: */
sc_assert(phase == tlm::END_REQ || phase == tlm::BEGIN_RESP);

@@ -226,7 +226,7 @@ sc_transactor::recvTimingReq(PacketPtr packet)
pe = new payloadEvent<sc_transactor>(*this,
&sc_transactor::pec, "PEQ");
pe->notify(*trans, phase, delay);
-} else if(status == tlm::TLM_COMPLETED) {
+} else if (status == tlm::TLM_COMPLETED) {
/* Transaction is over nothing has do be done. */
sc_assert(phase == tlm::END_RESP);
trans->release();

@@ -243,7 +243,7 @@ sc_transactor::pec(
{
sc_time delay;

-if(phase == tlm::END_REQ ||
+if (phase == tlm::END_REQ ||
&trans == blockingRequest && phase == tlm::BEGIN_RESP) {
sc_assert(&trans == blockingRequest);
blockingRequest = NULL;

@@ -254,7 +254,7 @@ sc_transactor::pec(
iSocket.sendRetryReq();
}
}
-else if(phase == tlm::BEGIN_RESP)
+else if (phase == tlm::BEGIN_RESP)
{
CAUGHT_UP;

@@ -84,12 +84,12 @@ Target::transport_dbg(tlm::tlm_generic_payload& trans)

/* Load / Store the access: */
if ( cmd == tlm::TLM_READ_COMMAND ) {
-if(debug) {
+if (debug) {
SC_REPORT_INFO("target", "tlm::TLM_READ_COMMAND");
}
std::memcpy(ptr, mem_array_ptr, len);
} else if ( cmd == tlm::TLM_WRITE_COMMAND ) {
-if(debug) {
+if (debug) {
SC_REPORT_INFO("target", "tlm::TLM_WRITE_COMMAND");
}
std::memcpy(mem_array_ptr, ptr, len);

@@ -115,8 +115,8 @@ Target::peq_cb(tlm::tlm_generic_payload& trans,
{
sc_time delay;

-if(phase == tlm::BEGIN_REQ) {
-if(debug) SC_REPORT_INFO("target", "tlm::BEGIN_REQ");
+if (phase == tlm::BEGIN_REQ) {
+if (debug) SC_REPORT_INFO("target", "tlm::BEGIN_REQ");

/* Increment the transaction reference count */
trans.acquire();

@@ -226,12 +226,12 @@ Target::execute_transaction(tlm::tlm_generic_payload& trans)

/* Load / Store the access: */
if ( cmd == tlm::TLM_READ_COMMAND ) {
-if(debug) {
+if (debug) {
SC_REPORT_INFO("target", "tlm::TLM_READ_COMMAND");
}
std::memcpy(ptr, mem_array_ptr, len);
} else if ( cmd == tlm::TLM_WRITE_COMMAND ) {
-if(debug) {
+if (debug) {
SC_REPORT_INFO("target", "tlm::TLM_WRITE_COMMAND");
}
std::memcpy(mem_array_ptr, ptr, len);