Clock: Add a Cycles wrapper class and use where applicable

This patch addresses the comments and feedback on the preceding patch
that reworks the clocks, and now shows more clearly where cycles
(relative cycle counts) are used to express time.

Instead of bumping the existing patch I chose to make this a separate
patch, merely to keep the discussion focused on a smaller set of
changes. The two patches will, however, be pushed together.

The changes made as part of this patch mostly follow directly from
the introduction of the wrapper class, and touch just enough code to
make things compile and run again. There are definitely more places
where int/uint/Tick is still used to represent cycles, and it will
take some time to chase them all down. Similarly, a lot of parameters
should be changed from Param.Tick and Param.Unsigned to
Param.Cycles.

In addition, the use of curTick is questionable, as there should not
be a notion of an absolute cycle count. Potential solutions can be
built on top of this patch. There is a similar situation in the o3
CPU, where lastRunningCycle currently counts in Cycles yet is still
an absolute time. In other words, there is more discussion to be had.

An additional change that would be appropriate in the future is to
wrap Tick in a similar way, and probably also to introduce a Ticks
class, along with suitable operators for all of these classes.
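
As a rough illustration of the intent (not part of the patch itself),
the sketch below shows a minimal Cycles-style wrapper and a
hypothetical clocked object that converts a relative cycle count into
an absolute tick via its clock period. The ClockedSketch class, its
500-tick period and the tick values are assumptions made purely for
the example; only clockPeriod() and clockEdge(Cycles) mirror calls
that actually appear in the diff below.

#include <cstdint>
#include <iostream>

typedef uint64_t Tick;

// Minimal stand-in for the Cycles wrapper: the explicit constructor
// stops a raw integer (or a Tick) from silently being passed where a
// relative cycle count is expected.
class Cycles
{
  private:
    uint64_t c;

  public:
    explicit Cycles(uint64_t _c) : c(_c) { }
    operator uint64_t() const { return c; }
    const Cycles& operator+=(const Cycles& cc) { c += cc.c; return *this; }
    bool operator>(const Cycles& cc) const { return c > cc.c; }
};

// Hypothetical clocked object, loosely modelled on the clockPeriod()
// and clockEdge(Cycles) interface used throughout the diff below.
class ClockedSketch
{
  private:
    Tick period; // ticks per cycle (assumed fixed)
    Tick now;    // stand-in for curTick()

  public:
    ClockedSketch(Tick p, Tick n) : period(p), now(n) { }

    Tick clockPeriod() const { return period; }

    // Absolute tick of the clock edge 'delay' cycles into the future,
    // i.e. the cycle count only ever turns into ticks right here.
    Tick clockEdge(Cycles delay) const
    {
        Tick next_edge = ((now + period - 1) / period) * period;
        return next_edge + static_cast<uint64_t>(delay) * period;
    }
};

int main()
{
    ClockedSketch cpu(500, 1200);                  // assumed values
    std::cout << "next event at tick "
              << cpu.clockEdge(Cycles(1)) << std::endl;
    // cpu.clockEdge(1); // would not compile: the constructor is explicit
    return 0;
}

The point of the explicit constructor is exactly the last
(commented-out) line: mixing up cycles and ticks now fails at compile
time instead of silently scaling a delay by the wrong unit.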
Andreas Hansson 2012-08-28 14:30:33 -04:00
parent d53d04473e
commit 0cacf7e817
83 changed files with 340 additions and 256 deletions

View file

@ -44,14 +44,14 @@ class ThreadContext;
namespace AlphaISA { namespace AlphaISA {
inline Tick inline Cycles
handleIprRead(ThreadContext *xc, Packet *pkt) handleIprRead(ThreadContext *xc, Packet *pkt)
{ {
panic("No handleIprRead implementation in Alpha\n"); panic("No handleIprRead implementation in Alpha\n");
} }
inline Tick inline Cycles
handleIprWrite(ThreadContext *xc, Packet *pkt) handleIprWrite(ThreadContext *xc, Packet *pkt)
{ {
panic("No handleIprWrite implementation in Alpha\n"); panic("No handleIprWrite implementation in Alpha\n");

View file

@ -67,7 +67,8 @@ void zeroRegisters(TC *tc);
// Alpha IPR register accessors // Alpha IPR register accessors
inline bool PcPAL(Addr addr) { return addr & 0x3; } inline bool PcPAL(Addr addr) { return addr & 0x3; }
inline void startupCPU(ThreadContext *tc, int cpuId) { tc->activate(0); } inline void startupCPU(ThreadContext *tc, int cpuId)
{ tc->activate(Cycles(0)); }
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
// //

View file

@ -46,13 +46,13 @@ class ThreadContext;
namespace ArmISA namespace ArmISA
{ {
inline Tick inline Cycles
handleIprRead(ThreadContext *xc, Packet *pkt) handleIprRead(ThreadContext *xc, Packet *pkt)
{ {
panic("No implementation for handleIprRead in ARM\n"); panic("No implementation for handleIprRead in ARM\n");
} }
inline Tick inline Cycles
handleIprWrite(ThreadContext *xc, Packet *pkt) handleIprWrite(ThreadContext *xc, Packet *pkt)
{ {
panic("No implementation for handleIprWrite in ARM\n"); panic("No implementation for handleIprWrite in ARM\n");

View file

@ -240,15 +240,16 @@ TableWalker::processWalk()
if (currState->timing) { if (currState->timing) {
port.dmaAction(MemCmd::ReadReq, l1desc_addr, sizeof(uint32_t), port.dmaAction(MemCmd::ReadReq, l1desc_addr, sizeof(uint32_t),
&doL1DescEvent, (uint8_t*)&currState->l1Desc.data, &doL1DescEvent, (uint8_t*)&currState->l1Desc.data,
currState->tc->getCpuPtr()->ticks(1), flag); currState->tc->getCpuPtr()->clockPeriod(), flag);
DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n", DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before "
"adding: %d\n",
stateQueueL1.size()); stateQueueL1.size());
stateQueueL1.push_back(currState); stateQueueL1.push_back(currState);
currState = NULL; currState = NULL;
} else if (!currState->functional) { } else if (!currState->functional) {
port.dmaAction(MemCmd::ReadReq, l1desc_addr, sizeof(uint32_t), port.dmaAction(MemCmd::ReadReq, l1desc_addr, sizeof(uint32_t),
NULL, (uint8_t*)&currState->l1Desc.data, NULL, (uint8_t*)&currState->l1Desc.data,
currState->tc->getCpuPtr()->ticks(1), flag); currState->tc->getCpuPtr()->clockPeriod(), flag);
doL1Descriptor(); doL1Descriptor();
f = currState->fault; f = currState->fault;
} else { } else {
@ -588,12 +589,12 @@ TableWalker::doL1Descriptor()
if (currState->timing) { if (currState->timing) {
currState->delayed = true; currState->delayed = true;
port.dmaAction(MemCmd::ReadReq, l2desc_addr, sizeof(uint32_t), port.dmaAction(MemCmd::ReadReq, l2desc_addr, sizeof(uint32_t),
&doL2DescEvent, (uint8_t*)&currState->l2Desc.data, &doL2DescEvent, (uint8_t*)&currState->l2Desc.data,
currState->tc->getCpuPtr()->ticks(1)); currState->tc->getCpuPtr()->clockPeriod());
} else if (!currState->functional) { } else if (!currState->functional) {
port.dmaAction(MemCmd::ReadReq, l2desc_addr, sizeof(uint32_t), port.dmaAction(MemCmd::ReadReq, l2desc_addr, sizeof(uint32_t),
NULL, (uint8_t*)&currState->l2Desc.data, NULL, (uint8_t*)&currState->l2Desc.data,
currState->tc->getCpuPtr()->ticks(1)); currState->tc->getCpuPtr()->clockPeriod());
doL2Descriptor(); doL2Descriptor();
} else { } else {
RequestPtr req = new Request(l2desc_addr, sizeof(uint32_t), 0, RequestPtr req = new Request(l2desc_addr, sizeof(uint32_t), 0,
@ -758,7 +759,7 @@ void
TableWalker::nextWalk(ThreadContext *tc) TableWalker::nextWalk(ThreadContext *tc)
{ {
if (pendingQueue.size()) if (pendingQueue.size())
schedule(doProcessEvent, tc->getCpuPtr()->clockEdge(1)); schedule(doProcessEvent, tc->getCpuPtr()->clockEdge(Cycles(1)));
} }

View file

@ -102,7 +102,7 @@ void zeroRegisters(TC *tc);
inline void startupCPU(ThreadContext *tc, int cpuId) inline void startupCPU(ThreadContext *tc, int cpuId)
{ {
tc->activate(0); tc->activate(Cycles(0));
} }
void copyRegs(ThreadContext *src, ThreadContext *dest); void copyRegs(ThreadContext *src, ThreadContext *dest);

View file

@ -482,7 +482,7 @@ ISA::setMiscReg(int misc_reg, const MiscReg &val,
miscRegFile[misc_reg][reg_sel] = cp0_val; miscRegFile[misc_reg][reg_sel] = cp0_val;
scheduleCP0Update(tc->getCpuPtr(), 1); scheduleCP0Update(tc->getCpuPtr(), Cycles(1));
} }
/** /**
@ -511,14 +511,14 @@ ISA::filterCP0Write(int misc_reg, int reg_sel, const MiscReg &val)
} }
void void
ISA::scheduleCP0Update(BaseCPU *cpu, int delay) ISA::scheduleCP0Update(BaseCPU *cpu, Cycles delay)
{ {
if (!cp0Updated) { if (!cp0Updated) {
cp0Updated = true; cp0Updated = true;
//schedule UPDATE //schedule UPDATE
CP0Event *cp0_event = new CP0Event(this, cpu, UpdateCP0); CP0Event *cp0_event = new CP0Event(this, cpu, UpdateCP0);
cpu->schedule(cp0_event, curTick() + cpu->ticks(delay)); cpu->schedule(cp0_event, cpu->clockEdge(delay));
} }
} }
@ -573,9 +573,9 @@ ISA::CP0Event::description() const
} }
void void
ISA::CP0Event::scheduleEvent(int delay) ISA::CP0Event::scheduleEvent(Cycles delay)
{ {
cpu->reschedule(this, curTick() + cpu->ticks(delay), true); cpu->reschedule(this, cpu->clockEdge(delay), true);
} }
void void

View file

@ -136,14 +136,14 @@ namespace MipsISA
const char *description() const; const char *description() const;
/** Schedule This Event */ /** Schedule This Event */
void scheduleEvent(int delay); void scheduleEvent(Cycles delay);
/** Unschedule This Event */ /** Unschedule This Event */
void unscheduleEvent(); void unscheduleEvent();
}; };
// Schedule a CP0 Update Event // Schedule a CP0 Update Event
void scheduleCP0Update(BaseCPU *cpu, int delay = 0); void scheduleCP0Update(BaseCPU *cpu, Cycles delay = Cycles(0));
// If any changes have been made, then check the state for changes // If any changes have been made, then check the state for changes
// and if necessary alert the CPU // and if necessary alert the CPU

View file

@ -45,13 +45,13 @@ class ThreadContext;
namespace MipsISA namespace MipsISA
{ {
inline Tick inline Cycles
handleIprRead(ThreadContext *xc, Packet *pkt) handleIprRead(ThreadContext *xc, Packet *pkt)
{ {
panic("No implementation for handleIprRead in MIPS\n"); panic("No implementation for handleIprRead in MIPS\n");
} }
inline Tick inline Cycles
handleIprWrite(ThreadContext *xc, Packet *pkt) handleIprWrite(ThreadContext *xc, Packet *pkt)
{ {
panic("No implementation for handleIprWrite in MIPS\n"); panic("No implementation for handleIprWrite in MIPS\n");

View file

@ -96,7 +96,7 @@ restoreThread(TC *tc)
// TODO: SET PC WITH AN EVENT INSTEAD OF INSTANTANEOUSLY // TODO: SET PC WITH AN EVENT INSTEAD OF INSTANTANEOUSLY
tc->pcState(restartPC); tc->pcState(restartPC);
tc->activate(0); tc->activate(Cycles(0));
warn("%i: Restoring thread %i in %s @ PC %x", warn("%i: Restoring thread %i in %s @ PC %x",
curTick(), tc->threadId(), tc->getCpuPtr()->name(), restartPC); curTick(), tc->threadId(), tc->getCpuPtr()->name(), restartPC);

View file

@ -231,7 +231,7 @@ zeroRegisters(CPU *cpu)
void void
startupCPU(ThreadContext *tc, int cpuId) startupCPU(ThreadContext *tc, int cpuId)
{ {
tc->activate(0/*tc->threadId()*/); tc->activate(Cycles(0));
} }
void void

View file

@ -49,13 +49,13 @@ class ThreadContext;
namespace PowerISA namespace PowerISA
{ {
inline Tick inline Cycles
handleIprRead(ThreadContext *xc, Packet *pkt) handleIprRead(ThreadContext *xc, Packet *pkt)
{ {
panic("No implementation for handleIprRead in POWER\n"); panic("No implementation for handleIprRead in POWER\n");
} }
inline Tick inline Cycles
handleIprWrite(ThreadContext *xc, Packet *pkt) handleIprWrite(ThreadContext *xc, Packet *pkt)
{ {
panic("No implementation for handleIprWrite in POWER\n"); panic("No implementation for handleIprWrite in POWER\n");

View file

@ -59,7 +59,7 @@ void zeroRegisters(TC *tc);
inline void inline void
startupCPU(ThreadContext *tc, int cpuId) startupCPU(ThreadContext *tc, int cpuId)
{ {
tc->activate(0); tc->activate(Cycles(0));
} }
void void

View file

@ -44,13 +44,13 @@
namespace SparcISA namespace SparcISA
{ {
inline Tick inline Cycles
handleIprRead(ThreadContext *xc, Packet *pkt) handleIprRead(ThreadContext *xc, Packet *pkt)
{ {
return xc->getDTBPtr()->doMmuRegRead(xc, pkt); return xc->getDTBPtr()->doMmuRegRead(xc, pkt);
} }
inline Tick inline Cycles
handleIprWrite(ThreadContext *xc, Packet *pkt) handleIprWrite(ThreadContext *xc, Packet *pkt)
{ {
return xc->getDTBPtr()->doMmuRegWrite(xc, pkt); return xc->getDTBPtr()->doMmuRegWrite(xc, pkt);

View file

@ -848,7 +848,7 @@ TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
return NoFault; return NoFault;
} }
Tick Cycles
TLB::doMmuRegRead(ThreadContext *tc, Packet *pkt) TLB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
{ {
Addr va = pkt->getAddr(); Addr va = pkt->getAddr();
@ -1030,10 +1030,10 @@ doMmuReadError:
(uint32_t)asi, va); (uint32_t)asi, va);
} }
pkt->makeAtomicResponse(); pkt->makeAtomicResponse();
return tc->getCpuPtr()->ticks(1); return Cycles(1);
} }
Tick Cycles
TLB::doMmuRegWrite(ThreadContext *tc, Packet *pkt) TLB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
{ {
uint64_t data = pkt->get<uint64_t>(); uint64_t data = pkt->get<uint64_t>();
@ -1283,7 +1283,7 @@ doMmuWriteError:
(uint32_t)pkt->req->getAsi(), pkt->getAddr(), data); (uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
} }
pkt->makeAtomicResponse(); pkt->makeAtomicResponse();
return tc->getCpuPtr()->ticks(1); return Cycles(1);
} }
void void

View file

@ -168,8 +168,8 @@ class TLB : public BaseTLB
* does not support the Checker model at the moment * does not support the Checker model at the moment
*/ */
Fault translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode); Fault translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode);
Tick doMmuRegRead(ThreadContext *tc, Packet *pkt); Cycles doMmuRegRead(ThreadContext *tc, Packet *pkt);
Tick doMmuRegWrite(ThreadContext *tc, Packet *pkt); Cycles doMmuRegWrite(ThreadContext *tc, Packet *pkt);
void GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs); void GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs);
// Checkpointing // Checkpointing

View file

@ -114,7 +114,7 @@ ISA::setFSReg(int miscReg, const MiscReg &val, ThreadContext *tc)
if (!(tick_cmpr & ~mask(63)) && time > 0) { if (!(tick_cmpr & ~mask(63)) && time > 0) {
if (tickCompare->scheduled()) if (tickCompare->scheduled())
cpu->deschedule(tickCompare); cpu->deschedule(tickCompare);
cpu->schedule(tickCompare, curTick() + time * cpu->ticks(1)); cpu->schedule(tickCompare, cpu->clockEdge(Cycles(time)));
} }
panic("writing to TICK compare register %#X\n", val); panic("writing to TICK compare register %#X\n", val);
break; break;
@ -130,7 +130,7 @@ ISA::setFSReg(int miscReg, const MiscReg &val, ThreadContext *tc)
if (!(stick_cmpr & ~mask(63)) && time > 0) { if (!(stick_cmpr & ~mask(63)) && time > 0) {
if (sTickCompare->scheduled()) if (sTickCompare->scheduled())
cpu->deschedule(sTickCompare); cpu->deschedule(sTickCompare);
cpu->schedule(sTickCompare, curTick() + time * cpu->ticks(1)); cpu->schedule(sTickCompare, cpu->clockEdge(Cycles(time)));
} }
DPRINTF(Timer, "writing to sTICK compare register value %#X\n", val); DPRINTF(Timer, "writing to sTICK compare register value %#X\n", val);
break; break;
@ -200,7 +200,7 @@ ISA::setFSReg(int miscReg, const MiscReg &val, ThreadContext *tc)
if (!(hstick_cmpr & ~mask(63)) && time > 0) { if (!(hstick_cmpr & ~mask(63)) && time > 0) {
if (hSTickCompare->scheduled()) if (hSTickCompare->scheduled())
cpu->deschedule(hSTickCompare); cpu->deschedule(hSTickCompare);
cpu->schedule(hSTickCompare, curTick() + time * cpu->ticks(1)); cpu->schedule(hSTickCompare, cpu->clockEdge(Cycles(time)));
} }
DPRINTF(Timer, "writing to hsTICK compare register value %#X\n", val); DPRINTF(Timer, "writing to hsTICK compare register value %#X\n", val);
break; break;
@ -329,19 +329,19 @@ ISA::processSTickCompare(ThreadContext *tc)
// since our microcode instructions take two cycles we need to check if // since our microcode instructions take two cycles we need to check if
// we're actually at the correct cycle or we need to wait a little while // we're actually at the correct cycle or we need to wait a little while
// more // more
int ticks; int delay;
ticks = ((int64_t)(stick_cmpr & mask(63)) - (int64_t)stick) - delay = ((int64_t)(stick_cmpr & mask(63)) - (int64_t)stick) -
cpu->instCount(); cpu->instCount();
assert(ticks >= 0 && "stick compare missed interrupt cycle"); assert(delay >= 0 && "stick compare missed interrupt cycle");
if (ticks == 0 || tc->status() == ThreadContext::Suspended) { if (delay == 0 || tc->status() == ThreadContext::Suspended) {
DPRINTF(Timer, "STick compare cycle reached at %#x\n", DPRINTF(Timer, "STick compare cycle reached at %#x\n",
(stick_cmpr & mask(63))); (stick_cmpr & mask(63)));
if (!(tc->readMiscRegNoEffect(MISCREG_STICK_CMPR) & (ULL(1) << 63))) { if (!(tc->readMiscRegNoEffect(MISCREG_STICK_CMPR) & (ULL(1) << 63))) {
setMiscReg(MISCREG_SOFTINT, softint | (ULL(1) << 16), tc); setMiscReg(MISCREG_SOFTINT, softint | (ULL(1) << 16), tc);
} }
} else { } else {
cpu->schedule(sTickCompare, curTick() + ticks * cpu->ticks(1)); cpu->schedule(sTickCompare, cpu->clockEdge(Cycles(delay)));
} }
} }
@ -353,15 +353,15 @@ ISA::processHSTickCompare(ThreadContext *tc)
// since our microcode instructions take two cycles we need to check if // since our microcode instructions take two cycles we need to check if
// we're actually at the correct cycle or we need to wait a little while // we're actually at the correct cycle or we need to wait a little while
// more // more
int ticks; int delay;
if ( tc->status() == ThreadContext::Halted) if ( tc->status() == ThreadContext::Halted)
return; return;
ticks = ((int64_t)(hstick_cmpr & mask(63)) - (int64_t)stick) - delay = ((int64_t)(hstick_cmpr & mask(63)) - (int64_t)stick) -
cpu->instCount(); cpu->instCount();
assert(ticks >= 0 && "hstick compare missed interrupt cycle"); assert(delay >= 0 && "hstick compare missed interrupt cycle");
if (ticks == 0 || tc->status() == ThreadContext::Suspended) { if (delay == 0 || tc->status() == ThreadContext::Suspended) {
DPRINTF(Timer, "HSTick compare cycle reached at %#x\n", DPRINTF(Timer, "HSTick compare cycle reached at %#x\n",
(stick_cmpr & mask(63))); (stick_cmpr & mask(63)));
if (!(tc->readMiscRegNoEffect(MISCREG_HSTICK_CMPR) & (ULL(1) << 63))) { if (!(tc->readMiscRegNoEffect(MISCREG_HSTICK_CMPR) & (ULL(1) << 63))) {
@ -369,7 +369,7 @@ ISA::processHSTickCompare(ThreadContext *tc)
} }
// Need to do something to cause interrupt to happen here !!! @todo // Need to do something to cause interrupt to happen here !!! @todo
} else { } else {
cpu->schedule(hSTickCompare, curTick() + ticks * cpu->ticks(1)); cpu->schedule(hSTickCompare, cpu->clockEdge(Cycles(delay)));
} }
} }

View file

@ -77,7 +77,7 @@ startupCPU(ThreadContext *tc, int cpuId)
{ {
// Other CPUs will get activated by IPIs // Other CPUs will get activated by IPIs
if (cpuId == 0 || !FullSystem) if (cpuId == 0 || !FullSystem)
tc->activate(0); tc->activate(Cycles(0));
} }
void copyRegs(ThreadContext *src, ThreadContext *dest); void copyRegs(ThreadContext *src, ThreadContext *dest);

View file

@ -53,7 +53,7 @@
namespace X86ISA namespace X86ISA
{ {
inline Tick inline Cycles
handleIprRead(ThreadContext *xc, Packet *pkt) handleIprRead(ThreadContext *xc, Packet *pkt)
{ {
Addr offset = pkt->getAddr() & mask(3); Addr offset = pkt->getAddr() & mask(3);
@ -62,10 +62,10 @@ namespace X86ISA
// Make sure we don't trot off the end of data. // Make sure we don't trot off the end of data.
assert(offset + pkt->getSize() <= sizeof(MiscReg)); assert(offset + pkt->getSize() <= sizeof(MiscReg));
pkt->setData(((uint8_t *)&data) + offset); pkt->setData(((uint8_t *)&data) + offset);
return 1; return Cycles(1);
} }
inline Tick inline Cycles
handleIprWrite(ThreadContext *xc, Packet *pkt) handleIprWrite(ThreadContext *xc, Packet *pkt)
{ {
Addr offset = pkt->getAddr() & mask(3); Addr offset = pkt->getAddr() & mask(3);
@ -76,7 +76,7 @@ namespace X86ISA
assert(offset + pkt->getSize() <= sizeof(MiscReg)); assert(offset + pkt->getSize() <= sizeof(MiscReg));
pkt->writeData(((uint8_t *)&data) + offset); pkt->writeData(((uint8_t *)&data) + offset);
xc->setMiscReg(index, gtoh(data)); xc->setMiscReg(index, gtoh(data));
return 1; return Cycles(1);
} }
} }

View file

@ -176,7 +176,7 @@ void initCPU(ThreadContext *tc, int cpuId)
// @todo: Control the relative frequency, in this case 16:1, of // @todo: Control the relative frequency, in this case 16:1, of
// the clocks in the Python code // the clocks in the Python code
interrupts->setClock(tc->getCpuPtr()->ticks(16)); interrupts->setClock(tc->getCpuPtr()->clockPeriod() * 16);
// TODO Set the SMRAM base address (SMBASE) to 0x00030000 // TODO Set the SMRAM base address (SMBASE) to 0x00030000
@ -189,12 +189,12 @@ void initCPU(ThreadContext *tc, int cpuId)
void startupCPU(ThreadContext *tc, int cpuId) void startupCPU(ThreadContext *tc, int cpuId)
{ {
if (cpuId == 0 || !FullSystem) { if (cpuId == 0 || !FullSystem) {
tc->activate(0); tc->activate(Cycles(0));
} else { } else {
// This is an application processor (AP). It should be initialized to // This is an application processor (AP). It should be initialized to
// look like only the BIOS POST has run on it and put then put it into // look like only the BIOS POST has run on it and put then put it into
// a halted state. // a halted state.
tc->suspend(0); tc->suspend(Cycles(0));
} }
} }

View file

@ -39,6 +39,8 @@
#include <inttypes.h> #include <inttypes.h>
#include <cassert>
/** uint64_t constant */ /** uint64_t constant */
#define ULL(N) ((uint64_t)N##ULL) #define ULL(N) ((uint64_t)N##ULL)
/** int64_t constant */ /** int64_t constant */
@ -57,6 +59,61 @@ typedef uint64_t Tick;
const Tick MaxTick = ULL(0xffffffffffffffff); const Tick MaxTick = ULL(0xffffffffffffffff);
/**
* Cycles is a wrapper class for representing cycle counts, i.e. a
* relative difference between two points in time, expressed in a
* number of clock cycles.
*
* The Cycles wrapper class is a type-safe alternative to a
* typedef, aiming to avoid unintentional mixing of cycles and ticks
* in the code base.
*
* Operators are defined inside an ifndef block to avoid swig touching
* them. Note that there is no overloading of the bool operator as the
* compiler is allowed to turn booleans into integers and this causes
* a whole range of issues in a handful locations. The solution to
* this problem would be to use the safe bool idiom, but for now we
* make do without the test and use the more elaborate comparison >
* Cycles(0).
*/
class Cycles
{
private:
/** Member holding the actual value. */
uint64_t c;
public:
/** Explicit constructor assigning a value. */
explicit Cycles(uint64_t _c) : c(_c) { }
#ifndef SWIG // keep the operators away from SWIG
/** Converting back to the value type. */
operator uint64_t() const { return c; }
/** Prefix increment operator. */
Cycles& operator++()
{ ++c; return *this; }
/** Prefix decrement operator. Is only temporarily used in the O3 CPU. */
Cycles& operator--()
{ assert(c != 0); --c; return *this; }
/** In-place addition of cycles. */
const Cycles& operator+=(const Cycles& cc)
{ c += cc.c; return *this; }
/** Greater than comparison used for > Cycles(0). */
bool operator>(const Cycles& cc) const
{ return c > cc.c; }
#endif // SWIG not touching operators
};
/** /**
* Address type * Address type
* This will probably be moved somewhere else in the near future. * This will probably be moved somewhere else in the near future.

View file

@ -139,8 +139,8 @@ class BaseCPU(MemObject):
"terminate when all threads have reached this load count") "terminate when all threads have reached this load count")
max_loads_any_thread = Param.Counter(0, max_loads_any_thread = Param.Counter(0,
"terminate when any thread reaches this load count") "terminate when any thread reaches this load count")
progress_interval = Param.Tick(0, progress_interval = Param.Frequency('0Hz',
"interval to print out the progress message") "frequency to print out the progress message")
defer_registration = Param.Bool(False, defer_registration = Param.Bool(False,
"defer registration with system (for sampling)") "defer registration with system (for sampling)")

View file

@ -246,7 +246,7 @@ class BaseCPU : public MemObject
/// Notify the CPU that the indicated context is now active. The /// Notify the CPU that the indicated context is now active. The
/// delay parameter indicates the number of ticks to wait before /// delay parameter indicates the number of ticks to wait before
/// executing (typically 0 or 1). /// executing (typically 0 or 1).
virtual void activateContext(ThreadID thread_num, int delay) {} virtual void activateContext(ThreadID thread_num, Cycles delay) {}
/// Notify the CPU that the indicated context is now suspended. /// Notify the CPU that the indicated context is now suspended.
virtual void suspendContext(ThreadID thread_num) {} virtual void suspendContext(ThreadID thread_num) {}

View file

@ -156,13 +156,14 @@ class CheckerThreadContext : public ThreadContext
/// Set the status to Active. Optional delay indicates number of /// Set the status to Active. Optional delay indicates number of
/// cycles to wait before beginning execution. /// cycles to wait before beginning execution.
void activate(int delay = 1) { actualTC->activate(delay); } void activate(Cycles delay = Cycles(1))
{ actualTC->activate(delay); }
/// Set the status to Suspended. /// Set the status to Suspended.
void suspend(int delay) { actualTC->suspend(delay); } void suspend(Cycles delay) { actualTC->suspend(delay); }
/// Set the status to Halted. /// Set the status to Halted.
void halt(int delay) { actualTC->halt(delay); } void halt(Cycles delay) { actualTC->halt(delay); }
void dumpFuncProfile() { actualTC->dumpFuncProfile(); } void dumpFuncProfile() { actualTC->dumpFuncProfile(); }

View file

@ -209,7 +209,7 @@ InOrderCPU::CPUEvent::description() const
} }
void void
InOrderCPU::CPUEvent::scheduleEvent(int delay) InOrderCPU::CPUEvent::scheduleEvent(Cycles delay)
{ {
assert(!scheduled() || squashed()); assert(!scheduled() || squashed());
cpu->reschedule(this, cpu->clockEdge(delay), true); cpu->reschedule(this, cpu->clockEdge(delay), true);
@ -407,7 +407,7 @@ InOrderCPU::InOrderCPU(Params *params)
lockFlag = false; lockFlag = false;
// Schedule First Tick Event, CPU will reschedule itself from here on out. // Schedule First Tick Event, CPU will reschedule itself from here on out.
scheduleTickEvent(0); scheduleTickEvent(Cycles(0));
} }
InOrderCPU::~InOrderCPU() InOrderCPU::~InOrderCPU()
@ -769,9 +769,9 @@ InOrderCPU::tick()
} else { } else {
//Tick next_tick = curTick() + cycles(1); //Tick next_tick = curTick() + cycles(1);
//tickEvent.schedule(next_tick); //tickEvent.schedule(next_tick);
schedule(&tickEvent, clockEdge(1)); schedule(&tickEvent, clockEdge(Cycles(1)));
DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n", DPRINTF(InOrderCPU, "Scheduled CPU for next tick @ %i.\n",
clockEdge(1)); clockEdge(Cycles(1)));
} }
} }
@ -877,7 +877,7 @@ InOrderCPU::checkForInterrupts()
// Schedule Squash Through-out Resource Pool // Schedule Squash Through-out Resource Pool
resPool->scheduleEvent( resPool->scheduleEvent(
(InOrderCPU::CPUEventType)ResourcePool::SquashAll, (InOrderCPU::CPUEventType)ResourcePool::SquashAll,
dummyTrapInst[tid], 0); dummyTrapInst[tid], Cycles(0));
// Finally, Setup Trap to happen at end of cycle // Finally, Setup Trap to happen at end of cycle
trapContext(interrupt, tid, dummyTrapInst[tid]); trapContext(interrupt, tid, dummyTrapInst[tid]);
@ -912,7 +912,8 @@ InOrderCPU::processInterrupts(Fault interrupt)
} }
void void
InOrderCPU::trapContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay) InOrderCPU::trapContext(Fault fault, ThreadID tid, DynInstPtr inst,
Cycles delay)
{ {
scheduleCpuEvent(Trap, fault, tid, inst, delay); scheduleCpuEvent(Trap, fault, tid, inst, delay);
trapPending[tid] = true; trapPending[tid] = true;
@ -926,7 +927,8 @@ InOrderCPU::trap(Fault fault, ThreadID tid, DynInstPtr inst)
} }
void void
InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay) InOrderCPU::squashFromMemStall(DynInstPtr inst, ThreadID tid,
Cycles delay)
{ {
scheduleCpuEvent(SquashFromMemStall, NoFault, tid, inst, delay); scheduleCpuEvent(SquashFromMemStall, NoFault, tid, inst, delay);
} }
@ -954,7 +956,7 @@ InOrderCPU::squashDueToMemStall(int stage_num, InstSeqNum seq_num,
void void
InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault, InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
ThreadID tid, DynInstPtr inst, ThreadID tid, DynInstPtr inst,
unsigned delay, CPUEventPri event_pri) Cycles delay, CPUEventPri event_pri)
{ {
CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst, CPUEvent *cpu_event = new CPUEvent(this, c_event, fault, tid, inst,
event_pri); event_pri);
@ -967,7 +969,8 @@ InOrderCPU::scheduleCpuEvent(CPUEventType c_event, Fault fault,
// Broadcast event to the Resource Pool // Broadcast event to the Resource Pool
// Need to reset tid just in case this is a dummy instruction // Need to reset tid just in case this is a dummy instruction
inst->setTid(tid); inst->setTid(tid);
resPool->scheduleEvent(c_event, inst, 0, 0, tid); // @todo: Is this really right? Should the delay not be passed on?
resPool->scheduleEvent(c_event, inst, Cycles(0), 0, tid);
} }
bool bool
@ -1071,7 +1074,7 @@ InOrderCPU::activateThreadInPipeline(ThreadID tid)
} }
void void
InOrderCPU::deactivateContext(ThreadID tid, int delay) InOrderCPU::deactivateContext(ThreadID tid, Cycles delay)
{ {
DPRINTF(InOrderCPU,"[tid:%i]: Deactivating ...\n", tid); DPRINTF(InOrderCPU,"[tid:%i]: Deactivating ...\n", tid);
@ -1153,7 +1156,7 @@ InOrderCPU::tickThreadStats()
} }
void void
InOrderCPU::activateContext(ThreadID tid, int delay) InOrderCPU::activateContext(ThreadID tid, Cycles delay)
{ {
DPRINTF(InOrderCPU,"[tid:%i]: Activating ...\n", tid); DPRINTF(InOrderCPU,"[tid:%i]: Activating ...\n", tid);
@ -1168,7 +1171,7 @@ InOrderCPU::activateContext(ThreadID tid, int delay)
} }
void void
InOrderCPU::activateNextReadyContext(int delay) InOrderCPU::activateNextReadyContext(Cycles delay)
{ {
DPRINTF(InOrderCPU,"Activating next ready thread\n"); DPRINTF(InOrderCPU,"Activating next ready thread\n");
@ -1719,7 +1722,8 @@ InOrderCPU::wakeup()
} }
void void
InOrderCPU::syscallContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay) InOrderCPU::syscallContext(Fault fault, ThreadID tid, DynInstPtr inst,
Cycles delay)
{ {
// Syscall must be non-speculative, so squash from last stage // Syscall must be non-speculative, so squash from last stage
unsigned squash_stage = NumStages - 1; unsigned squash_stage = NumStages - 1;
@ -1730,7 +1734,8 @@ InOrderCPU::syscallContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay
// Schedule Squash Through-out Resource Pool // Schedule Squash Through-out Resource Pool
resPool->scheduleEvent( resPool->scheduleEvent(
(InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst, 0); (InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst,
Cycles(0));
scheduleCpuEvent(Syscall, fault, tid, inst, delay, Syscall_Pri); scheduleCpuEvent(Syscall, fault, tid, inst, delay, Syscall_Pri);
} }

View file

@ -201,7 +201,7 @@ class InOrderCPU : public BaseCPU
TickEvent tickEvent; TickEvent tickEvent;
/** Schedule tick event, regardless of its current state. */ /** Schedule tick event, regardless of its current state. */
void scheduleTickEvent(int delay) void scheduleTickEvent(Cycles delay)
{ {
assert(!tickEvent.scheduled() || tickEvent.squashed()); assert(!tickEvent.scheduled() || tickEvent.squashed());
reschedule(&tickEvent, clockEdge(delay), true); reschedule(&tickEvent, clockEdge(delay), true);
@ -279,7 +279,7 @@ class InOrderCPU : public BaseCPU
const char *description() const; const char *description() const;
/** Schedule Event */ /** Schedule Event */
void scheduleEvent(int delay); void scheduleEvent(Cycles delay);
/** Unschedule This Event */ /** Unschedule This Event */
void unscheduleEvent(); void unscheduleEvent();
@ -287,7 +287,7 @@ class InOrderCPU : public BaseCPU
/** Schedule a CPU Event */ /** Schedule a CPU Event */
void scheduleCpuEvent(CPUEventType cpu_event, Fault fault, ThreadID tid, void scheduleCpuEvent(CPUEventType cpu_event, Fault fault, ThreadID tid,
DynInstPtr inst, unsigned delay = 0, DynInstPtr inst, Cycles delay = Cycles(0),
CPUEventPri event_pri = InOrderCPU_Pri); CPUEventPri event_pri = InOrderCPU_Pri);
public: public:
@ -479,19 +479,20 @@ class InOrderCPU : public BaseCPU
/** Schedule a syscall on the CPU */ /** Schedule a syscall on the CPU */
void syscallContext(Fault fault, ThreadID tid, DynInstPtr inst, void syscallContext(Fault fault, ThreadID tid, DynInstPtr inst,
int delay = 0); Cycles delay = Cycles(0));
/** Executes a syscall.*/ /** Executes a syscall.*/
void syscall(int64_t callnum, ThreadID tid); void syscall(int64_t callnum, ThreadID tid);
/** Schedule a trap on the CPU */ /** Schedule a trap on the CPU */
void trapContext(Fault fault, ThreadID tid, DynInstPtr inst, int delay = 0); void trapContext(Fault fault, ThreadID tid, DynInstPtr inst,
Cycles delay = Cycles(0));
/** Perform trap to Handle Given Fault */ /** Perform trap to Handle Given Fault */
void trap(Fault fault, ThreadID tid, DynInstPtr inst); void trap(Fault fault, ThreadID tid, DynInstPtr inst);
/** Schedule thread activation on the CPU */ /** Schedule thread activation on the CPU */
void activateContext(ThreadID tid, int delay = 0); void activateContext(ThreadID tid, Cycles delay = Cycles(0));
/** Add Thread to Active Threads List. */ /** Add Thread to Active Threads List. */
void activateThread(ThreadID tid); void activateThread(ThreadID tid);
@ -500,13 +501,13 @@ class InOrderCPU : public BaseCPU
void activateThreadInPipeline(ThreadID tid); void activateThreadInPipeline(ThreadID tid);
/** Schedule Thread Activation from Ready List */ /** Schedule Thread Activation from Ready List */
void activateNextReadyContext(int delay = 0); void activateNextReadyContext(Cycles delay = Cycles(0));
/** Add Thread From Ready List to Active Threads List. */ /** Add Thread From Ready List to Active Threads List. */
void activateNextReadyThread(); void activateNextReadyThread();
/** Schedule a thread deactivation on the CPU */ /** Schedule a thread deactivation on the CPU */
void deactivateContext(ThreadID tid, int delay = 0); void deactivateContext(ThreadID tid, Cycles delay = Cycles(0));
/** Remove from Active Thread List */ /** Remove from Active Thread List */
void deactivateThread(ThreadID tid); void deactivateThread(ThreadID tid);
@ -529,7 +530,8 @@ class InOrderCPU : public BaseCPU
* squashDueToMemStall() - squashes pipeline * squashDueToMemStall() - squashes pipeline
* @note: maybe squashContext/squashThread would be better? * @note: maybe squashContext/squashThread would be better?
*/ */
void squashFromMemStall(DynInstPtr inst, ThreadID tid, int delay = 0); void squashFromMemStall(DynInstPtr inst, ThreadID tid,
Cycles delay = Cycles(0));
void squashDueToMemStall(int stage_num, InstSeqNum seq_num, ThreadID tid); void squashDueToMemStall(int stage_num, InstSeqNum seq_num, ThreadID tid);
void removePipelineStalls(ThreadID tid); void removePipelineStalls(ThreadID tid);

View file

@ -556,7 +556,7 @@ PipelineStage::activateThread(ThreadID tid)
// prevent "double"-execution of instructions // prevent "double"-execution of instructions
cpu->resPool->scheduleEvent((InOrderCPU::CPUEventType) cpu->resPool->scheduleEvent((InOrderCPU::CPUEventType)
ResourcePool::UpdateAfterContextSwitch, ResourcePool::UpdateAfterContextSwitch,
inst, 0, 0, tid); inst, Cycles(0), 0, tid);
// Clear switchout buffer // Clear switchout buffer
switchedOutBuffer[tid] = NULL; switchedOutBuffer[tid] = NULL;

View file

@ -44,7 +44,7 @@
using namespace std; using namespace std;
Resource::Resource(string res_name, int res_id, int res_width, Resource::Resource(string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu) Cycles res_latency, InOrderCPU *_cpu)
: resName(res_name), id(res_id), : resName(res_name), id(res_id),
width(res_width), latency(res_latency), cpu(_cpu), width(res_width), latency(res_latency), cpu(_cpu),
resourceEvent(NULL) resourceEvent(NULL)
@ -76,7 +76,7 @@ Resource::init()
// If the resource has a zero-cycle (no latency) // If the resource has a zero-cycle (no latency)
// function, then no reason to have events // function, then no reason to have events
// that will process them for the right tick // that will process them for the right tick
if (latency > 0) if (latency > Cycles(0))
resourceEvent = new ResourceEvent[width]; resourceEvent = new ResourceEvent[width];
@ -296,7 +296,8 @@ Resource::setupSquash(DynInstPtr inst, int stage_num, ThreadID tid)
// Schedule Squash Through-out Resource Pool // Schedule Squash Through-out Resource Pool
cpu->resPool->scheduleEvent( cpu->resPool->scheduleEvent(
(InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst, 0); (InOrderCPU::CPUEventType)ResourcePool::SquashAll, inst,
Cycles(0));
} }
void void
@ -321,7 +322,7 @@ Resource::squash(DynInstPtr inst, int stage_num, InstSeqNum squash_seq_num,
int req_slot_num = req_ptr->getSlot(); int req_slot_num = req_ptr->getSlot();
if (latency > 0) { if (latency > Cycles(0)) {
if (resourceEvent[req_slot_num].scheduled()) if (resourceEvent[req_slot_num].scheduled())
unscheduleEvent(req_slot_num); unscheduleEvent(req_slot_num);
} }
@ -362,17 +363,10 @@ Resource::squashThenTrap(int stage_num, DynInstPtr inst)
cpu->trapContext(inst->fault, tid, inst); cpu->trapContext(inst->fault, tid, inst);
} }
Tick
Resource::ticks(int num_cycles)
{
return cpu->ticks(num_cycles);
}
void void
Resource::scheduleExecution(int slot_num) Resource::scheduleExecution(int slot_num)
{ {
if (latency > 0) { if (latency > Cycles(0)) {
scheduleEvent(slot_num, latency); scheduleEvent(slot_num, latency);
} else { } else {
execute(slot_num); execute(slot_num);
@ -380,17 +374,17 @@ Resource::scheduleExecution(int slot_num)
} }
void void
Resource::scheduleEvent(int slot_idx, int delay) Resource::scheduleEvent(int slot_idx, Cycles delay)
{ {
DPRINTF(Resource, "[tid:%i]: Scheduling event for [sn:%i] on tick %i.\n", DPRINTF(Resource, "[tid:%i]: Scheduling event for [sn:%i] on tick %i.\n",
reqs[slot_idx]->inst->readTid(), reqs[slot_idx]->inst->readTid(),
reqs[slot_idx]->inst->seqNum, reqs[slot_idx]->inst->seqNum,
cpu->ticks(delay) + curTick()); cpu->clockEdge(delay));
resourceEvent[slot_idx].scheduleEvent(delay); resourceEvent[slot_idx].scheduleEvent(delay);
} }
bool bool
Resource::scheduleEvent(DynInstPtr inst, int delay) Resource::scheduleEvent(DynInstPtr inst, Cycles delay)
{ {
int slot_idx = findSlot(inst); int slot_idx = findSlot(inst);
@ -521,9 +515,9 @@ ResourceEvent::description() const
} }
void void
ResourceEvent::scheduleEvent(int delay) ResourceEvent::scheduleEvent(Cycles delay)
{ {
assert(!scheduled() || squashed()); assert(!scheduled() || squashed());
resource->cpu->reschedule(this, resource->cpu->reschedule(this,
curTick() + resource->ticks(delay), true); resource->cpu->clockEdge(delay), true);
} }

View file

@ -63,7 +63,7 @@ class Resource {
public: public:
Resource(std::string res_name, int res_id, int res_width, Resource(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu); Cycles res_latency, InOrderCPU *_cpu);
virtual ~Resource(); virtual ~Resource();
@ -178,11 +178,11 @@ class Resource {
int slotsInUse(); int slotsInUse();
/** Schedule resource event, regardless of its current state. */ /** Schedule resource event, regardless of its current state. */
void scheduleEvent(int slot_idx, int delay); void scheduleEvent(int slot_idx, Cycles delay);
/** Find instruction in list, Schedule resource event, regardless of its /** Find instruction in list, Schedule resource event, regardless of its
* current state. */ * current state. */
bool scheduleEvent(DynInstPtr inst, int delay); bool scheduleEvent(DynInstPtr inst, Cycles delay);
/** Unschedule resource event, regardless of its current state. */ /** Unschedule resource event, regardless of its current state. */
void unscheduleEvent(int slot_idx); void unscheduleEvent(int slot_idx);
@ -190,9 +190,6 @@ class Resource {
/** Unschedule resource event, regardless of its current state. */ /** Unschedule resource event, regardless of its current state. */
bool unscheduleEvent(DynInstPtr inst); bool unscheduleEvent(DynInstPtr inst);
/** Return the number of cycles in 'Tick' format */
Tick ticks(int numCycles);
/** Find the request that corresponds to this instruction */ /** Find the request that corresponds to this instruction */
virtual ResReqPtr findRequest(DynInstPtr inst); virtual ResReqPtr findRequest(DynInstPtr inst);
@ -206,7 +203,7 @@ class Resource {
/** Return Latency of Resource */ /** Return Latency of Resource */
/* Can be overridden for complex cases */ /* Can be overridden for complex cases */
virtual int getLatency(int slot_num) { return latency; } virtual Cycles getLatency(int slot_num) { return latency; }
protected: protected:
/** The name of this resource */ /** The name of this resource */
@ -226,7 +223,7 @@ class Resource {
* Note: Dynamic latency resources set this to 0 and * Note: Dynamic latency resources set this to 0 and
* manage the latency themselves * manage the latency themselves
*/ */
const int latency; const Cycles latency;
public: public:
/** List of all Requests the Resource is Servicing. Each request /** List of all Requests the Resource is Servicing. Each request
@ -287,7 +284,7 @@ class ResourceEvent : public Event
void setSlot(int slot) { slotIdx = slot; } void setSlot(int slot) { slotIdx = slot; }
/** Schedule resource event, regardless of its current state. */ /** Schedule resource event, regardless of its current state. */
void scheduleEvent(int delay); void scheduleEvent(Cycles delay);
/** Unschedule resource event, regardless of its current state. */ /** Unschedule resource event, regardless of its current state. */
void unscheduleEvent() void unscheduleEvent()

View file

@ -64,54 +64,57 @@ ResourcePool::ResourcePool(InOrderCPU *_cpu, ThePipeline::Params *params)
// name - id - bandwidth - latency - CPU - Parameters // name - id - bandwidth - latency - CPU - Parameters
// -------------------------------------------------- // --------------------------------------------------
resources.push_back(new FetchSeqUnit("fetch_seq_unit", FetchSeq, resources.push_back(new FetchSeqUnit("fetch_seq_unit", FetchSeq,
stage_width * 2, 0, _cpu, params)); stage_width * 2, Cycles(0),
_cpu, params));
// Keep track of the instruction fetch unit so we can easily // Keep track of the instruction fetch unit so we can easily
// provide a pointer to it in the CPU. // provide a pointer to it in the CPU.
instUnit = new FetchUnit("icache_port", ICache, instUnit = new FetchUnit("icache_port", ICache,
stage_width * 2 + MaxThreads, 0, _cpu, stage_width * 2 + MaxThreads, Cycles(0), _cpu,
params); params);
resources.push_back(instUnit); resources.push_back(instUnit);
resources.push_back(new DecodeUnit("decode_unit", Decode, resources.push_back(new DecodeUnit("decode_unit", Decode,
stage_width, 0, _cpu, params)); stage_width, Cycles(0), _cpu,
params));
resources.push_back(new BranchPredictor("branch_predictor", BPred, resources.push_back(new BranchPredictor("branch_predictor", BPred,
stage_width, 0, _cpu, params)); stage_width, Cycles(0),
_cpu, params));
resources.push_back(new InstBuffer("fetch_buffer_t0", FetchBuff, 4, resources.push_back(new InstBuffer("fetch_buffer_t0", FetchBuff, 4,
0, _cpu, params)); Cycles(0), _cpu, params));
resources.push_back(new UseDefUnit("regfile_manager", RegManager, resources.push_back(new UseDefUnit("regfile_manager", RegManager,
stage_width * 3, 0, _cpu, stage_width * 3, Cycles(0), _cpu,
params)); params));
resources.push_back(new AGENUnit("agen_unit", AGEN, resources.push_back(new AGENUnit("agen_unit", AGEN,
stage_width, 0, _cpu, params)); stage_width, Cycles(0), _cpu,
params));
resources.push_back(new ExecutionUnit("execution_unit", ExecUnit, resources.push_back(new ExecutionUnit("execution_unit", ExecUnit,
stage_width, 0, _cpu, params)); stage_width, Cycles(0), _cpu,
params));
resources.push_back(new MultDivUnit("mult_div_unit", MDU, resources.push_back(new MultDivUnit("mult_div_unit", MDU,
stage_width * 2, stage_width * 2, Cycles(0),
0, _cpu, params));
_cpu,
params));
// Keep track of the data load/store unit so we can easily provide // Keep track of the data load/store unit so we can easily provide
// a pointer to it in the CPU. // a pointer to it in the CPU.
dataUnit = new CacheUnit("dcache_port", DCache, dataUnit = new CacheUnit("dcache_port", DCache,
stage_width * 2 + MaxThreads, 0, _cpu, stage_width * 2 + MaxThreads, Cycles(0), _cpu,
params); params);
resources.push_back(dataUnit); resources.push_back(dataUnit);
gradObjects.push_back(BPred); gradObjects.push_back(BPred);
resources.push_back(new GraduationUnit("graduation_unit", Grad, resources.push_back(new GraduationUnit("graduation_unit", Grad,
stage_width, 0, _cpu, stage_width, Cycles(0), _cpu,
params)); params));
resources.push_back(new InstBuffer("fetch_buffer_t1", FetchBuff2, 4, resources.push_back(new InstBuffer("fetch_buffer_t1", FetchBuff2, 4,
0, _cpu, params)); Cycles(0), _cpu, params));
} }
@ -234,7 +237,7 @@ ResourcePool::slotsInUse(int res_idx)
// to the event construction // to the event construction
void void
ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst, ResourcePool::scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst,
int delay, int res_idx, ThreadID tid) Cycles delay, int res_idx, ThreadID tid)
{ {
assert(delay >= 0); assert(delay >= 0);
@ -456,7 +459,7 @@ ResourcePool::ResPoolEvent::description() const
/** Schedule resource event, regardless of its current state. */ /** Schedule resource event, regardless of its current state. */
void void
ResourcePool::ResPoolEvent::scheduleEvent(int delay) ResourcePool::ResPoolEvent::scheduleEvent(Cycles delay)
{ {
InOrderCPU *cpu = resPool->cpu; InOrderCPU *cpu = resPool->cpu;
assert(!scheduled() || squashed()); assert(!scheduled() || squashed());

View file

@ -132,7 +132,7 @@ class ResourcePool {
const char *description() const; const char *description() const;
/** Schedule Event */ /** Schedule Event */
void scheduleEvent(int delay); void scheduleEvent(Cycles delay);
/** Unschedule This Event */ /** Unschedule This Event */
void unscheduleEvent(); void unscheduleEvent();
@ -206,7 +206,8 @@ class ResourcePool {
/** Schedule resource event, regardless of its current state. */ /** Schedule resource event, regardless of its current state. */
void scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst = NULL, void scheduleEvent(InOrderCPU::CPUEventType e_type, DynInstPtr inst = NULL,
int delay = 0, int res_idx = 0, ThreadID tid = 0); Cycles delay = Cycles(0), int res_idx = 0,
ThreadID tid = 0);
/** UnSchedule resource event, regardless of its current state. */ /** UnSchedule resource event, regardless of its current state. */
void unscheduleEvent(int res_idx, DynInstPtr inst); void unscheduleEvent(int res_idx, DynInstPtr inst);

View file

@ -33,7 +33,7 @@
#include "debug/InOrderAGEN.hh" #include "debug/InOrderAGEN.hh"
AGENUnit::AGENUnit(std::string res_name, int res_id, int res_width, AGENUnit::AGENUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params) ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu) : Resource(res_name, res_id, res_width, res_latency, _cpu)
{ } { }

View file

@ -48,7 +48,8 @@ class AGENUnit : public Resource {
public: public:
AGENUnit(std::string res_name, int res_id, int res_width, AGENUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params);
enum Command { enum Command {
GenerateAddr GenerateAddr

View file

@ -39,8 +39,9 @@ using namespace std;
using namespace TheISA; using namespace TheISA;
using namespace ThePipeline; using namespace ThePipeline;
BranchPredictor::BranchPredictor(std::string res_name, int res_id, int res_width, BranchPredictor::BranchPredictor(std::string res_name, int res_id,
int res_latency, InOrderCPU *_cpu, int res_width, Cycles res_latency,
InOrderCPU *_cpu,
ThePipeline::Params *params) ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu), : Resource(res_name, res_id, res_width, res_latency, _cpu),
branchPred(this, params) branchPred(this, params)

View file

@ -54,7 +54,8 @@ class BranchPredictor : public Resource {
public: public:
BranchPredictor(std::string res_name, int res_id, int res_width, BranchPredictor(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params);
void regStats(); void regStats();

View file

@ -67,7 +67,8 @@ printMemData(uint8_t *data, unsigned size)
#endif #endif
CacheUnit::CacheUnit(string res_name, int res_id, int res_width, CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params) Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu), : Resource(res_name, res_id, res_width, res_latency, _cpu),
cachePort(NULL), cachePortBlocked(false) cachePort(NULL), cachePortBlocked(false)
{ {

View file

@ -58,7 +58,8 @@ class CacheUnit : public Resource
public: public:
CacheUnit(std::string res_name, int res_id, int res_width, CacheUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params);
enum Command { enum Command {
InitiateReadData, InitiateReadData,

View file

@ -40,7 +40,7 @@ using namespace ThePipeline;
using namespace std; using namespace std;
DecodeUnit::DecodeUnit(std::string res_name, int res_id, int res_width, DecodeUnit::DecodeUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params) ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu) : Resource(res_name, res_id, res_width, res_latency, _cpu)
{ {

View file

@ -48,7 +48,8 @@ class DecodeUnit : public Resource {
public: public:
DecodeUnit(std::string res_name, int res_id, int res_width, DecodeUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params);
enum Command { enum Command {
DecodeInst DecodeInst

View file

@ -44,7 +44,7 @@ using namespace std;
using namespace ThePipeline; using namespace ThePipeline;
ExecutionUnit::ExecutionUnit(string res_name, int res_id, int res_width, ExecutionUnit::ExecutionUnit(string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params) ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu), : Resource(res_name, res_id, res_width, res_latency, _cpu),
lastExecuteTick(0), lastControlTick(0) lastExecuteTick(0), lastControlTick(0)

View file

@ -51,7 +51,8 @@ class ExecutionUnit : public Resource {
public: public:
ExecutionUnit(std::string res_name, int res_id, int res_width, ExecutionUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params);
public: public:
void regStats(); void regStats();

View file

@ -40,7 +40,7 @@ using namespace TheISA;
using namespace ThePipeline; using namespace ThePipeline;
FetchSeqUnit::FetchSeqUnit(std::string res_name, int res_id, int res_width, FetchSeqUnit::FetchSeqUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params) ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu), : Resource(res_name, res_id, res_width, res_latency, _cpu),
instSize(sizeof(MachInst)) instSize(sizeof(MachInst))

View file

@ -54,7 +54,8 @@ class FetchSeqUnit : public Resource {
public: public:
FetchSeqUnit(std::string res_name, int res_id, int res_width, FetchSeqUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params);
~FetchSeqUnit(); ~FetchSeqUnit();
void init(); void init();

View file

@ -53,7 +53,7 @@ using namespace TheISA;
using namespace ThePipeline; using namespace ThePipeline;
FetchUnit::FetchUnit(string res_name, int res_id, int res_width, FetchUnit::FetchUnit(string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params) ThePipeline::Params *params)
: CacheUnit(res_name, res_id, res_width, res_latency, _cpu, params), : CacheUnit(res_name, res_id, res_width, res_latency, _cpu, params),
instSize(sizeof(TheISA::MachInst)), fetchBuffSize(params->fetchBuffSize) instSize(sizeof(TheISA::MachInst)), fetchBuffSize(params->fetchBuffSize)

View file

@ -53,7 +53,8 @@ class FetchUnit : public CacheUnit
{ {
public: public:
FetchUnit(std::string res_name, int res_id, int res_width, FetchUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params);
virtual ~FetchUnit(); virtual ~FetchUnit();

View file

@ -35,7 +35,7 @@
using namespace ThePipeline; using namespace ThePipeline;
GraduationUnit::GraduationUnit(std::string res_name, int res_id, int res_width, GraduationUnit::GraduationUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params) ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu) : Resource(res_name, res_id, res_width, res_latency, _cpu)
{ {

View file

@ -52,7 +52,7 @@ class GraduationUnit : public Resource {
public: public:
GraduationUnit(std::string res_name, int res_id, int res_width, GraduationUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params); ThePipeline::Params *params);
void execute(int slot_num); void execute(int slot_num);

View file

@ -45,7 +45,7 @@ using namespace TheISA;
using namespace ThePipeline; using namespace ThePipeline;
InstBuffer::InstBuffer(string res_name, int res_id, int res_width, InstBuffer::InstBuffer(string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params) ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu) : Resource(res_name, res_id, res_width, res_latency, _cpu)
{ } { }

View file

@ -56,7 +56,8 @@ class InstBuffer : public Resource {
public: public:
InstBuffer(std::string res_name, int res_id, int res_width, InstBuffer(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params); Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params);
void regStats(); void regStats();

View file

@ -47,7 +47,7 @@ class MemDepUnit : public Resource {
public: public:
MemDepUnit(std::string res_name, int res_id, int res_width, MemDepUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu); Cycles res_latency, InOrderCPU *_cpu);
virtual ~MemDepUnit() {} virtual ~MemDepUnit() {}
virtual void execute(int slot_num); virtual void execute(int slot_num);

View file

@ -43,7 +43,7 @@ using namespace std;
using namespace ThePipeline; using namespace ThePipeline;
MultDivUnit::MultDivUnit(string res_name, int res_id, int res_width, MultDivUnit::MultDivUnit(string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params) ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu), : Resource(res_name, res_id, res_width, res_latency, _cpu),
multRepeatRate(params->multRepeatRate), multRepeatRate(params->multRepeatRate),

View file

@ -56,7 +56,7 @@ class MultDivUnit : public Resource {
public: public:
MultDivUnit(std::string res_name, int res_id, int res_width, MultDivUnit(std::string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params); ThePipeline::Params *params);
public: public:
@ -86,23 +86,23 @@ class MultDivUnit : public Resource {
protected: protected:
/** Latency & Repeat Rate for Multiply Insts */ /** Latency & Repeat Rate for Multiply Insts */
unsigned multRepeatRate; unsigned multRepeatRate;
unsigned multLatency; Cycles multLatency;
/** Latency & Repeat Rate for 8-bit Divide Insts */ /** Latency & Repeat Rate for 8-bit Divide Insts */
unsigned div8RepeatRate; unsigned div8RepeatRate;
unsigned div8Latency; Cycles div8Latency;
/** Latency & Repeat Rate for 16-bit Divide Insts */ /** Latency & Repeat Rate for 16-bit Divide Insts */
unsigned div16RepeatRate; unsigned div16RepeatRate;
unsigned div16Latency; Cycles div16Latency;
/** Latency & Repeat Rate for 24-bit Divide Insts */ /** Latency & Repeat Rate for 24-bit Divide Insts */
unsigned div24RepeatRate; unsigned div24RepeatRate;
unsigned div24Latency; Cycles div24Latency;
/** Latency & Repeat Rate for 32-bit Divide Insts */ /** Latency & Repeat Rate for 32-bit Divide Insts */
unsigned div32RepeatRate; unsigned div32RepeatRate;
unsigned div32Latency; Cycles div32Latency;
/** Last cycle that MDU was used */ /** Last cycle that MDU was used */
Tick lastMDUCycle; Tick lastMDUCycle;

View file

@ -44,7 +44,8 @@ using namespace TheISA;
using namespace ThePipeline; using namespace ThePipeline;
TLBUnit::TLBUnit(string res_name, int res_id, int res_width, TLBUnit::TLBUnit(string res_name, int res_id, int res_width,
int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params) Cycles res_latency, InOrderCPU *_cpu,
ThePipeline::Params *params)
: Resource(res_name, res_id, res_width, res_latency, _cpu) : Resource(res_name, res_id, res_width, res_latency, _cpu)
{ {
// Hard-Code Selection For Now // Hard-Code Selection For Now

View file

@@ -55,7 +55,8 @@ class TLBUnit : public Resource
   public:
     TLBUnit(std::string res_name, int res_id, int res_width,
-            int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+            Cycles res_latency, InOrderCPU *_cpu,
+            ThePipeline::Params *params);
     virtual ~TLBUnit() {}
     void init();

View file

@@ -45,7 +45,7 @@ using namespace TheISA;
 using namespace ThePipeline;
 UseDefUnit::UseDefUnit(string res_name, int res_id, int res_width,
-                       int res_latency, InOrderCPU *_cpu,
+                       Cycles res_latency, InOrderCPU *_cpu,
                        ThePipeline::Params *params)
     : Resource(res_name, res_id, res_width, res_latency, _cpu)
 {
@@ -107,7 +107,7 @@ void
 UseDefUnit::init()
 {
     // Set Up Resource Events to Appropriate Resource BandWidth
-    if (latency > 0) {
+    if (latency > Cycles(0)) {
         resourceEvent = new ResourceEvent[width];
     } else {
         resourceEvent = NULL;

View file

@@ -56,7 +56,8 @@ class UseDefUnit : public Resource {
   public:
     UseDefUnit(std::string res_name, int res_id, int res_width,
-               int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params);
+               Cycles res_latency, InOrderCPU *_cpu,
+               ThePipeline::Params *params);
     void init();

View file

@@ -98,7 +98,7 @@ InOrderThreadContext::takeOverFrom(ThreadContext *old_context)
 }
 void
-InOrderThreadContext::activate(int delay)
+InOrderThreadContext::activate(Cycles delay)
 {
     DPRINTF(InOrderCPU, "Calling activate on Thread Context %d\n",
             getThreadNum());
@@ -113,7 +113,7 @@ InOrderThreadContext::activate(int delay)
 void
-InOrderThreadContext::suspend(int delay)
+InOrderThreadContext::suspend(Cycles delay)
 {
     DPRINTF(InOrderCPU, "Calling suspend on Thread Context %d\n",
             getThreadNum());
@@ -126,7 +126,7 @@ InOrderThreadContext::suspend(int delay)
 }
 void
-InOrderThreadContext::halt(int delay)
+InOrderThreadContext::halt(Cycles delay)
 {
     DPRINTF(InOrderCPU, "Calling halt on Thread Context %d\n",
             getThreadNum());

View file

@@ -165,13 +165,13 @@ class InOrderThreadContext : public ThreadContext
     /** Set the status to Active. Optional delay indicates number of
      * cycles to wait before beginning execution. */
-    void activate(int delay = 1);
+    void activate(Cycles delay = Cycles(1));
     /** Set the status to Suspended. */
-    void suspend(int delay = 0);
+    void suspend(Cycles delay = Cycles(0));
     /** Set the status to Halted. */
-    void halt(int delay = 0);
+    void halt(Cycles delay = Cycles(0));
     /** Takes over execution of a thread from another CPU. */
     void takeOverFrom(ThreadContext *old_context);
@@ -259,7 +259,7 @@ class InOrderThreadContext : public ThreadContext
     int flattenFloatIndex(int reg)
     { return cpu->isa[thread->threadId()].flattenFloatIndex(reg); }
-    void activateContext(int delay)
+    void activateContext(Cycles delay)
     { cpu->activateContext(thread->threadId(), delay); }
     void deallocateContext()

View file

@@ -409,7 +409,7 @@ class DefaultCommit
     /** The latency to handle a trap. Used when scheduling trap
      * squash event.
      */
-    uint trapLatency;
+    Cycles trapLatency;
     /** The interrupt fault. */
     Fault interrupt;

View file

@@ -256,7 +256,8 @@ FullO3CPU<Impl>::FullO3CPU(DerivO3CPUParams *params)
       globalSeqNum(1),
       system(params->system),
       drainCount(0),
-      deferRegistration(params->defer_registration)
+      deferRegistration(params->defer_registration),
+      lastRunningCycle(curCycle())
 {
     if (!deferRegistration) {
         _status = Running;
@@ -386,8 +387,6 @@ FullO3CPU<Impl>::FullO3CPU(DerivO3CPUParams *params)
     // Setup the ROB for whichever stages need it.
     commit.setROB(&rob);
-    lastRunningCycle = curCycle();
     lastActivatedCycle = 0;
 #if 0
     // Give renameMap & rename stage access to the freeList;
@@ -629,7 +628,7 @@ FullO3CPU<Impl>::tick()
         lastRunningCycle = curCycle();
         timesIdled++;
     } else {
-        schedule(tickEvent, clockEdge(1));
+        schedule(tickEvent, clockEdge(Cycles(1)));
         DPRINTF(O3CPU, "Scheduling next tick!\n");
     }
 }
@@ -741,12 +740,12 @@ FullO3CPU<Impl>::totalOps() const
 template <class Impl>
 void
-FullO3CPU<Impl>::activateContext(ThreadID tid, int delay)
+FullO3CPU<Impl>::activateContext(ThreadID tid, Cycles delay)
 {
     // Needs to set each stage to running as well.
     if (delay){
         DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to activate "
-                "on cycle %d\n", tid, curTick() + ticks(delay));
+                "on cycle %d\n", tid, clockEdge(delay));
         scheduleActivateThreadEvent(tid, delay);
     } else {
         activateThread(tid);
@@ -762,7 +761,8 @@ FullO3CPU<Impl>::activateContext(ThreadID tid, int delay)
         activityRec.activity();
         fetch.wakeFromQuiesce();
-        Tick cycles = curCycle() - lastRunningCycle;
+        Cycles cycles(curCycle() - lastRunningCycle);
+        // @todo: This is an oddity that is only here to match the stats
         if (cycles != 0)
             --cycles;
         quiesceCycles += cycles;
@@ -776,12 +776,12 @@ FullO3CPU<Impl>::activateContext(ThreadID tid, int delay)
 template <class Impl>
 bool
 FullO3CPU<Impl>::scheduleDeallocateContext(ThreadID tid, bool remove,
-                                           int delay)
+                                           Cycles delay)
 {
     // Schedule removal of thread data from CPU
     if (delay){
         DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to deallocate "
-                "on cycle %d\n", tid, curTick() + ticks(delay));
+                "on tick %d\n", tid, clockEdge(delay));
         scheduleDeallocateContextEvent(tid, remove, delay);
         return false;
     } else {
@@ -797,7 +797,7 @@ void
 FullO3CPU<Impl>::suspendContext(ThreadID tid)
 {
     DPRINTF(O3CPU,"[tid: %i]: Suspending Thread Context.\n", tid);
-    bool deallocated = scheduleDeallocateContext(tid, false, 1);
+    bool deallocated = scheduleDeallocateContext(tid, false, Cycles(1));
     // If this was the last thread then unschedule the tick event.
     if ((activeThreads.size() == 1 && !deallocated) ||
         activeThreads.size() == 0)
@@ -814,7 +814,7 @@ FullO3CPU<Impl>::haltContext(ThreadID tid)
 {
     //For now, this is the same as deallocate
     DPRINTF(O3CPU,"[tid:%i]: Halt Context called. Deallocating", tid);
-    scheduleDeallocateContext(tid, true, 1);
+    scheduleDeallocateContext(tid, true, Cycles(1));
 }
 template <class Impl>
@@ -854,7 +854,7 @@ FullO3CPU<Impl>::insertThread(ThreadID tid)
     src_tc->setStatus(ThreadContext::Active);
-    activateContext(tid,1);
+    activateContext(tid, Cycles(1));
     //Reset ROB/IQ/LSQ Entries
     commit.rob->resetEntries();
@@ -1672,7 +1672,8 @@ FullO3CPU<Impl>::wakeCPU()
     DPRINTF(Activity, "Waking up CPU\n");
-    Tick cycles = curCycle() - lastRunningCycle;
+    Cycles cycles(curCycle() - lastRunningCycle);
+    // @todo: This is an oddity that is only here to match the stats
     if (cycles != 0)
         --cycles;
     idleCycles += cycles;
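
The two accounting hunks above rely on Cycles being constructible from the difference of two cycle counts, comparable against zero, and pre-decrementable. A minimal sketch of that pattern, using a hypothetical stand-in Cycles class and made-up cycle numbers (the real wrapper is defined in the companion patch):

#include <cassert>
#include <stdint.h>

// Hypothetical stand-in for the Cycles wrapper: an unsigned cycle count
// with just enough operators for the accounting pattern shown above.
class Cycles
{
  private:
    uint64_t c;

  public:
    explicit Cycles(uint64_t _c = 0) : c(_c) { }
    operator uint64_t() const { return c; }                     // read access
    Cycles& operator--() { assert(c != 0); --c; return *this; }
};

int main()
{
    // Pretend the CPU last ran at cycle 100 and wakes up at cycle 104.
    Cycles lastRunningCycle(100), now(104);

    // Same shape as FullO3CPU::wakeCPU(): the elapsed count is built
    // from the difference of two cycle counts...
    Cycles cycles(now - lastRunningCycle);
    // ...and decremented once, the oddity noted in the @todo above.
    if (cycles != 0)
        --cycles;

    uint64_t idleCycles = 0;
    idleCycles += cycles;          // 3, not 4, to match the old stats
    return idleCycles == 3 ? 0 : 1;
}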

View file

@@ -211,7 +211,7 @@ class FullO3CPU : public BaseO3CPU
     TickEvent tickEvent;
     /** Schedule tick event, regardless of its current state. */
-    void scheduleTickEvent(int delay)
+    void scheduleTickEvent(Cycles delay)
     {
         if (tickEvent.squashed())
             reschedule(tickEvent, clockEdge(delay));
@@ -251,7 +251,7 @@ class FullO3CPU : public BaseO3CPU
     /** Schedule thread to activate , regardless of its current state. */
     void
-    scheduleActivateThreadEvent(ThreadID tid, int delay)
+    scheduleActivateThreadEvent(ThreadID tid, Cycles delay)
     {
         // Schedule thread to activate, regardless of its current state.
         if (activateThreadEvent[tid].squashed())
@@ -314,7 +314,7 @@ class FullO3CPU : public BaseO3CPU
     /** Schedule cpu to deallocate thread context.*/
     void
-    scheduleDeallocateContextEvent(ThreadID tid, bool remove, int delay)
+    scheduleDeallocateContextEvent(ThreadID tid, bool remove, Cycles delay)
     {
         // Schedule thread to activate, regardless of its current state.
         if (deallocateContextEvent[tid].squashed())
@@ -392,7 +392,7 @@ class FullO3CPU : public BaseO3CPU
     virtual Counter totalOps() const;
     /** Add Thread to Active Threads List. */
-    void activateContext(ThreadID tid, int delay);
+    void activateContext(ThreadID tid, Cycles delay);
     /** Remove Thread from Active Threads List */
     void suspendContext(ThreadID tid);
@@ -400,7 +400,8 @@ class FullO3CPU : public BaseO3CPU
     /** Remove Thread from Active Threads List &&
      * Possibly Remove Thread Context from CPU.
      */
-    bool scheduleDeallocateContext(ThreadID tid, bool remove, int delay = 1);
+    bool scheduleDeallocateContext(ThreadID tid, bool remove,
+                                   Cycles delay = Cycles(1));
     /** Remove Thread from Active Threads List &&
      * Remove Thread Context from CPU.
@@ -748,7 +749,7 @@ class FullO3CPU : public BaseO3CPU
     std::list<int> cpuWaitList;
     /** The cycle that the CPU was last running, used for statistics. */
-    Tick lastRunningCycle;
+    Cycles lastRunningCycle;
     /** The cycle that the CPU was last activated by a new thread*/
     Tick lastActivatedCycle;

View file

@@ -646,7 +646,8 @@ DefaultFetch<Impl>::finishTranslation(Fault fault, RequestPtr mem_req)
             assert(!finishTranslationEvent.scheduled());
             finishTranslationEvent.setFault(fault);
             finishTranslationEvent.setReq(mem_req);
-            cpu->schedule(finishTranslationEvent, cpu->clockEdge(1));
+            cpu->schedule(finishTranslationEvent,
+                          cpu->clockEdge(Cycles(1)));
             return;
         }
         DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",

View file

@@ -828,7 +828,8 @@ InstructionQueue<Impl>::scheduleReadyInsts()
                 FUCompletion *execution = new FUCompletion(issuing_inst,
                                                            idx, this);
-                cpu->schedule(execution, cpu->clockEdge(op_latency - 1));
+                cpu->schedule(execution,
+                              cpu->clockEdge(Cycles(op_latency - 1)));
                 // @todo: Enforce that issue_latency == 1 or op_latency
                 if (issue_latency > 1) {

View file

@@ -607,7 +607,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
             load_inst->memData = new uint8_t[64];
         ThreadContext *thread = cpu->tcBase(lsqID);
-        Tick delay;
+        Cycles delay(0);
         PacketPtr data_pkt = new Packet(req, MemCmd::ReadReq);
         if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
@@ -622,7 +622,7 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
             snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());
             delay = TheISA::handleIprRead(thread, fst_data_pkt);
-            unsigned delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
+            Cycles delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
             if (delay2 > delay)
                 delay = delay2;

View file

@@ -134,13 +134,13 @@ class O3ThreadContext : public ThreadContext
     /** Set the status to Active. Optional delay indicates number of
      * cycles to wait before beginning execution. */
-    virtual void activate(int delay = 1);
+    virtual void activate(Cycles delay = Cycles(1));
     /** Set the status to Suspended. */
-    virtual void suspend(int delay = 0);
+    virtual void suspend(Cycles delay = Cycles(0));
     /** Set the status to Halted. */
-    virtual void halt(int delay = 0);
+    virtual void halt(Cycles delay = Cycles(0));
     /** Dumps the function profiling information.
      * @todo: Implement.

View file

@@ -102,7 +102,7 @@ O3ThreadContext<Impl>::takeOverFrom(ThreadContext *old_context)
 template <class Impl>
 void
-O3ThreadContext<Impl>::activate(int delay)
+O3ThreadContext<Impl>::activate(Cycles delay)
 {
     DPRINTF(O3CPU, "Calling activate on Thread Context %d\n",
             threadId());
@@ -119,7 +119,7 @@ O3ThreadContext<Impl>::activate(int delay)
 template <class Impl>
 void
-O3ThreadContext<Impl>::suspend(int delay)
+O3ThreadContext<Impl>::suspend(Cycles delay)
 {
     DPRINTF(O3CPU, "Calling suspend on Thread Context %d\n",
             threadId());
@@ -136,7 +136,7 @@ O3ThreadContext<Impl>::suspend(int delay)
 template <class Impl>
 void
-O3ThreadContext<Impl>::halt(int delay)
+O3ThreadContext<Impl>::halt(Cycles delay)
 {
     DPRINTF(O3CPU, "Calling halt on Thread Context %d\n",
             threadId());

View file

@@ -197,7 +197,7 @@ AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
 void
-AtomicSimpleCPU::activateContext(ThreadID thread_num, int delay)
+AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
 {
     DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
@@ -208,7 +208,7 @@ AtomicSimpleCPU::activateContext(ThreadID thread_num, int delay)
     assert(!tickEvent.scheduled());
     notIdleFraction++;
-    numCycles += tickToCycle(thread->lastActivate - thread->lastSuspend);
+    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);
     //Make sure ticks are still on multiples of cycles
     schedule(tickEvent, clockEdge(delay));
@@ -518,13 +518,11 @@ AtomicSimpleCPU::tick()
             stall_ticks += dcache_latency;
         if (stall_ticks) {
-            Tick stall_cycles = stall_ticks / clockPeriod();
-            Tick aligned_stall_ticks = ticks(stall_cycles);
-            if (aligned_stall_ticks < stall_ticks)
-                aligned_stall_ticks += 1;
-            latency += aligned_stall_ticks;
+            // the atomic cpu does its accounting in ticks, so
+            // keep counting in ticks but round to the clock
+            // period
+            latency += divCeil(stall_ticks, clockPeriod()) *
+                clockPeriod();
         }
     }
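
The rewritten stall handling keeps the accounting in ticks but charges whole clock periods via divCeil. A small worked example with made-up numbers (a 2000-tick clock period and a 4500-tick stall), using a local round-up division that mirrors what gem5's divCeil is assumed to do:

#include <stdint.h>

typedef uint64_t Tick;

// Round-up integer division; assumed to match gem5's divCeil helper.
static Tick divCeil(Tick a, Tick b) { return (a + b - 1) / b; }

int main()
{
    const Tick clock_period = 2000;  // e.g. a 500 MHz clock in ps ticks
    Tick stall_ticks = 4500;         // memory stall spanning 2.25 cycles
    Tick latency = 0;

    // Same rounding as the new AtomicSimpleCPU::tick() code above:
    // 4500 ticks round up to 3 whole periods, i.e. 6000 ticks.
    latency += divCeil(stall_ticks, clock_period) * clock_period;

    return latency == 6000 ? 0 : 1;
}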

View file

@@ -127,7 +127,7 @@ class AtomicSimpleCPU : public BaseSimpleCPU
     void switchOut();
     void takeOverFrom(BaseCPU *oldCPU);
-    virtual void activateContext(ThreadID thread_num, int delay);
+    virtual void activateContext(ThreadID thread_num, Cycles delay);
     virtual void suspendContext(ThreadID thread_num);
     Fault readMem(Addr addr, uint8_t *data, unsigned size, unsigned flags);

View file

@@ -187,7 +187,7 @@ TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
 void
-TimingSimpleCPU::activateContext(ThreadID thread_num, int delay)
+TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
 {
     DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);
@@ -229,7 +229,7 @@ TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
 {
     RequestPtr req = pkt->req;
     if (req->isMmappedIpr()) {
-        Tick delay = TheISA::handleIprRead(thread->getTC(), pkt);
+        Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
         new IprEvent(pkt, this, clockEdge(delay));
         _status = DcacheWaitResponse;
         dcache_pkt = NULL;
@@ -443,7 +443,7 @@ TimingSimpleCPU::handleWritePacket()
 {
     RequestPtr req = dcache_pkt->req;
     if (req->isMmappedIpr()) {
-        Tick delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
+        Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
         new IprEvent(dcache_pkt, this, clockEdge(delay));
         _status = DcacheWaitResponse;
         dcache_pkt = NULL;

View file

@@ -255,7 +255,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
     void switchOut();
     void takeOverFrom(BaseCPU *oldCPU);
-    virtual void activateContext(ThreadID thread_num, int delay);
+    virtual void activateContext(ThreadID thread_num, Cycles delay);
     virtual void suspendContext(ThreadID thread_num);
     Fault readMem(Addr addr, uint8_t *data, unsigned size, unsigned flags);

View file

@@ -210,7 +210,7 @@ SimpleThread::dumpFuncProfile()
 }
 void
-SimpleThread::activate(int delay)
+SimpleThread::activate(Cycles delay)
 {
     if (status() == ThreadContext::Active)
         return;

View file

@@ -209,7 +209,7 @@ class SimpleThread : public ThreadState
     /// Set the status to Active. Optional delay indicates number of
     /// cycles to wait before beginning execution.
-    void activate(int delay = 1);
+    void activate(Cycles delay = Cycles(1));
     /// Set the status to Suspended.
     void suspend();

View file

@@ -246,7 +246,7 @@ void
 MemTest::tick()
 {
     if (!tickEvent.scheduled())
-        schedule(tickEvent, clockEdge(1));
+        schedule(tickEvent, clockEdge(Cycles(1)));
     if (++noResponseCycles >= 500000) {
         if (issueDmas) {

View file

@@ -165,7 +165,7 @@ NetworkTest::tick()
         exitSimLoop("Network Tester completed simCycles");
     else {
         if (!tickEvent.scheduled())
-            schedule(tickEvent, clockEdge(1));
+            schedule(tickEvent, clockEdge(Cycles(1)));
     }
 }

View file

@@ -163,13 +163,13 @@ class ThreadContext
     /// Set the status to Active. Optional delay indicates number of
     /// cycles to wait before beginning execution.
-    virtual void activate(int delay = 1) = 0;
+    virtual void activate(Cycles delay = Cycles(1)) = 0;
     /// Set the status to Suspended.
-    virtual void suspend(int delay = 0) = 0;
+    virtual void suspend(Cycles delay = Cycles(0)) = 0;
     /// Set the status to Halted.
-    virtual void halt(int delay = 0) = 0;
+    virtual void halt(Cycles delay = Cycles(0)) = 0;
     virtual void dumpFuncProfile() = 0;
@@ -329,13 +329,14 @@ class ProxyThreadContext : public ThreadContext
     /// Set the status to Active. Optional delay indicates number of
     /// cycles to wait before beginning execution.
-    void activate(int delay = 1) { actualTC->activate(delay); }
+    void activate(Cycles delay = Cycles(1))
+    { actualTC->activate(delay); }
     /// Set the status to Suspended.
-    void suspend(int delay = 0) { actualTC->suspend(); }
+    void suspend(Cycles delay = Cycles(0)) { actualTC->suspend(); }
     /// Set the status to Halted.
-    void halt(int delay = 0) { actualTC->halt(); }
+    void halt(Cycles delay = Cycles(0)) { actualTC->halt(); }
     void dumpFuncProfile() { actualTC->dumpFuncProfile(); }

View file

@@ -475,7 +475,7 @@ Pl111::fillFifo()
 void
 Pl111::dmaDone()
 {
-    Tick maxFrameTime = lcdTiming2.cpl * height;
+    Cycles maxFrameTime(lcdTiming2.cpl * height);
     --dmaPendingNum;
@@ -503,8 +503,11 @@ Pl111::dmaDone()
         // argument into a relative number of cycles in the future by
         // subtracting curCycle()
         if (lcdControl.lcden)
-            schedule(readEvent, clockEdge(startTime + maxFrameTime -
-                     curCycle()));
+            // @todo: This is a terrible way of doing the time
+            // keeping, make it all relative
+            schedule(readEvent,
+                     clockEdge(Cycles(startTime - curCycle() +
+                                      maxFrameTime)));
     }
     if (dmaPendingNum > (maxOutstandingDma - waterMark))
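
For the scheduling arithmetic above: startTime is an absolute cycle captured when the frame began and maxFrameTime a duration, so their sum has to be turned back into a delay relative to now before clockEdge() can use it. A rough sketch with plain integer stand-ins and example values (the clock period, times and helper definitions here are all assumptions):

#include <stdint.h>

typedef uint64_t Tick;
typedef uint64_t Cycles;      // plain alias, only to show the arithmetic

static const Tick clock = 1000;   // example clock period in ticks
static Tick now = 123000;         // example current tick (cycle 123)

static Tick curTick() { return now; }
static Cycles curCycle() { return curTick() / clock; }
// Assumed behaviour: add whole periods to the (edge-aligned) current tick.
static Tick clockEdge(Cycles cycles) { return curTick() + cycles * clock; }

int main()
{
    Cycles startTime = 100;     // absolute cycle when the frame started
    Cycles maxFrameTime = 40;   // frame length in cycles

    // Absolute target cycle 140, expressed as a 17-cycle delay from now,
    // which clockEdge() then turns into tick 140000.
    Tick when = clockEdge(startTime + maxFrameTime - curCycle());
    return when == 140000 ? 0 : 1;
}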

View file

@@ -2052,7 +2052,7 @@ IGbE::restartClock()
 {
     if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick) &&
         getState() == SimObject::Running)
-        schedule(tickEvent, clockEdge(1));
+        schedule(tickEvent, clockEdge(Cycles(1)));
 }
 unsigned int
unsigned int unsigned int

View file

@@ -1321,7 +1321,7 @@ Device::transferDone()
     DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
-    reschedule(txEvent, curTick() + ticks(1), true);
+    reschedule(txEvent, curTick() + clockPeriod(), true);
 }
 bool
bool bool

View file

@@ -56,7 +56,7 @@
 Bridge::BridgeSlavePort::BridgeSlavePort(const std::string& _name,
                                          Bridge& _bridge,
                                          BridgeMasterPort& _masterPort,
-                                         int _delay, int _resp_limit,
+                                         Cycles _delay, int _resp_limit,
                                          std::vector<Range<Addr> > _ranges)
     : SlavePort(_name, &_bridge), bridge(_bridge), masterPort(_masterPort),
       delay(_delay), ranges(_ranges.begin(), _ranges.end()),
@@ -68,7 +68,7 @@ Bridge::BridgeSlavePort::BridgeSlavePort(const std::string& _name,
 Bridge::BridgeMasterPort::BridgeMasterPort(const std::string& _name,
                                            Bridge& _bridge,
                                            BridgeSlavePort& _slavePort,
-                                           int _delay, int _req_limit)
+                                           Cycles _delay, int _req_limit)
     : MasterPort(_name, &_bridge), bridge(_bridge), slavePort(_slavePort),
       delay(_delay), reqQueueLimit(_req_limit), sendEvent(*this)
 {
@@ -76,9 +76,10 @@ Bridge::BridgeMasterPort::BridgeMasterPort(const std::string& _name,
 Bridge::Bridge(Params *p)
     : MemObject(p),
-      slavePort(p->name + ".slave", *this, masterPort, p->delay, p->resp_size,
-                p->ranges),
-      masterPort(p->name + ".master", *this, slavePort, p->delay, p->req_size)
+      slavePort(p->name + ".slave", *this, masterPort,
+                ticksToCycles(p->delay), p->resp_size, p->ranges),
+      masterPort(p->name + ".master", *this, slavePort,
+                 ticksToCycles(p->delay), p->req_size)
 {
 }
@@ -140,7 +141,7 @@ Bridge::BridgeMasterPort::recvTimingResp(PacketPtr pkt)
     DPRINTF(Bridge, "Request queue size: %d\n", transmitList.size());
-    slavePort.schedTimingResp(pkt, curTick() + delay);
+    slavePort.schedTimingResp(pkt, bridge.clockEdge(delay));
     return true;
 }
@@ -170,7 +171,7 @@ Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt)
             assert(outstandingResponses != respQueueLimit);
             ++outstandingResponses;
             retryReq = false;
-            masterPort.schedTimingReq(pkt, curTick() + delay);
+            masterPort.schedTimingReq(pkt, bridge.clockEdge(delay));
         }
     }
@@ -352,7 +353,7 @@ Bridge::BridgeSlavePort::recvRetry()
 Tick
 Bridge::BridgeSlavePort::recvAtomic(PacketPtr pkt)
 {
-    return delay + masterPort.sendAtomic(pkt);
+    return delay * bridge.clockPeriod() + masterPort.sendAtomic(pkt);
 }
 void
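
The bridge changes use the cycle-valued delay in two different ways: in timing mode it becomes an absolute deadline through clockEdge(), in atomic mode it is scaled by the clock period and returned as a latency. A compact sketch of both conversions, with invented period, time and delay values:

#include <stdint.h>

typedef uint64_t Tick;
typedef uint64_t Cycles;     // stand-in alias for the wrapper class

static const Tick period = 1000;  // example clock period in ticks
static Tick now = 5400;           // example current tick, off the edge

static Tick curTick() { return now; }

// Assumed clockEdge() behaviour: align to the next edge, then advance
// by the requested number of whole clock periods.
static Tick clockEdge(Cycles cycles)
{
    Tick aligned = ((curTick() + period - 1) / period) * period;
    return aligned + cycles * period;
}

int main()
{
    Cycles delay = 2;

    // Timing mode: schedule at an absolute tick, edge-aligned (6000)
    // plus the two-cycle bridge delay, i.e. 8000.
    Tick resp_time = clockEdge(delay);

    // Atomic mode: only a latency is returned, so the delay is scaled
    // by the period and added to whatever the other side reports.
    Tick downstream = 3000;
    Tick atomic_latency = delay * period + downstream;   // 5000

    return (resp_time == 8000 && atomic_latency == 5000) ? 0 : 1;
}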

View file

@@ -140,7 +140,7 @@ class Bridge : public MemObject
     BridgeMasterPort& masterPort;
     /** Minimum request delay though this bridge. */
-    Tick delay;
+    Cycles delay;
     /** Address ranges to pass through the bridge */
     AddrRangeList ranges;
@@ -187,12 +187,12 @@ class Bridge : public MemObject
      * @param _name the port name including the owner
      * @param _bridge the structural owner
      * @param _masterPort the master port on the other side of the bridge
-     * @param _delay the delay from seeing a response to sending it
+     * @param _delay the delay in cycles from receiving to sending
      * @param _resp_limit the size of the response queue
      * @param _ranges a number of address ranges to forward
      */
     BridgeSlavePort(const std::string& _name, Bridge& _bridge,
-                    BridgeMasterPort& _masterPort, int _delay,
+                    BridgeMasterPort& _masterPort, Cycles _delay,
                     int _resp_limit, std::vector<Range<Addr> > _ranges);
     /**
@@ -255,7 +255,7 @@ class Bridge : public MemObject
     BridgeSlavePort& slavePort;
     /** Minimum delay though this bridge. */
-    Tick delay;
+    Cycles delay;
     /**
      * Request packet queue. Request packets are held in this
@@ -286,11 +286,11 @@ class Bridge : public MemObject
      * @param _name the port name including the owner
      * @param _bridge the structural owner
      * @param _slavePort the slave port on the other side of the bridge
-     * @param _delay the delay from seeing a request to sending it
+     * @param _delay the delay in cycles from receiving to sending
      * @param _req_limit the size of the request queue
      */
     BridgeMasterPort(const std::string& _name, Bridge& _bridge,
-                     BridgeSlavePort& _slavePort, int _delay,
+                     BridgeSlavePort& _slavePort, Cycles _delay,
                      int _req_limit);
     /**

View file

@@ -463,6 +463,8 @@ class CheckedInt(NumericParamValue):
         # most derived types require this, so we just do it here once
         code('%import "stdint.i"')
         code('%import "base/types.hh"')
+        # ignore the case operator for Cycles
+        code('%ignore *::operator uint64_t() const;')
     def getValue(self):
         return long(self.value)
@@ -480,6 +482,7 @@ class Int64(CheckedInt): cxx_type = 'int64_t'; size = 64; unsigned = False
 class UInt64(CheckedInt): cxx_type = 'uint64_t'; size = 64; unsigned = True
 class Counter(CheckedInt): cxx_type = 'Counter'; size = 64; unsigned = True
+class Cycles(CheckedInt): cxx_type = 'Cycles'; size = 64; unsigned = True
 class Tick(CheckedInt): cxx_type = 'Tick'; size = 64; unsigned = True
 class TcpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
 class UdpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True

View file

@@ -64,7 +64,7 @@ class ClockedObject : public SimObject
     // The cycle counter value corresponding to the current value of
     // 'tick'
-    mutable Tick cycle;
+    mutable Cycles cycle;
     /**
      * Prevent inadvertent use of the copy constructor and assignment
@@ -96,7 +96,7 @@ class ClockedObject : public SimObject
         // if not, we have to recalculate the cycle and tick, we
         // perform the calculations in terms of relative cycles to
         // allow changes to the clock period in the future
-        Tick elapsedCycles = divCeil(curTick() - tick, clock);
+        Cycles elapsedCycles(divCeil(curTick() - tick, clock));
         cycle += elapsedCycles;
         tick += elapsedCycles * clock;
     }
@@ -130,22 +130,22 @@ class ClockedObject : public SimObject
      *
      * @return The tick when the clock edge occurs
      */
-    inline Tick clockEdge(int cycles = 0) const
+    inline Tick clockEdge(Cycles cycles = Cycles(0)) const
     {
         // align tick to the next clock edge
         update();
         // figure out when this future cycle is
-        return tick + ticks(cycles);
+        return tick + clock * cycles;
     }
     /**
      * Determine the current cycle, corresponding to a tick aligned to
      * a clock edge.
      *
-     * @return The current cycle
+     * @return The current cycle count
      */
-    inline Tick curCycle() const
+    inline Cycles curCycle() const
     {
         // align cycle to the next clock edge.
         update();
@@ -162,13 +162,12 @@ class ClockedObject : public SimObject
     Tick nextCycle() const
     { return clockEdge(); }
-    inline Tick frequency() const { return SimClock::Frequency / clock; }
-    inline Tick ticks(int cycles) const { return clock * cycles; }
+    inline uint64_t frequency() const { return SimClock::Frequency / clock; }
     inline Tick clockPeriod() const { return clock; }
-    inline Tick tickToCycle(Tick tick) const { return tick / clock; }
+    inline Cycles ticksToCycles(Tick tick) const
+    { return Cycles(tick / clock); }
 };
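
Pulling the ClockedObject pieces above together: update() only ever advances the cached tick/cycle pair by whole cycles, and clockEdge(), curCycle() and ticksToCycles() are thin helpers on top of it. A condensed, self-contained sketch of that scheme (the member names mirror the diff, but the free-standing curTick(), the local divCeil and the 500-tick period are example assumptions):

#include <stdint.h>

typedef uint64_t Tick;
typedef uint64_t Cycles;   // stand-in alias; the real Cycles is a class

static Tick _now = 0;                    // example global time
static Tick curTick() { return _now; }
static Tick divCeil(Tick a, Tick b) { return (a + b - 1) / b; }

struct ClockedSketch
{
    Tick clock;            // clock period in ticks
    mutable Tick tick;     // tick of the last known clock edge
    mutable Cycles cycle;  // cycle count at that edge

    explicit ClockedSketch(Tick period) : clock(period), tick(0), cycle(0) { }

    // Roll tick/cycle forward by whole cycles only, as in the diff.
    void update() const
    {
        if (curTick() <= tick)
            return;
        Cycles elapsed = divCeil(curTick() - tick, clock);
        cycle += elapsed;
        tick += elapsed * clock;
    }

    Tick clockEdge(Cycles cycles = 0) const
    { update(); return tick + clock * cycles; }

    Cycles curCycle() const
    { update(); return cycle; }

    Cycles ticksToCycles(Tick t) const
    { return t / clock; }
};

int main()
{
    ClockedSketch obj(500);   // 500-tick period, e.g. a 2 GHz clock
    _now = 1250;              // between the edges at tick 1000 and 1500

    // Next edge is tick 1500 (cycle 3); one cycle later is tick 2000.
    bool ok = obj.clockEdge(1) == 2000 &&
              obj.curCycle() == 3 &&
              obj.ticksToCycles(1250) == 2;
    return ok ? 0 : 1;
}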

View file

@@ -245,7 +245,7 @@ Process::initState()
     ThreadContext *tc = system->getThreadContext(contextIds[0]);
     // mark this context as active so it will start ticking.
-    tc->activate(0);
+    tc->activate(Cycles(0));
 }
 // map simulator fd sim_fd to target fd tgt_fd

View file

@@ -172,7 +172,7 @@ quiesceCycles(ThreadContext *tc, uint64_t cycles)
     EndQuiesceEvent *quiesceEvent = tc->getQuiesceEvent();
-    Tick resume = curTick() + cpu->ticks(cycles);
+    Tick resume = cpu->clockEdge(Cycles(cycles));
     cpu->reschedule(quiesceEvent, resume, true);