sim: separate nextCycle() and clockEdge() in clockedObjects

Previously, nextCycle() could return the *current* cycle if the current tick was
already aligned with the clock edge. This behavior is not only confusing (not
quite what the function name implies), but also caused problems in the
drainResume() function. When exiting/re-entering the sim loop (e.g., to take
checkpoints), the CPUs will drain and resume. Due to the previous behavior of
nextCycle(), the CPU tick events were being rescheduled in the same ticks that
were already processed before draining. This caused divergence from runs that
did not exit/re-enter the sim loop. (Initially only a one-cycle difference, but a
significant impact later on.)

This patch separates out the two behaviors (nextCycle() and clockEdge()),
uses nextCycle() in drainResume, and uses clockEdge() everywhere else.
Nothing (other than name) should change except for the drainResume timing.
This commit is contained in:
Dam Sunwoo 2013-04-22 13:20:31 -04:00
parent 2c1e344313
commit e8381142b0
9 changed files with 21 additions and 20 deletions

View file

@ -1715,7 +1715,7 @@ InOrderCPU::wakeCPU()
numCycles += extra_cycles;
schedule(&tickEvent, nextCycle());
schedule(&tickEvent, clockEdge());
}
// Lots of copied full system code...place into BaseCPU class?

View file

@ -1720,7 +1720,7 @@ FullO3CPU<Impl>::wakeCPU()
idleCycles += cycles;
numCycles += cycles;
schedule(tickEvent, nextCycle());
schedule(tickEvent, clockEdge());
}
template <class Impl>

View file

@ -120,7 +120,7 @@ TimingSimpleCPU::drain(DrainManager *drain_manager)
// succeed on the first attempt. We need to reschedule it if
// the CPU is waiting for a microcode routine to complete.
if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
schedule(fetchEvent, nextCycle());
schedule(fetchEvent, clockEdge());
return 1;
}
@ -616,7 +616,7 @@ TimingSimpleCPU::advanceInst(Fault fault)
if (fault != NoFault) {
advancePC(fault);
DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
reschedule(fetchEvent, nextCycle(), true);
reschedule(fetchEvent, clockEdge(), true);
_status = Faulting;
return;
}
@ -715,7 +715,7 @@ TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
// delay processing of returned data until next CPU clock edge
Tick next_tick = cpu->nextCycle();
Tick next_tick = cpu->clockEdge();
if (next_tick == curTick())
cpu->completeIfetch(pkt);
@ -807,7 +807,7 @@ bool
TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
{
// delay processing of returned data until next CPU clock edge
Tick next_tick = cpu->nextCycle();
Tick next_tick = cpu->clockEdge();
if (next_tick == curTick()) {
cpu->completeDataAccess(pkt);

View file

@ -282,7 +282,7 @@ HDLcd::write(PacketPtr pkt)
if (new_command.enable) {
doUpdateParams = true;
if (!frameUnderway) {
schedule(startFrameEvent, nextCycle());
schedule(startFrameEvent, clockEdge());
}
}
}
@ -514,7 +514,7 @@ HDLcd::renderPixel()
frameUnderrun = true;
int_rawstat.underrun = 1;
if (!intEvent.scheduled())
schedule(intEvent, nextCycle());
schedule(intEvent, clockEdge());
} else {
// emulate the pixel read from the internal buffer
pixelBufferSize -= bytesPerPixel() * count;
@ -524,7 +524,7 @@ HDLcd::renderPixel()
// the DMA may have previously stalled due to the buffer being full;
// give it a kick; it knows not to fill if at end of frame, underrun, etc
if (!fillPixelBufferEvent.scheduled())
schedule(fillPixelBufferEvent, nextCycle());
schedule(fillPixelBufferEvent, clockEdge());
// schedule the next pixel read according to where it is in the frame
pixelIndex += count;
@ -597,7 +597,7 @@ HDLcd::dmaDone(DmaDoneEvent *event)
if ((dmaCurAddr < dmaMaxAddr) &&
(bytesFreeInPixelBuffer() + targetTransSize < PIXEL_BUFFER_CAPACITY) &&
!fillPixelBufferEvent.scheduled()) {
schedule(fillPixelBufferEvent, nextCycle());
schedule(fillPixelBufferEvent, clockEdge());
}
}

View file

@ -441,7 +441,7 @@ Pl111::readFramebuffer()
// Updating base address, interrupt if we're supposed to
lcdRis.baseaddr = 1;
if (!intEvent.scheduled())
schedule(intEvent, nextCycle());
schedule(intEvent, clockEdge());
curAddr = 0;
startTime = curTick();
@ -492,7 +492,7 @@ Pl111::dmaDone()
" have taken %d\n", curTick() - startTime, maxFrameTime);
lcdRis.underflow = 1;
if (!intEvent.scheduled())
schedule(intEvent, nextCycle());
schedule(intEvent, clockEdge());
}
assert(!readEvent.scheduled());
@ -522,7 +522,7 @@ Pl111::dmaDone()
return;
if (!fillFifoEvent.scheduled())
schedule(fillFifoEvent, nextCycle());
schedule(fillFifoEvent, clockEdge());
}
void

View file

@ -277,7 +277,7 @@ Bridge::BridgeMasterPort::trySendTiming()
req = transmitList.front();
DPRINTF(Bridge, "Scheduling next send\n");
bridge.schedule(sendEvent, std::max(req.tick,
bridge.nextCycle()));
bridge.clockEdge()));
}
// if we have stalled a request due to a full request queue,
@ -318,7 +318,7 @@ Bridge::BridgeSlavePort::trySendTiming()
resp = transmitList.front();
DPRINTF(Bridge, "Scheduling next send\n");
bridge.schedule(sendEvent, std::max(resp.tick,
bridge.nextCycle()));
bridge.clockEdge()));
}
// if there is space in the request queue and we were stalling

View file

@ -135,7 +135,7 @@ BaseBus::calcPacketTiming(PacketPtr pkt)
// the bus will be called at a time that is not necessarily
// coinciding with its own clock, so start by determining how long
// until the next clock edge (could be zero)
Tick offset = nextCycle() - curTick();
Tick offset = clockEdge() - curTick();
// determine how many cycles are needed to send the data
unsigned dataCycles = pkt->hasData() ? divCeil(pkt->getSize(), width) : 0;

View file

@ -307,7 +307,7 @@ RubyMemoryControl::enqueueMemRef(MemoryNode& memRef)
m_input_queue.push_back(memRef);
if (!m_event.scheduled()) {
schedule(m_event, nextCycle());
schedule(m_event, clockEdge());
}
}

View file

@ -172,13 +172,14 @@ class ClockedObject : public SimObject
}
/**
* Based on the clock of the object, determine the tick when the
* next cycle begins, in other words, return the next clock edge.
* Based on the clock of the object, determine the tick when the next
* cycle begins, in other words, return the next clock edge.
* (This can never be the current tick.)
*
* @return The tick when the next cycle starts
*/
Tick nextCycle() const
{ return clockEdge(); }
{ return clockEdge(Cycles(1)); }
inline uint64_t frequency() const { return SimClock::Frequency / clock; }