RubyPort and Sequencer: Fix draining

Fix the drain functionality of the RubyPort to only call drain on child ports
during a system-wide drain process, instead of calling each time that a
ruby_hit_callback is executed.

This fixes the issue of the RubyPort ports being reawakened during the drain
simulation, possibly with work they didn't previously have to complete. If
they have new work, they may call process on the drain event that they had
not registered work for, causing an assertion failure when completing the
drain event.

Also, in RubyPort, set the drainEvent to NULL when there are no events
to be drained. If it is not set to NULL, the drain loop can end up
using stale drainEvents.
This commit is contained in:
Joel Hestness 2012-09-23 13:57:08 -05:00
parent 6427342318
commit 4095af5fd6
3 changed files with 22 additions and 20 deletions

View file

@ -527,7 +527,7 @@ RubyPort::testDrainComplete()
{
//If we weren't able to drain before, we might be able to now.
if (drainEvent != NULL) {
unsigned int drainCount = getDrainCount(drainEvent);
unsigned int drainCount = outstandingCount();
DPRINTF(Drain, "Drain count: %u\n", drainCount);
if (drainCount == 0) {
DPRINTF(Drain, "RubyPort done draining, processing drain event\n");
@ -539,21 +539,9 @@ RubyPort::testDrainComplete()
}
unsigned int
RubyPort::getDrainCount(Event *de)
RubyPort::getChildDrainCount(Event *de)
{
int count = 0;
//
// If the sequencer is not empty, then requests need to drain.
// The outstandingCount is the number of requests outstanding and thus the
// number of times M5's timing port will process the drain event.
//
count += outstandingCount();
DPRINTF(Config, "outstanding count %d\n", outstandingCount());
// To simplify the draining process, the sequencer's deadlock detection
// event should have been descheduled.
assert(isDeadlockEventScheduled() == false);
if (pio_port.isConnected()) {
count += pio_port.drain(de);
@ -583,19 +571,31 @@ RubyPort::drain(Event *de)
descheduleDeadlockEvent();
}
int count = getDrainCount(de);
//
// If the RubyPort is not empty, then it needs to clear all outstanding
// requests before it should call drainEvent->process()
//
DPRINTF(Config, "outstanding count %d\n", outstandingCount());
bool need_drain = outstandingCount() > 0;
//
// Also, get the number of child ports that will also need to clear
// their buffered requests before they call drainEvent->process()
//
unsigned int child_drain_count = getChildDrainCount(de);
// Set status
if (count != 0) {
if (need_drain) {
drainEvent = de;
DPRINTF(Drain, "RubyPort not drained\n");
changeState(SimObject::Draining);
return count;
return child_drain_count + 1;
}
drainEvent = NULL;
changeState(SimObject::Drained);
return 0;
return child_drain_count;
}
void

View file

@ -166,7 +166,7 @@ class RubyPort : public MemObject
}
}
unsigned int getDrainCount(Event *de);
unsigned int getChildDrainCount(Event *de);
uint16_t m_port_id;
uint64_t m_request_cnt;

View file

@ -85,6 +85,8 @@ Sequencer::~Sequencer()
void
Sequencer::wakeup()
{
assert(getState() != SimObject::Draining);
// Check for deadlock of any of the requests
Time current_time = g_system_ptr->getTime();
@ -207,7 +209,7 @@ Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
(m_writeRequestTable.size() + m_readRequestTable.size()));
// See if we should schedule a deadlock check
if (deadlockCheckEvent.scheduled() == false) {
if (!deadlockCheckEvent.scheduled() && getState() != SimObject::Draining) {
schedule(deadlockCheckEvent,
g_system_ptr->clockPeriod() * m_deadlock_threshold + curTick());
}