ruby: Fix for stallAndWait bug

It was previously possible for a stalled message to be reordered after an
incoming message. This patch ensures that any stalled message stays in its
original request order.
This commit is contained in:
David Hashe 2015-07-20 09:15:18 -05:00
parent fbb220b4ae
commit 63a9f10de8
3 changed files with 23 additions and 12 deletions

View file

@ -86,7 +86,7 @@ MessageBuffer::areNSlotsAvailable(unsigned int n)
// determine the correct size for the current cycle
// pop operations shouldn't effect the network's visible size
// until next cycle, but enqueue operations effect the visible
// until schd cycle, but enqueue operations effect the visible
// size immediately
unsigned int current_size = 0;
@ -234,7 +234,7 @@ MessageBuffer::dequeue()
m_receiver->ticksToCycles(message->getDelayedTicks());
// record previous size and time so the current buffer size isn't
// adjusted until next cycle
// adjusted until schd cycle
if (m_time_last_time_pop < m_receiver->clockEdge()) {
m_size_at_cycle_start = m_prio_heap.size();
m_time_last_time_pop = m_receiver->clockEdge();
@ -275,19 +275,19 @@ MessageBuffer::recycle()
}
void
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick nextTick)
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
{
while(!lt.empty()) {
m_msg_counter++;
MsgPtr m = lt.front();
m->setLastEnqueueTime(nextTick);
m->setLastEnqueueTime(schdTick);
m->setMsgCounter(m_msg_counter);
m_prio_heap.push_back(m);
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MsgPtr>());
m_consumer->scheduleEventAbsolute(nextTick);
m_consumer->scheduleEventAbsolute(schdTick);
lt.pop_front();
}
}
@ -297,13 +297,15 @@ MessageBuffer::reanalyzeMessages(const Address& addr)
{
DPRINTF(RubyQueue, "ReanalyzeMessages\n");
assert(m_stall_msg_map.count(addr) > 0);
Tick nextTick = m_receiver->clockEdge(Cycles(1));
Tick curTick = m_receiver->clockEdge();
//
// Put all stalled messages associated with this address back on the
// prio heap
// prio heap. The reanalyzeList call will make sure the consumer is
// scheduled for the current cycle so that the previously stalled messages
// will be observed before any younger messages that may arrive this cycle
//
reanalyzeList(m_stall_msg_map[addr], nextTick);
reanalyzeList(m_stall_msg_map[addr], curTick);
m_stall_msg_map.erase(addr);
}
@ -311,15 +313,17 @@ void
MessageBuffer::reanalyzeAllMessages()
{
DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
Tick nextTick = m_receiver->clockEdge(Cycles(1));
Tick curTick = m_receiver->clockEdge();
//
// Put all stalled messages associated with this address back on the
// prio heap
// prio heap. The reanalyzeList call will make sure the consumer is
// scheduled for the current cycle so that the previously stalled messages
// will be observed before any younger messages that may arrive this cycle.
//
for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
map_iter != m_stall_msg_map.end(); ++map_iter) {
reanalyzeList(map_iter->second, nextTick);
reanalyzeList(map_iter->second, curTick);
}
m_stall_msg_map.clear();
}

View file

@ -154,6 +154,7 @@ AbstractController::wakeUpAllBuffers()
//
std::vector<MsgVecType*> wokeUpMsgVecs;
MsgBufType wokeUpMsgBufs;
if(m_waiting_buffers.size() > 0) {
for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
@ -162,8 +163,13 @@ AbstractController::wakeUpAllBuffers()
for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
vec_iter != buf_iter->second->end();
++vec_iter) {
if (*vec_iter != NULL) {
//
// Make sure the MessageBuffer has not already be reanalyzed
//
if (*vec_iter != NULL &&
(wokeUpMsgBufs.count(*vec_iter) == 0)) {
(*vec_iter)->reanalyzeAllMessages();
wokeUpMsgBufs.insert(*vec_iter);
}
}
wokeUpMsgVecs.push_back(buf_iter->second);

View file

@ -150,6 +150,7 @@ class AbstractController : public MemObject, public Consumer
std::map<Address, MessageBuffer*> m_block_map;
typedef std::vector<MessageBuffer*> MsgVecType;
typedef std::set<MessageBuffer*> MsgBufType;
typedef std::map< Address, MsgVecType* > WaitingBufType;
WaitingBufType m_waiting_buffers;