ruby: Fix for stallAndWait bug
It was previously possible for a stalled message to be reordered after an incoming message. This patch ensures that any stalled message stays in its original request order.
This commit is contained in:
parent
fbb220b4ae
commit
63a9f10de8
|
@ -86,7 +86,7 @@ MessageBuffer::areNSlotsAvailable(unsigned int n)
|
||||||
|
|
||||||
// determine the correct size for the current cycle
|
// determine the correct size for the current cycle
|
||||||
// pop operations shouldn't effect the network's visible size
|
// pop operations shouldn't effect the network's visible size
|
||||||
// until next cycle, but enqueue operations effect the visible
|
// until schd cycle, but enqueue operations effect the visible
|
||||||
// size immediately
|
// size immediately
|
||||||
unsigned int current_size = 0;
|
unsigned int current_size = 0;
|
||||||
|
|
||||||
|
@ -234,7 +234,7 @@ MessageBuffer::dequeue()
|
||||||
m_receiver->ticksToCycles(message->getDelayedTicks());
|
m_receiver->ticksToCycles(message->getDelayedTicks());
|
||||||
|
|
||||||
// record previous size and time so the current buffer size isn't
|
// record previous size and time so the current buffer size isn't
|
||||||
// adjusted until next cycle
|
// adjusted until schd cycle
|
||||||
if (m_time_last_time_pop < m_receiver->clockEdge()) {
|
if (m_time_last_time_pop < m_receiver->clockEdge()) {
|
||||||
m_size_at_cycle_start = m_prio_heap.size();
|
m_size_at_cycle_start = m_prio_heap.size();
|
||||||
m_time_last_time_pop = m_receiver->clockEdge();
|
m_time_last_time_pop = m_receiver->clockEdge();
|
||||||
|
@ -275,19 +275,19 @@ MessageBuffer::recycle()
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick nextTick)
|
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
|
||||||
{
|
{
|
||||||
while(!lt.empty()) {
|
while(!lt.empty()) {
|
||||||
m_msg_counter++;
|
m_msg_counter++;
|
||||||
MsgPtr m = lt.front();
|
MsgPtr m = lt.front();
|
||||||
m->setLastEnqueueTime(nextTick);
|
m->setLastEnqueueTime(schdTick);
|
||||||
m->setMsgCounter(m_msg_counter);
|
m->setMsgCounter(m_msg_counter);
|
||||||
|
|
||||||
m_prio_heap.push_back(m);
|
m_prio_heap.push_back(m);
|
||||||
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
|
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
|
||||||
greater<MsgPtr>());
|
greater<MsgPtr>());
|
||||||
|
|
||||||
m_consumer->scheduleEventAbsolute(nextTick);
|
m_consumer->scheduleEventAbsolute(schdTick);
|
||||||
lt.pop_front();
|
lt.pop_front();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -297,13 +297,15 @@ MessageBuffer::reanalyzeMessages(const Address& addr)
|
||||||
{
|
{
|
||||||
DPRINTF(RubyQueue, "ReanalyzeMessages\n");
|
DPRINTF(RubyQueue, "ReanalyzeMessages\n");
|
||||||
assert(m_stall_msg_map.count(addr) > 0);
|
assert(m_stall_msg_map.count(addr) > 0);
|
||||||
Tick nextTick = m_receiver->clockEdge(Cycles(1));
|
Tick curTick = m_receiver->clockEdge();
|
||||||
|
|
||||||
//
|
//
|
||||||
// Put all stalled messages associated with this address back on the
|
// Put all stalled messages associated with this address back on the
|
||||||
// prio heap
|
// prio heap. The reanalyzeList call will make sure the consumer is
|
||||||
|
// scheduled for the current cycle so that the previously stalled messages
|
||||||
|
// will be observed before any younger messages that may arrive this cycle
|
||||||
//
|
//
|
||||||
reanalyzeList(m_stall_msg_map[addr], nextTick);
|
reanalyzeList(m_stall_msg_map[addr], curTick);
|
||||||
m_stall_msg_map.erase(addr);
|
m_stall_msg_map.erase(addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -311,15 +313,17 @@ void
|
||||||
MessageBuffer::reanalyzeAllMessages()
|
MessageBuffer::reanalyzeAllMessages()
|
||||||
{
|
{
|
||||||
DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
|
DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
|
||||||
Tick nextTick = m_receiver->clockEdge(Cycles(1));
|
Tick curTick = m_receiver->clockEdge();
|
||||||
|
|
||||||
//
|
//
|
||||||
// Put all stalled messages associated with this address back on the
|
// Put all stalled messages associated with this address back on the
|
||||||
// prio heap
|
// prio heap. The reanalyzeList call will make sure the consumer is
|
||||||
|
// scheduled for the current cycle so that the previously stalled messages
|
||||||
|
// will be observed before any younger messages that may arrive this cycle.
|
||||||
//
|
//
|
||||||
for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
|
for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
|
||||||
map_iter != m_stall_msg_map.end(); ++map_iter) {
|
map_iter != m_stall_msg_map.end(); ++map_iter) {
|
||||||
reanalyzeList(map_iter->second, nextTick);
|
reanalyzeList(map_iter->second, curTick);
|
||||||
}
|
}
|
||||||
m_stall_msg_map.clear();
|
m_stall_msg_map.clear();
|
||||||
}
|
}
|
||||||
|
|
|
@ -154,6 +154,7 @@ AbstractController::wakeUpAllBuffers()
|
||||||
//
|
//
|
||||||
|
|
||||||
std::vector<MsgVecType*> wokeUpMsgVecs;
|
std::vector<MsgVecType*> wokeUpMsgVecs;
|
||||||
|
MsgBufType wokeUpMsgBufs;
|
||||||
|
|
||||||
if(m_waiting_buffers.size() > 0) {
|
if(m_waiting_buffers.size() > 0) {
|
||||||
for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
|
for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
|
||||||
|
@ -162,8 +163,13 @@ AbstractController::wakeUpAllBuffers()
|
||||||
for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
|
for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
|
||||||
vec_iter != buf_iter->second->end();
|
vec_iter != buf_iter->second->end();
|
||||||
++vec_iter) {
|
++vec_iter) {
|
||||||
if (*vec_iter != NULL) {
|
//
|
||||||
|
// Make sure the MessageBuffer has not already be reanalyzed
|
||||||
|
//
|
||||||
|
if (*vec_iter != NULL &&
|
||||||
|
(wokeUpMsgBufs.count(*vec_iter) == 0)) {
|
||||||
(*vec_iter)->reanalyzeAllMessages();
|
(*vec_iter)->reanalyzeAllMessages();
|
||||||
|
wokeUpMsgBufs.insert(*vec_iter);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
wokeUpMsgVecs.push_back(buf_iter->second);
|
wokeUpMsgVecs.push_back(buf_iter->second);
|
||||||
|
|
|
@ -150,6 +150,7 @@ class AbstractController : public MemObject, public Consumer
|
||||||
std::map<Address, MessageBuffer*> m_block_map;
|
std::map<Address, MessageBuffer*> m_block_map;
|
||||||
|
|
||||||
typedef std::vector<MessageBuffer*> MsgVecType;
|
typedef std::vector<MessageBuffer*> MsgVecType;
|
||||||
|
typedef std::set<MessageBuffer*> MsgBufType;
|
||||||
typedef std::map< Address, MsgVecType* > WaitingBufType;
|
typedef std::map< Address, MsgVecType* > WaitingBufType;
|
||||||
WaitingBufType m_waiting_buffers;
|
WaitingBufType m_waiting_buffers;
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue