ruby: message buffer: drop dequeue_getDelayCycles()

Updating and returning the delay cycles is now performed by the dequeue()
function itself.
Nilay Vaish 2014-05-23 06:07:02 -05:00
parent 1e26b7ea29
commit 8bf41e41c1
11 changed files with 31 additions and 37 deletions
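
At the call sites this collapses the previous two-step pattern into a single call. A minimal consumer-side sketch (the `buffer` variable is hypothetical, not taken from the patch):

    // Before this commit: the delay was obtained through a separate method,
    // which internally called dequeue().
    //     Cycles delay = buffer.dequeue_getDelayCycles();
    //
    // After this commit: dequeue() itself updates the message's delayed ticks
    // and returns the accumulated delay; callers that only want to pop the
    // message can ignore the return value.
    Cycles delay = buffer.dequeue();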

View file

@@ -474,12 +474,12 @@ machine(L0Cache, "MESI Directory L0 Cache")
   action(l_popRequestQueue, "l",
          desc="Pop incoming request queue and profile the delay within this virtual network") {
-    profileMsgDelay(2, messgeBuffer_in.dequeue_getDelayCycles());
+    profileMsgDelay(2, messgeBuffer_in.dequeue());
   }

   action(o_popIncomingResponseQueue, "o",
          desc="Pop Incoming Response queue and profile the delay within this virtual network") {
-    profileMsgDelay(1, messgeBuffer_in.dequeue_getDelayCycles());
+    profileMsgDelay(1, messgeBuffer_in.dequeue());
   }

   action(s_deallocateTBE, "s", desc="Deallocate TBE") {

View file

@@ -640,12 +640,12 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
   action(l_popL2RequestQueue, "l",
          desc="Pop incoming request queue and profile the delay within this virtual network") {
-    profileMsgDelay(2, requestNetwork_in.dequeue_getDelayCycles());
+    profileMsgDelay(2, requestNetwork_in.dequeue());
   }

   action(o_popL2ResponseQueue, "o",
          desc="Pop Incoming Response queue and profile the delay within this virtual network") {
-    profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
+    profileMsgDelay(1, responseNetwork_in.dequeue());
   }

   action(s_deallocateTBE, "s", desc="Deallocate TBE") {

View file

@@ -833,12 +833,14 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
     mandatoryQueue_in.dequeue();
   }

-  action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
-    profileMsgDelay(2, requestL1Network_in.dequeue_getDelayCycles());
+  action(l_popRequestQueue, "l",
+         desc="Pop incoming request queue and profile the delay within this virtual network") {
+    profileMsgDelay(2, requestL1Network_in.dequeue());
   }

-  action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
-    profileMsgDelay(1, responseL1Network_in.dequeue_getDelayCycles());
+  action(o_popIncomingResponseQueue, "o",
+         desc="Pop Incoming Response queue and profile the delay within this virtual network") {
+    profileMsgDelay(1, responseL1Network_in.dequeue());
   }

   action(s_deallocateTBE, "s", desc="Deallocate TBE") {

View file

@@ -590,15 +590,15 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
   }

   action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
-    profileMsgDelay(0, L1RequestL2Network_in.dequeue_getDelayCycles());
+    profileMsgDelay(0, L1RequestL2Network_in.dequeue());
   }

   action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
-    profileMsgDelay(0, L1unblockNetwork_in.dequeue_getDelayCycles());
+    profileMsgDelay(0, L1unblockNetwork_in.dequeue());
   }

   action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
-    profileMsgDelay(1, responseL2Network_in.dequeue_getDelayCycles());
+    profileMsgDelay(1, responseL2Network_in.dequeue());
   }

   action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {

View file

@@ -317,11 +317,11 @@ machine(L1Cache, "MI Example L1 Cache")
   }

   action(n_popResponseQueue, "n", desc="Pop the response queue") {
-    profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
+    profileMsgDelay(1, responseNetwork_in.dequeue());
   }

   action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
-    profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
+    profileMsgDelay(2, forwardRequestNetwork_in.dequeue());
   }

   action(p_profileMiss, "pi", desc="Profile cache miss") {

View file

@@ -41,8 +41,7 @@ external_type(Scalar, primitive="yes");
 structure(InPort, external = "yes", primitive="yes") {
   bool isReady();
-  void dequeue();
-  Cycles dequeue_getDelayCycles();
+  Cycles dequeue();
   void recycle();
   bool isEmpty();
 }

View file

@@ -220,8 +220,11 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
 }

 Cycles
-MessageBuffer::dequeue_getDelayCycles()
+MessageBuffer::dequeue()
 {
+    DPRINTF(RubyQueue, "Popping\n");
+    assert(isReady());
+
     // get MsgPtr of the message about to be dequeued
     MsgPtr message = m_prio_heap.front().m_msgptr;
@@ -229,16 +232,6 @@ MessageBuffer::dequeue_getDelayCycles()
     message->updateDelayedTicks(m_receiver->clockEdge());
     Cycles delayCycles =
         m_receiver->ticksToCycles(message->getDelayedTicks());

-    dequeue();
-
-    return delayCycles;
-}
-
-void
-MessageBuffer::dequeue()
-{
-    DPRINTF(RubyQueue, "Popping\n");
-    assert(isReady());
-
     // record previous size and time so the current buffer size isn't
     // adjusted until next cycle
@@ -250,6 +243,8 @@ MessageBuffer::dequeue()
     pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
              greater<MessageBufferNode>());
     m_prio_heap.pop_back();
+
+    return delayCycles;
 }

 void
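
Read together, the three MessageBuffer.cc hunks fold the two functions into one. A sketch of how the merged method reads after the change (blank lines and the size/time bookkeeping elided, so not the verbatim gem5 source):

    Cycles
    MessageBuffer::dequeue()
    {
        DPRINTF(RubyQueue, "Popping\n");
        assert(isReady());

        // Get the message at the head of the queue and fold the time it has
        // spent in the buffer into the delay returned to the caller; this was
        // previously done by dequeue_getDelayCycles().
        MsgPtr message = m_prio_heap.front().m_msgptr;
        message->updateDelayedTicks(m_receiver->clockEdge());
        Cycles delayCycles =
            m_receiver->ticksToCycles(message->getDelayedTicks());

        // ... record the previous size and time so the buffer size is not
        // adjusted until the next cycle (unchanged by this commit) ...

        pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
                 greater<MessageBufferNode>());
        m_prio_heap.pop_back();

        return delayCycles;
    }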

View file

@@ -118,11 +118,9 @@ class MessageBuffer
     void enqueue(MsgPtr message) { enqueue(message, Cycles(1)); }
     void enqueue(MsgPtr message, Cycles delta);

-    //! Updates the delay cycles of the message at the of the queue,
+    //! Updates the delay cycles of the message at the head of the queue,
     //! removes it from the queue and returns its total delay.
-    Cycles dequeue_getDelayCycles();
-
-    void dequeue();
+    Cycles dequeue();

     void recycle();
     bool isEmpty() const { return m_prio_heap.size() == 0; }

View file

@@ -260,6 +260,10 @@ PerfectSwitch::wakeup()
                     unmodified_msg_ptr = msg_ptr->clone();
                 }

+                // Dequeue msg
+                m_in[incoming][vnet]->dequeue();
+                m_pending_message_count[vnet]--;
+
                 // Enqueue it - for all outgoing queues
                 for (int i=0; i<output_links.size(); i++) {
                     int outgoing = output_links[i];
@@ -284,10 +288,6 @@
                     m_out[outgoing][vnet]->enqueue(msg_ptr);
                 }

-                // Dequeue msg
-                m_in[incoming][vnet]->dequeue();
-                m_pending_message_count[vnet]--;
-
             }
         }
     }

View file

@@ -157,8 +157,8 @@ Throttle::wakeup()
                     g_system_ptr->curCycle());

            // Move the message
-           m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
            m_in[vnet]->dequeue();
+           m_out[vnet]->enqueue(msg_ptr, m_link_latency);

            // Count the message
            m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
View file

@@ -42,8 +42,8 @@ class ExprStatementAST(StatementAST):
         # The return type must be void
         if actual_type != self.symtab.find("void", Type):
-            self.expr.error("Non-void return must not be ignored, " + \
-                            "return type is '%s'", actual_type.ident)
+            self.expr.warning("Non-void return ignored, " + \
+                              "return type is '%s'", actual_type.ident)

     def findResources(self, resources):
         self.expr.findResources(resources)
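
This last hunk accompanies the API change: with dequeue() now returning Cycles, SLICC call sites that pop a buffer without using the returned delay (for example the untouched mandatoryQueue_in.dequeue() call above) would hit the old hard error for ignored non-void returns; relaxing it to a warning lets those call sites pass the SLICC compiler unchanged.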