diff --git a/src/mem/protocol/MESI_Three_Level-L0cache.sm b/src/mem/protocol/MESI_Three_Level-L0cache.sm
index fd1b85e0d..47a41b83a 100644
--- a/src/mem/protocol/MESI_Three_Level-L0cache.sm
+++ b/src/mem/protocol/MESI_Three_Level-L0cache.sm
@@ -474,12 +474,12 @@ machine(L0Cache, "MESI Directory L0 Cache")
 
   action(l_popRequestQueue, "l",
          desc="Pop incoming request queue and profile the delay within this virtual network") {
-    profileMsgDelay(2, messgeBuffer_in.dequeue_getDelayCycles());
+    profileMsgDelay(2, messgeBuffer_in.dequeue());
   }
 
   action(o_popIncomingResponseQueue, "o",
          desc="Pop Incoming Response queue and profile the delay within this virtual network") {
-    profileMsgDelay(1, messgeBuffer_in.dequeue_getDelayCycles());
+    profileMsgDelay(1, messgeBuffer_in.dequeue());
   }
 
   action(s_deallocateTBE, "s", desc="Deallocate TBE") {
diff --git a/src/mem/protocol/MESI_Three_Level-L1cache.sm b/src/mem/protocol/MESI_Three_Level-L1cache.sm
index 43a9a49cf..36068dbac 100644
--- a/src/mem/protocol/MESI_Three_Level-L1cache.sm
+++ b/src/mem/protocol/MESI_Three_Level-L1cache.sm
@@ -640,12 +640,12 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
 
   action(l_popL2RequestQueue, "l",
          desc="Pop incoming request queue and profile the delay within this virtual network") {
-    profileMsgDelay(2, requestNetwork_in.dequeue_getDelayCycles());
+    profileMsgDelay(2, requestNetwork_in.dequeue());
   }
 
   action(o_popL2ResponseQueue, "o",
          desc="Pop Incoming Response queue and profile the delay within this virtual network") {
-    profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
+    profileMsgDelay(1, responseNetwork_in.dequeue());
  }
 
   action(s_deallocateTBE, "s", desc="Deallocate TBE") {
diff --git a/src/mem/protocol/MESI_Two_Level-L1cache.sm b/src/mem/protocol/MESI_Two_Level-L1cache.sm
index 5ccd453bf..a202a8deb 100644
--- a/src/mem/protocol/MESI_Two_Level-L1cache.sm
+++ b/src/mem/protocol/MESI_Two_Level-L1cache.sm
@@ -833,12 +833,14 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
     mandatoryQueue_in.dequeue();
   }
 
-  action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
-    profileMsgDelay(2, requestL1Network_in.dequeue_getDelayCycles());
+  action(l_popRequestQueue, "l",
+         desc="Pop incoming request queue and profile the delay within this virtual network") {
+    profileMsgDelay(2, requestL1Network_in.dequeue());
   }
 
-  action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
-    profileMsgDelay(1, responseL1Network_in.dequeue_getDelayCycles());
+  action(o_popIncomingResponseQueue, "o",
+         desc="Pop Incoming Response queue and profile the delay within this virtual network") {
+    profileMsgDelay(1, responseL1Network_in.dequeue());
   }
 
   action(s_deallocateTBE, "s", desc="Deallocate TBE") {
diff --git a/src/mem/protocol/MESI_Two_Level-L2cache.sm b/src/mem/protocol/MESI_Two_Level-L2cache.sm
index 7cd5560fc..f69eaa9a9 100644
--- a/src/mem/protocol/MESI_Two_Level-L2cache.sm
+++ b/src/mem/protocol/MESI_Two_Level-L2cache.sm
@@ -590,15 +590,15 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
   }
 
   action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
-    profileMsgDelay(0, L1RequestL2Network_in.dequeue_getDelayCycles());
+    profileMsgDelay(0, L1RequestL2Network_in.dequeue());
   }
 
   action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
-    profileMsgDelay(0, L1unblockNetwork_in.dequeue_getDelayCycles());
+    profileMsgDelay(0, L1unblockNetwork_in.dequeue());
   }
 
   action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
-    profileMsgDelay(1, responseL2Network_in.dequeue_getDelayCycles());
+    profileMsgDelay(1, responseL2Network_in.dequeue());
   }
 
   action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
index 29cf8d482..561de2397 100644
--- a/src/mem/protocol/MI_example-cache.sm
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -317,11 +317,11 @@ machine(L1Cache, "MI Example L1 Cache")
   }
 
   action(n_popResponseQueue, "n", desc="Pop the response queue") {
-    profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
+    profileMsgDelay(1, responseNetwork_in.dequeue());
   }
 
   action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
-    profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
+    profileMsgDelay(2, forwardRequestNetwork_in.dequeue());
   }
 
   action(p_profileMiss, "pi", desc="Profile cache miss") {
diff --git a/src/mem/protocol/RubySlicc_Types.sm b/src/mem/protocol/RubySlicc_Types.sm
index a601b2cfc..789595dbe 100644
--- a/src/mem/protocol/RubySlicc_Types.sm
+++ b/src/mem/protocol/RubySlicc_Types.sm
@@ -41,8 +41,7 @@ external_type(Scalar, primitive="yes");
 
 structure(InPort, external = "yes", primitive="yes") {
   bool isReady();
-  void dequeue();
-  Cycles dequeue_getDelayCycles();
+  Cycles dequeue();
   void recycle();
   bool isEmpty();
 }
diff --git a/src/mem/ruby/buffers/MessageBuffer.cc b/src/mem/ruby/buffers/MessageBuffer.cc
index 19ad9ee7d..298fdb3c3 100644
--- a/src/mem/ruby/buffers/MessageBuffer.cc
+++ b/src/mem/ruby/buffers/MessageBuffer.cc
@@ -220,8 +220,11 @@ MessageBuffer::enqueue(MsgPtr message, Cycles delta)
 }
 
 Cycles
-MessageBuffer::dequeue_getDelayCycles()
+MessageBuffer::dequeue()
 {
+    DPRINTF(RubyQueue, "Popping\n");
+    assert(isReady());
+
     // get MsgPtr of the message about to be dequeued
     MsgPtr message = m_prio_heap.front().m_msgptr;
 
@@ -229,16 +232,6 @@ MessageBuffer::dequeue_getDelayCycles()
     message->updateDelayedTicks(m_receiver->clockEdge());
     Cycles delayCycles = m_receiver->ticksToCycles(message->getDelayedTicks());
 
-    dequeue();
-
-    return delayCycles;
-}
-
-void
-MessageBuffer::dequeue()
-{
-    DPRINTF(RubyQueue, "Popping\n");
-    assert(isReady());
 
     // record previous size and time so the current buffer size isn't
     // adjusted until next cycle
@@ -250,6 +243,8 @@ MessageBuffer::dequeue()
     pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
              greater<MessageBufferNode>());
     m_prio_heap.pop_back();
+
+    return delayCycles;
 }
 
 void
diff --git a/src/mem/ruby/buffers/MessageBuffer.hh b/src/mem/ruby/buffers/MessageBuffer.hh
index 6019f3d6c..3b3a69a3e 100644
--- a/src/mem/ruby/buffers/MessageBuffer.hh
+++ b/src/mem/ruby/buffers/MessageBuffer.hh
@@ -118,11 +118,9 @@ class MessageBuffer
     void enqueue(MsgPtr message) { enqueue(message, Cycles(1)); }
     void enqueue(MsgPtr message, Cycles delta);
 
-    //! Updates the delay cycles of the message at the of the queue,
+    //! Updates the delay cycles of the message at the head of the queue,
     //! removes it from the queue and returns its total delay.
-    Cycles dequeue_getDelayCycles();
-
-    void dequeue();
+    Cycles dequeue();
 
     void recycle();
     bool isEmpty() const { return m_prio_heap.size() == 0; }
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index ecd1eb0be..cf2430e36 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -260,6 +260,10 @@ PerfectSwitch::wakeup()
                     unmodified_msg_ptr = msg_ptr->clone();
                 }
 
+                // Dequeue msg
+                m_in[incoming][vnet]->dequeue();
+                m_pending_message_count[vnet]--;
+
                 // Enqueue it - for all outgoing queues
                 for (int i=0; i<output_links.size(); i++) {
                     m_out[outgoing][vnet]->enqueue(msg_ptr);
                 }
-
-                // Dequeue msg
-                m_in[incoming][vnet]->dequeue();
-                m_pending_message_count[vnet]--;
             }
         }
     }
diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc
index 778436c6d..da7b1732b 100644
--- a/src/mem/ruby/network/simple/Throttle.cc
+++ b/src/mem/ruby/network/simple/Throttle.cc
@@ -157,8 +157,8 @@ Throttle::wakeup()
                     g_system_ptr->curCycle());
 
             // Move the message
-            m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
             m_in[vnet]->dequeue();
+            m_out[vnet]->enqueue(msg_ptr, m_link_latency);
 
             // Count the message
             m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
diff --git a/src/mem/slicc/ast/ExprStatementAST.py b/src/mem/slicc/ast/ExprStatementAST.py
index 898fccf3d..acb69f799 100644
--- a/src/mem/slicc/ast/ExprStatementAST.py
+++ b/src/mem/slicc/ast/ExprStatementAST.py
@@ -42,8 +42,8 @@ class ExprStatementAST(StatementAST):
 
         # The return type must be void
         if actual_type != self.symtab.find("void", Type):
-            self.expr.error("Non-void return must not be ignored, " + \
-                "return type is '%s'", actual_type.ident)
+            self.expr.warning("Non-void return ignored, " + \
+                "return type is '%s'", actual_type.ident)
 
     def findResources(self, resources):
         self.expr.findResources(resources)
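
Reviewer note: the functional change is that MessageBuffer::dequeue() now also does what dequeue_getDelayCycles() used to do: it pops the head message and returns that message's accumulated queueing delay in cycles. SLICC pop actions that profile the delay pass the return value straight to profileMsgDelay(), C++ callers such as PerfectSwitch and Throttle simply ignore it, and SLICC actions that only want the pop can now do the same because ExprStatementAST downgrades a discarded non-void return from an error to a warning. The sketch below is a minimal stand-alone model of that combined pop-and-return-delay shape, not the real Ruby classes: SimpleBuffer, Message and the explicit `now` argument are invented for illustration (the real dequeue() takes no argument and reads the receiver's clock internally).

    // Hypothetical, simplified model of the merged dequeue() interface.
    // Not gem5 code: SimpleBuffer/Message stand in for MessageBuffer/MsgPtr.
    #include <cassert>
    #include <cstdint>
    #include <deque>
    #include <iostream>

    using Cycles = std::uint64_t;

    struct Message {
        int payload;
        Cycles enqueued_at;  // cycle at which the message entered the buffer
    };

    class SimpleBuffer {
      public:
        void enqueue(const Message &msg) { m_queue.push_back(msg); }
        bool isReady() const { return !m_queue.empty(); }

        // Combined pop: remove the head message and return how long it
        // waited, so a caller can either profile the delay or ignore it.
        Cycles dequeue(Cycles now) {
            assert(isReady());
            Cycles delay = now - m_queue.front().enqueued_at;
            m_queue.pop_front();
            return delay;
        }

      private:
        std::deque<Message> m_queue;
    };

    int main() {
        SimpleBuffer buf;
        buf.enqueue({1, /* enqueued_at = */ 10});
        buf.enqueue({2, /* enqueued_at = */ 12});

        // Caller that profiles the delay, like the SLICC pop actions above:
        Cycles delay = buf.dequeue(/* now = */ 15);
        std::cout << "head message waited " << delay << " cycles\n";

        // Caller that only wants the pop, like PerfectSwitch/Throttle:
        buf.dequeue(/* now = */ 16);  // return value deliberately ignored
        return 0;
    }

With the two functions merged, popping a queue is always a single call and the delay information is available whenever a caller wants it, instead of living behind a separate dequeue_getDelayCycles() entry point.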