ruby: perfect switch: refactor code

Refactored the code in operateVnet(), moving part of it into a new
function, operateMessageBuffer(). This is required because a later patch
will introduce a wakeup event per MessageBuffer instead of a single
event for the entire Switch.
Author: Nilay Vaish
Date:   2015-09-12 16:16:17 -05:00
commit 8b199b775e (parent 25cd13dbf1)

2 changed files with 136 additions and 129 deletions
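
The point of the split: once the per-message work lives in
operateMessageBuffer(), a buffer-specific event can invoke it directly
instead of waking the whole Switch and rescanning every input port. A
minimal standalone sketch of that idea (hypothetical names, not gem5
code; the real per-buffer event only arrives in the later patch):

    #include <cstddef>
    #include <iostream>
    #include <queue>
    #include <string>
    #include <vector>

    struct MessageBuffer {
        std::queue<std::string> msgs;
        bool isReady() const { return !msgs.empty(); }
    };

    struct Switch {
        std::vector<MessageBuffer> in;

        // Before: one wakeup event rescans every buffer, ready or not.
        void wakeupAll() {
            for (std::size_t i = 0; i < in.size(); i++)
                operateMessageBuffer(in[i]);
        }

        // After: a per-buffer event targets exactly the buffer that
        // became ready.
        void wakeupOne(std::size_t i) { operateMessageBuffer(in[i]); }

        // The extracted routine: drain one buffer.
        void operateMessageBuffer(MessageBuffer &buf) {
            while (buf.isReady()) {
                std::cout << "route: " << buf.msgs.front() << "\n";
                buf.msgs.pop();
            }
        }
    };

    int main() {
        Switch s;
        s.in.resize(4);
        s.in[2].msgs.push("req");
        s.wakeupOne(2);  // only buffer 2 is touched
        return 0;
    }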

src/mem/ruby/network/simple/PerfectSwitch.cc

@@ -103,9 +103,6 @@ PerfectSwitch::~PerfectSwitch()
 void
 PerfectSwitch::operateVnet(int vnet)
 {
-    MsgPtr msg_ptr;
-    Message *net_msg_ptr = NULL;
-
     // This is for round-robin scheduling
     int incoming = m_round_robin_start;
     m_round_robin_start++;
@@ -122,10 +119,6 @@ PerfectSwitch::operateVnet(int vnet)
             incoming = 0;
         }

-        // temporary vectors to store the routing results
-        vector<LinkID> output_links;
-        vector<NetDest> output_link_destinations;
-
        // Is there a message waiting?
        if (m_in[incoming].size() <= vnet) {
            continue;
@@ -136,139 +129,152 @@ PerfectSwitch::operateVnet(int vnet)
             continue;
         }

-        while (buffer->isReady()) {
-            DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
-
-            // Peek at message
-            msg_ptr = buffer->peekMsgPtr();
-            net_msg_ptr = msg_ptr.get();
-            DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
-
-            output_links.clear();
-            output_link_destinations.clear();
-            NetDest msg_dsts = net_msg_ptr->getDestination();
-
-            // Unfortunately, the token-protocol sends some
-            // zero-destination messages, so this assert isn't valid
-            // assert(msg_dsts.count() > 0);
-
-            assert(m_link_order.size() == m_routing_table.size());
-            assert(m_link_order.size() == m_out.size());
-
-            if (m_network_ptr->getAdaptiveRouting()) {
-                if (m_network_ptr->isVNetOrdered(vnet)) {
-                    // Don't adaptively route
-                    for (int out = 0; out < m_out.size(); out++) {
-                        m_link_order[out].m_link = out;
-                        m_link_order[out].m_value = 0;
-                    }
-                } else {
-                    // Find how clogged each link is
-                    for (int out = 0; out < m_out.size(); out++) {
-                        int out_queue_length = 0;
-                        for (int v = 0; v < m_virtual_networks; v++) {
-                            out_queue_length += m_out[out][v]->getSize();
-                        }
-                        int value =
-                            (out_queue_length << 8) |
-                            random_mt.random(0, 0xff);
-                        m_link_order[out].m_link = out;
-                        m_link_order[out].m_value = value;
-                    }
-
-                    // Look at the most empty link first
-                    sort(m_link_order.begin(), m_link_order.end());
-                }
-            }
-
-            for (int i = 0; i < m_routing_table.size(); i++) {
-                // pick the next link to look at
-                int link = m_link_order[i].m_link;
-                NetDest dst = m_routing_table[link];
-                DPRINTF(RubyNetwork, "dst: %s\n", dst);
-
-                if (!msg_dsts.intersectionIsNotEmpty(dst))
-                    continue;
-
-                // Remember what link we're using
-                output_links.push_back(link);
-
-                // Need to remember which destinations need this message in
-                // another vector. This Set is the intersection of the
-                // routing_table entry and the current destination set. The
-                // intersection must not be empty, since we are inside "if"
-                output_link_destinations.push_back(msg_dsts.AND(dst));
-
-                // Next, we update the msg_destination not to include
-                // those nodes that were already handled by this link
-                msg_dsts.removeNetDest(dst);
-            }
-
-            assert(msg_dsts.count() == 0);
-
-            // Check for resources - for all outgoing queues
-            bool enough = true;
-            for (int i = 0; i < output_links.size(); i++) {
-                int outgoing = output_links[i];
-
-                if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
-                    enough = false;
-
-                DPRINTF(RubyNetwork, "Checking if node is blocked ..."
-                        "outgoing: %d, vnet: %d, enough: %d\n",
-                        outgoing, vnet, enough);
-            }
-
-            // There were not enough resources
-            if (!enough) {
-                scheduleEvent(Cycles(1));
-                DPRINTF(RubyNetwork, "Can't deliver message since a node "
-                        "is blocked\n");
-                DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
-                break; // go to next incoming port
-            }
-
-            MsgPtr unmodified_msg_ptr;
-
-            if (output_links.size() > 1) {
-                // If we are sending this message down more than one link
-                // (size>1), we need to make a copy of the message so each
-                // branch can have a different internal destination we need
-                // to create an unmodified MsgPtr because the MessageBuffer
-                // enqueue func will modify the message
-
-                // This magic line creates a private copy of the message
-                unmodified_msg_ptr = msg_ptr->clone();
-            }
-
-            // Dequeue msg
-            buffer->dequeue();
-            m_pending_message_count[vnet]--;
-
-            // Enqueue it - for all outgoing queues
-            for (int i=0; i<output_links.size(); i++) {
-                int outgoing = output_links[i];
-
-                if (i > 0) {
-                    // create a private copy of the unmodified message
-                    msg_ptr = unmodified_msg_ptr->clone();
-                }
-
-                // Change the internal destination set of the message so it
-                // knows which destinations this link is responsible for.
-                net_msg_ptr = msg_ptr.get();
-                net_msg_ptr->getDestination() =
-                    output_link_destinations[i];
-
-                // Enqeue msg
-                DPRINTF(RubyNetwork, "Enqueuing net msg from "
-                        "inport[%d][%d] to outport [%d][%d].\n",
-                        incoming, vnet, outgoing, vnet);
-
-                m_out[outgoing][vnet]->enqueue(msg_ptr);
-            }
-        }
-    }
-}
+        operateMessageBuffer(buffer, incoming, vnet);
+    }
+}
+
+void
+PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
+                                    int vnet)
+{
+    MsgPtr msg_ptr;
+    Message *net_msg_ptr = NULL;
+
+    // temporary vectors to store the routing results
+    vector<LinkID> output_links;
+    vector<NetDest> output_link_destinations;
+
+    while (buffer->isReady()) {
+        DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
+
+        // Peek at message
+        msg_ptr = buffer->peekMsgPtr();
+        net_msg_ptr = msg_ptr.get();
+        DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+
+        output_links.clear();
+        output_link_destinations.clear();
+        NetDest msg_dsts = net_msg_ptr->getDestination();
+
+        // Unfortunately, the token-protocol sends some
+        // zero-destination messages, so this assert isn't valid
+        // assert(msg_dsts.count() > 0);
+
+        assert(m_link_order.size() == m_routing_table.size());
+        assert(m_link_order.size() == m_out.size());
+
+        if (m_network_ptr->getAdaptiveRouting()) {
+            if (m_network_ptr->isVNetOrdered(vnet)) {
+                // Don't adaptively route
+                for (int out = 0; out < m_out.size(); out++) {
+                    m_link_order[out].m_link = out;
+                    m_link_order[out].m_value = 0;
+                }
+            } else {
+                // Find how clogged each link is
+                for (int out = 0; out < m_out.size(); out++) {
+                    int out_queue_length = 0;
+                    for (int v = 0; v < m_virtual_networks; v++) {
+                        out_queue_length += m_out[out][v]->getSize();
+                    }
+                    int value =
+                        (out_queue_length << 8) |
+                        random_mt.random(0, 0xff);
+                    m_link_order[out].m_link = out;
+                    m_link_order[out].m_value = value;
+                }
+
+                // Look at the most empty link first
+                sort(m_link_order.begin(), m_link_order.end());
+            }
+        }
+
+        for (int i = 0; i < m_routing_table.size(); i++) {
+            // pick the next link to look at
+            int link = m_link_order[i].m_link;
+            NetDest dst = m_routing_table[link];
+            DPRINTF(RubyNetwork, "dst: %s\n", dst);
+
+            if (!msg_dsts.intersectionIsNotEmpty(dst))
+                continue;
+
+            // Remember what link we're using
+            output_links.push_back(link);
+
+            // Need to remember which destinations need this message in
+            // another vector. This Set is the intersection of the
+            // routing_table entry and the current destination set. The
+            // intersection must not be empty, since we are inside "if"
+            output_link_destinations.push_back(msg_dsts.AND(dst));
+
+            // Next, we update the msg_destination not to include
+            // those nodes that were already handled by this link
+            msg_dsts.removeNetDest(dst);
+        }
+
+        assert(msg_dsts.count() == 0);
+
+        // Check for resources - for all outgoing queues
+        bool enough = true;
+        for (int i = 0; i < output_links.size(); i++) {
+            int outgoing = output_links[i];
+
+            if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
+                enough = false;
+
+            DPRINTF(RubyNetwork, "Checking if node is blocked ..."
+                    "outgoing: %d, vnet: %d, enough: %d\n",
+                    outgoing, vnet, enough);
+        }
+
+        // There were not enough resources
+        if (!enough) {
+            scheduleEvent(Cycles(1));
+            DPRINTF(RubyNetwork, "Can't deliver message since a node "
+                    "is blocked\n");
+            DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+            break; // go to next incoming port
+        }
+
+        MsgPtr unmodified_msg_ptr;
+
+        if (output_links.size() > 1) {
+            // If we are sending this message down more than one link
+            // (size>1), we need to make a copy of the message so each
+            // branch can have a different internal destination we need
+            // to create an unmodified MsgPtr because the MessageBuffer
+            // enqueue func will modify the message
+
+            // This magic line creates a private copy of the message
+            unmodified_msg_ptr = msg_ptr->clone();
+        }
+
+        // Dequeue msg
+        buffer->dequeue();
+        m_pending_message_count[vnet]--;
+
+        // Enqueue it - for all outgoing queues
+        for (int i=0; i<output_links.size(); i++) {
+            int outgoing = output_links[i];
+
+            if (i > 0) {
+                // create a private copy of the unmodified message
+                msg_ptr = unmodified_msg_ptr->clone();
+            }
+
+            // Change the internal destination set of the message so it
+            // knows which destinations this link is responsible for.
+            net_msg_ptr = msg_ptr.get();
+            net_msg_ptr->getDestination() = output_link_destinations[i];
+
+            // Enqeue msg
+            DPRINTF(RubyNetwork, "Enqueuing net msg from "
+                    "inport[%d][%d] to outport [%d][%d].\n",
+                    incoming, vnet, outgoing, vnet);
+
+            m_out[outgoing][vnet]->enqueue(msg_ptr);
+        }
+    }
+}
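
An aside on the adaptive-routing key preserved by the move:
(out_queue_length << 8) | random_mt.random(0, 0xff) packs each link's
total queue occupancy into the upper bits and a random byte into the
lower eight, so sorting m_link_order tries the least-congested link
first and breaks ties between equally loaded links at random. A
standalone sketch of that encoding (illustrative only, not gem5 code):

    #include <algorithm>
    #include <cstdio>
    #include <random>
    #include <utility>
    #include <vector>

    int main() {
        std::mt19937 rng(42);
        std::uniform_int_distribution<int> byte(0, 0xff);
        std::vector<int> queue_len = {3, 0, 0, 7};  // per-link occupancy

        std::vector<std::pair<int, int>> order;  // {key, link}
        for (int link = 0; link < (int)queue_len.size(); link++)
            order.push_back({(queue_len[link] << 8) | byte(rng), link});

        // Occupancy dominates the sort; the random low byte only
        // decides between equally loaded links (here, links 1 and 2).
        std::sort(order.begin(), order.end());
        for (auto &p : order)
            std::printf("link %d (key 0x%04x)\n", p.second, p.first);
        return 0;
    }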

src/mem/ruby/network/simple/PerfectSwitch.hh

@@ -85,6 +85,7 @@ class PerfectSwitch : public Consumer
     PerfectSwitch& operator=(const PerfectSwitch& obj);

     void operateVnet(int vnet);
+    void operateMessageBuffer(MessageBuffer *b, int incoming, int vnet);

     const SwitchID m_switch_id;
     Switch * const m_switch;
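
One more detail that moves over verbatim: the clone-before-enqueue
step. When a message fans out over several output links, every copy
past the first must be cloned from an unmodified master, because
enqueueing rewrites the message's internal destination set. A
standalone sketch of the same pattern (hypothetical Msg type, not gem5
code):

    #include <cstddef>
    #include <cstdio>
    #include <memory>
    #include <vector>

    struct Msg {
        int dests;  // bitmask of destination nodes
        std::shared_ptr<Msg> clone() const {
            return std::make_shared<Msg>(*this);
        }
    };

    int main() {
        auto msg = std::make_shared<Msg>(Msg{0b1011});
        std::vector<int> link_dests = {0b0011, 0b1000};  // per-link subsets

        // Keep a pristine master copy: narrowing dests below mutates msg.
        std::shared_ptr<Msg> unmodified =
            link_dests.size() > 1 ? msg->clone() : nullptr;

        for (std::size_t i = 0; i < link_dests.size(); i++) {
            if (i > 0)
                msg = unmodified->clone();  // private copy per extra link
            msg->dests = link_dests[i];     // this link's share of targets
            std::printf("enqueue on link %zu, dests %#x\n", i, msg->dests);
        }
        return 0;
    }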