ruby: perfect switch: refactor code
Refactored the code in operateVnet(), moving part of it into a new function, operateMessageBuffer().
parent a706b6259a
commit 5f1d1ce5d4
2 changed files with 136 additions and 129 deletions
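For orientation, the sketch below shows the shape of the change with toy stand-ins (ToySwitch, a deque-based MessageBuffer, and the trivial "deliver" step are illustrative only, not the Ruby classes): operateVnet() keeps the round-robin scan over the input ports and hands every ready buffer to the new operateMessageBuffer(), which now owns the per-message work. The actual diff follows.

#include <cstdio>
#include <deque>
#include <vector>

// Toy stand-ins; only the member names mirror PerfectSwitch.
struct Message { int dest; };
using MessageBuffer = std::deque<Message>;

class ToySwitch
{
  public:
    ToySwitch() : m_in(2), m_round_robin_start(0) {}

    // After the refactor, operateVnet() only walks the input ports in
    // round-robin order and delegates each ready buffer to the helper.
    void operateVnet(int vnet)
    {
        size_t incoming = m_round_robin_start;
        m_round_robin_start = (m_round_robin_start + 1) % m_in.size();

        for (size_t counter = 0; counter < m_in.size(); counter++) {
            incoming = (incoming + 1) % m_in.size();
            MessageBuffer *buffer = &m_in[incoming];
            while (!buffer->empty())
                operateMessageBuffer(buffer, incoming, vnet);
        }
    }

    void push(size_t port, Message msg) { m_in.at(port).push_back(msg); }

  private:
    // The per-message steps (peek, route, dequeue, enqueue) live here now.
    void operateMessageBuffer(MessageBuffer *buffer, size_t incoming, int vnet)
    {
        Message msg = buffer->front();
        buffer->pop_front();
        std::printf("vnet %d: port %zu delivered msg for dest %d\n",
                    vnet, incoming, msg.dest);
    }

    std::vector<MessageBuffer> m_in;
    size_t m_round_robin_start;
};

int main()
{
    ToySwitch sw;
    sw.push(0, Message{3});
    sw.push(1, Message{1});
    sw.operateVnet(0);   // drains both ports via operateMessageBuffer()
    return 0;
}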
@@ -104,9 +104,6 @@ PerfectSwitch::~PerfectSwitch()
 void
 PerfectSwitch::operateVnet(int vnet)
 {
-    MsgPtr msg_ptr;
-    Message *net_msg_ptr = NULL;
-
     // This is for round-robin scheduling
     int incoming = m_round_robin_start;
     m_round_robin_start++;
@@ -123,10 +120,6 @@ PerfectSwitch::operateVnet(int vnet)
                 incoming = 0;
             }

-            // temporary vectors to store the routing results
-            vector<LinkID> output_links;
-            vector<NetDest> output_link_destinations;
-
             // Is there a message waiting?
             if (m_in[incoming].size() <= vnet) {
                 continue;
@@ -137,139 +130,152 @@ PerfectSwitch::operateVnet(int vnet)
                 continue;
             }

             while (buffer->isReady()) {
                 DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
-
-                // Peek at message
-                msg_ptr = buffer->peekMsgPtr();
-                net_msg_ptr = msg_ptr.get();
-                DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
-
-                output_links.clear();
-                output_link_destinations.clear();
-                NetDest msg_dsts = net_msg_ptr->getDestination();
-
-                // Unfortunately, the token-protocol sends some
-                // zero-destination messages, so this assert isn't valid
-                // assert(msg_dsts.count() > 0);
-
-                assert(m_link_order.size() == m_routing_table.size());
-                assert(m_link_order.size() == m_out.size());
-
-                if (m_network_ptr->getAdaptiveRouting()) {
-                    if (m_network_ptr->isVNetOrdered(vnet)) {
-                        // Don't adaptively route
-                        for (int out = 0; out < m_out.size(); out++) {
-                            m_link_order[out].m_link = out;
-                            m_link_order[out].m_value = 0;
-                        }
-                    } else {
-                        // Find how clogged each link is
-                        for (int out = 0; out < m_out.size(); out++) {
-                            int out_queue_length = 0;
-                            for (int v = 0; v < m_virtual_networks; v++) {
-                                out_queue_length += m_out[out][v]->getSize();
-                            }
-                            int value =
-                                (out_queue_length << 8) |
-                                random_mt.random(0, 0xff);
-                            m_link_order[out].m_link = out;
-                            m_link_order[out].m_value = value;
-                        }
-
-                        // Look at the most empty link first
-                        sort(m_link_order.begin(), m_link_order.end());
-                    }
-                }
-
-                for (int i = 0; i < m_routing_table.size(); i++) {
-                    // pick the next link to look at
-                    int link = m_link_order[i].m_link;
-                    NetDest dst = m_routing_table[link];
-                    DPRINTF(RubyNetwork, "dst: %s\n", dst);
-
-                    if (!msg_dsts.intersectionIsNotEmpty(dst))
-                        continue;
-
-                    // Remember what link we're using
-                    output_links.push_back(link);
-
-                    // Need to remember which destinations need this message in
-                    // another vector. This Set is the intersection of the
-                    // routing_table entry and the current destination set. The
-                    // intersection must not be empty, since we are inside "if"
-                    output_link_destinations.push_back(msg_dsts.AND(dst));
-
-                    // Next, we update the msg_destination not to include
-                    // those nodes that were already handled by this link
-                    msg_dsts.removeNetDest(dst);
-                }
-
-                assert(msg_dsts.count() == 0);
-
-                // Check for resources - for all outgoing queues
-                bool enough = true;
-                for (int i = 0; i < output_links.size(); i++) {
-                    int outgoing = output_links[i];
-
-                    if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
-                        enough = false;
-
-                    DPRINTF(RubyNetwork, "Checking if node is blocked ..."
-                            "outgoing: %d, vnet: %d, enough: %d\n",
-                            outgoing, vnet, enough);
-                }
-
-                // There were not enough resources
-                if (!enough) {
-                    scheduleEvent(Cycles(1));
-                    DPRINTF(RubyNetwork, "Can't deliver message since a node "
-                            "is blocked\n");
-                    DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
-                    break; // go to next incoming port
-                }
-
-                MsgPtr unmodified_msg_ptr;
-
-                if (output_links.size() > 1) {
-                    // If we are sending this message down more than one link
-                    // (size>1), we need to make a copy of the message so each
-                    // branch can have a different internal destination we need
-                    // to create an unmodified MsgPtr because the MessageBuffer
-                    // enqueue func will modify the message
-
-                    // This magic line creates a private copy of the message
-                    unmodified_msg_ptr = msg_ptr->clone();
-                }
-
-                // Dequeue msg
-                buffer->dequeue();
-                m_pending_message_count[vnet]--;
-
-                // Enqueue it - for all outgoing queues
-                for (int i=0; i<output_links.size(); i++) {
-                    int outgoing = output_links[i];
-
-                    if (i > 0) {
-                        // create a private copy of the unmodified message
-                        msg_ptr = unmodified_msg_ptr->clone();
-                    }
-
-                    // Change the internal destination set of the message so it
-                    // knows which destinations this link is responsible for.
-                    net_msg_ptr = msg_ptr.get();
-                    net_msg_ptr->getDestination() =
-                        output_link_destinations[i];
-
-                    // Enqeue msg
-                    DPRINTF(RubyNetwork, "Enqueuing net msg from "
-                            "inport[%d][%d] to outport [%d][%d].\n",
-                            incoming, vnet, outgoing, vnet);
-
-                    m_out[outgoing][vnet]->enqueue(msg_ptr);
-                }
+                operateMessageBuffer(buffer, incoming, vnet);
             }
         }
     }
 }
+
+void
+PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
+                                    int vnet)
+{
+    MsgPtr msg_ptr;
+    Message *net_msg_ptr = NULL;
+
+    // temporary vectors to store the routing results
+    vector<LinkID> output_links;
+    vector<NetDest> output_link_destinations;
+
+    while (buffer->isReady()) {
+        DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
+
+        // Peek at message
+        msg_ptr = buffer->peekMsgPtr();
+        net_msg_ptr = msg_ptr.get();
+        DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+
+        output_links.clear();
+        output_link_destinations.clear();
+        NetDest msg_dsts = net_msg_ptr->getDestination();
+
+        // Unfortunately, the token-protocol sends some
+        // zero-destination messages, so this assert isn't valid
+        // assert(msg_dsts.count() > 0);
+
+        assert(m_link_order.size() == m_routing_table.size());
+        assert(m_link_order.size() == m_out.size());
+
+        if (m_network_ptr->getAdaptiveRouting()) {
+            if (m_network_ptr->isVNetOrdered(vnet)) {
+                // Don't adaptively route
+                for (int out = 0; out < m_out.size(); out++) {
+                    m_link_order[out].m_link = out;
+                    m_link_order[out].m_value = 0;
+                }
+            } else {
+                // Find how clogged each link is
+                for (int out = 0; out < m_out.size(); out++) {
+                    int out_queue_length = 0;
+                    for (int v = 0; v < m_virtual_networks; v++) {
+                        out_queue_length += m_out[out][v]->getSize();
+                    }
+                    int value =
+                        (out_queue_length << 8) |
+                        random_mt.random(0, 0xff);
+                    m_link_order[out].m_link = out;
+                    m_link_order[out].m_value = value;
+                }
+
+                // Look at the most empty link first
+                sort(m_link_order.begin(), m_link_order.end());
+            }
+        }
+
+        for (int i = 0; i < m_routing_table.size(); i++) {
+            // pick the next link to look at
+            int link = m_link_order[i].m_link;
+            NetDest dst = m_routing_table[link];
+            DPRINTF(RubyNetwork, "dst: %s\n", dst);
+
+            if (!msg_dsts.intersectionIsNotEmpty(dst))
+                continue;
+
+            // Remember what link we're using
+            output_links.push_back(link);
+
+            // Need to remember which destinations need this message in
+            // another vector. This Set is the intersection of the
+            // routing_table entry and the current destination set. The
+            // intersection must not be empty, since we are inside "if"
+            output_link_destinations.push_back(msg_dsts.AND(dst));
+
+            // Next, we update the msg_destination not to include
+            // those nodes that were already handled by this link
+            msg_dsts.removeNetDest(dst);
+        }
+
+        assert(msg_dsts.count() == 0);
+
+        // Check for resources - for all outgoing queues
+        bool enough = true;
+        for (int i = 0; i < output_links.size(); i++) {
+            int outgoing = output_links[i];
+
+            if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
+                enough = false;
+
+            DPRINTF(RubyNetwork, "Checking if node is blocked ..."
+                    "outgoing: %d, vnet: %d, enough: %d\n",
+                    outgoing, vnet, enough);
+        }
+
+        // There were not enough resources
+        if (!enough) {
+            scheduleEvent(Cycles(1));
+            DPRINTF(RubyNetwork, "Can't deliver message since a node "
+                    "is blocked\n");
+            DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+            break; // go to next incoming port
+        }
+
+        MsgPtr unmodified_msg_ptr;
+
+        if (output_links.size() > 1) {
+            // If we are sending this message down more than one link
+            // (size>1), we need to make a copy of the message so each
+            // branch can have a different internal destination we need
+            // to create an unmodified MsgPtr because the MessageBuffer
+            // enqueue func will modify the message
+
+            // This magic line creates a private copy of the message
+            unmodified_msg_ptr = msg_ptr->clone();
+        }
+
+        // Dequeue msg
+        buffer->dequeue();
+        m_pending_message_count[vnet]--;
+
+        // Enqueue it - for all outgoing queues
+        for (int i=0; i<output_links.size(); i++) {
+            int outgoing = output_links[i];
+
+            if (i > 0) {
+                // create a private copy of the unmodified message
+                msg_ptr = unmodified_msg_ptr->clone();
+            }
+
+            // Change the internal destination set of the message so it
+            // knows which destinations this link is responsible for.
+            net_msg_ptr = msg_ptr.get();
+            net_msg_ptr->getDestination() = output_link_destinations[i];
+
+            // Enqeue msg
+            DPRINTF(RubyNetwork, "Enqueuing net msg from "
+                    "inport[%d][%d] to outport [%d][%d].\n",
+                    incoming, vnet, outgoing, vnet);
+
+            m_out[outgoing][vnet]->enqueue(msg_ptr);
+        }
+    }
+}
@@ -85,6 +85,7 @@ class PerfectSwitch : public Consumer
     PerfectSwitch& operator=(const PerfectSwitch& obj);

     void operateVnet(int vnet);
+    void operateMessageBuffer(MessageBuffer *b, int incoming, int vnet);

     SwitchID m_switch_id;
