ruby: Recycle latency fix for hammer

Patch allows each individual message buffer to have different recycle latencies
and allows the overall recycle latency to be specified at the command line. The
patch also adds profiling info to make sure no one processor's requests are
recycled too much.
This commit is contained in:
Brad Beckmann 2010-08-20 11:46:14 -07:00
parent f57053473a
commit af6b97e3ee
6 changed files with 45 additions and 23 deletions

View file

@ -105,6 +105,9 @@ def create_system(options, system, piobus, dma_devices):
no_mig_atomic = not \
options.allow_atomic_migration)
if options.recycle_latency:
l1_cntrl.recycle_latency = options.recycle_latency
exec("system.l1_cntrl%d = l1_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
@ -164,6 +167,9 @@ def create_system(options, system, piobus, dma_devices):
probe_filter_enabled = \
options.pf_on)
if options.recycle_latency:
dir_cntrl.recycle_latency = options.recycle_latency
exec("system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
@ -186,6 +192,9 @@ def create_system(options, system, piobus, dma_devices):
dma_cntrl.dma_sequencer.port = dma_device.dma
dma_cntrl_nodes.append(dma_cntrl)
if options.recycle_latency:
dma_cntrl.recycle_latency = options.recycle_latency
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
return (cpu_sequencers, dir_cntrl_nodes, all_cntrls)

View file

@ -54,6 +54,9 @@ def define_options(parser):
parser.add_option("--ruby-debug", action="store_true", default=False)
parser.add_option("--ruby-debug-cycle", type="int", default=1)
parser.add_option("--recycle-latency", type="int", default=10,
help="Recycle latency for ruby controller input buffers")
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
eval("%s.define_options(parser)" % protocol)

View file

@ -52,7 +52,7 @@ machine(Directory, "AMD Hammer-like protocol")
MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false";
MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false";
MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", recycle_latency="1";
MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true";
// STATES
@ -309,6 +309,22 @@ machine(Directory, "AMD Hammer-like protocol")
}
}
// off-chip memory request/response is done
// Drains completions from the off-chip memory controller queue (memBuffer).
in_port(memQueue_in, MemoryMsg, memBuffer) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
// A finished read supplies data; a finished writeback is only an ack.
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.Address);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Address);
} else {
// Any other memory message type is a protocol bug; dump it and halt.
DEBUG_EXPR(in_msg.Type);
error("Invalid message");
}
}
}
}
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
@ -333,22 +349,6 @@ machine(Directory, "AMD Hammer-like protocol")
}
}
// off-chip memory request/response is done
// Drains completions from the off-chip memory controller queue (memBuffer).
in_port(memQueue_in, MemoryMsg, memBuffer) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
// A finished read supplies data; a finished writeback is only an ack.
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.Address);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Address);
} else {
// Any other memory message type is a protocol bug; dump it and halt.
DEBUG_EXPR(in_msg.Type);
error("Invalid message");
}
}
}
}
// Actions
action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
@ -766,6 +766,9 @@ machine(Directory, "AMD Hammer-like protocol")
}
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
// Record the sender in the transition trace before dequeueing so
// debug/profiling output identifies who unblocked the directory.
peek(unblockNetwork_in, ResponseMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Sender);
}
unblockNetwork_in.dequeue();
}
@ -880,6 +883,9 @@ machine(Directory, "AMD Hammer-like protocol")
}
action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
// Tag the transition with the requestor so the trace shows, per
// processor, how often requests are recycled (per this commit's goal
// of checking that no one processor's requests recycle too much).
peek(requestQueue_in, RequestMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Requestor);
}
requestQueue_in.recycle();
}

View file

@ -167,7 +167,7 @@ class MessageBuffer
int m_not_avail_count; // count the # of times I didn't have N
// slots available
int m_msg_counter;
uint64 m_msg_counter;
int m_priority_rank;
bool m_strict_fifo;
bool m_ordering_set;

View file

@ -54,7 +54,7 @@ class MessageBufferNode
public:
Time m_time;
int m_msg_counter; // FIXME, should this be a 64-bit value?
uint64 m_msg_counter; // widened to 64 bits, resolving the earlier FIXME about overflow
MsgPtr m_msgptr;
};

View file

@ -494,6 +494,7 @@ $c_ident::init()
if vtype.isBuffer and \
"rank" in var and "trigger_queue" not in var:
code('$vid->setPriority(${{var["rank"]}});')
else:
# Network port object
network = var["network"]
@ -537,6 +538,13 @@ $vid->setDescription("[Version " + to_string(m_version) + ", ${ident}, name=${{v
''')
if vtype.isBuffer:
if "recycle_latency" in var:
code('$vid->setRecycleLatency(${{var["recycle_latency"]}});')
else:
code('$vid->setRecycleLatency(m_recycle_latency);')
# Set the queue consumers
code.insert_newline()
for port in self.in_ports:
@ -562,10 +570,6 @@ $vid->setDescription("[Version " + to_string(m_version) + ", ${ident}, name=${{v
event = "%s_Event_%s" % (self.ident, trans.event.ident)
code('m_profiler.possibleTransition($state, $event);')
# added by SS to initialize recycle_latency of message buffers
for buf in self.message_buffer_names:
code("$buf->setRecycleLatency(m_recycle_latency);")
code.dedent()
code('}')