ruby: message buffers: significant changes

This patch is the final patch in a series of patches.  The aim of the series
is to make ruby more configurable than it was.  More specifically, changing
the connections between controllers is not at all possible (unless one is
ready to make significant changes to the coherence protocol).  Moreover, the buffers
themselves are magically connected to the network inside the slicc code.
These connections are not part of the configuration file.

This patch makes changes so that these connections will now be made in the
python configuration files associated with the protocols.  This requires
each state machine to expose the message buffers it uses for input and output.
So, the patch makes these buffers configurable members of the machines.

The patch drops the slicc code that used to connect these buffers to the
network.  Now these buffers are exposed to the python configuration system
as Master and Slave ports.  In the configuration files, any master port
can be connected to any slave port.  The file pyobject.cc has been modified to
take care of allocating the actual message buffer.  This is in line with how
other port connections work.
This commit is contained in:
Nilay Vaish 2014-09-01 16:55:47 -05:00
parent 00286fc5cb
commit 7a0d5aafe4
54 changed files with 940 additions and 671 deletions

View file

@ -129,7 +129,19 @@ def create_system(options, system, dma_ports, ruby_system):
cpu_sequencers.append(cpu_seq)
l0_cntrl_nodes.append(l0_cntrl)
l1_cntrl_nodes.append(l1_cntrl)
l0_cntrl.peer = l1_cntrl
# Connect the L0 and L1 controllers
l0_cntrl.bufferToL1 = l1_cntrl.bufferFromL0
l0_cntrl.bufferFromL1 = l1_cntrl.bufferToL0
# Connect the L1 controllers and the network
l1_cntrl.requestToL2 = ruby_system.network.slave
l1_cntrl.responseToL2 = ruby_system.network.slave
l1_cntrl.unblockToL2 = ruby_system.network.slave
l1_cntrl.requestFromL2 = ruby_system.network.master
l1_cntrl.responseFromL2 = ruby_system.network.master
for j in xrange(num_l2caches_per_cluster):
l2_cache = L2Cache(size = options.l2_size,
@ -146,6 +158,15 @@ def create_system(options, system, dma_ports, ruby_system):
i * num_l2caches_per_cluster + j))
l2_cntrl_nodes.append(l2_cntrl)
# Connect the L2 controllers and the network
l2_cntrl.DirRequestFromL2Cache = ruby_system.network.slave
l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
l2_cntrl.responseFromL2Cache = ruby_system.network.slave
l2_cntrl.unblockToL2Cache = ruby_system.network.master
l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
l2_cntrl.responseToL2Cache = ruby_system.network.master
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
@ -183,6 +204,11 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
# Connect the directory controllers and the network
dir_cntrl.requestToDir = ruby_system.network.master
dir_cntrl.responseToDir = ruby_system.network.master
dir_cntrl.responseFromDir = ruby_system.network.slave
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller

View file

@ -108,12 +108,19 @@ def create_system(options, system, dma_ports, ruby_system):
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L1 controllers and the network
l1_cntrl.requestFromL1Cache = ruby_system.network.slave
l1_cntrl.responseFromL1Cache = ruby_system.network.slave
l1_cntrl.unblockFromL1Cache = ruby_system.network.slave
l1_cntrl.requestToL1Cache = ruby_system.network.master
l1_cntrl.responseToL1Cache = ruby_system.network.master
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
@ -132,10 +139,21 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
# Connect the L2 controllers and the network
l2_cntrl.DirRequestFromL2Cache = ruby_system.network.slave
l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
l2_cntrl.responseFromL2Cache = ruby_system.network.slave
l2_cntrl.unblockToL2Cache = ruby_system.network.master
l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
l2_cntrl.responseToL2Cache = ruby_system.network.master
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system
# clk_divider value is a fix to pass regression.
@ -169,10 +187,14 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
# Connect the directory controllers and the network
dir_cntrl.requestToDir = ruby_system.network.master
dir_cntrl.responseToDir = ruby_system.network.master
dir_cntrl.responseFromDir = ruby_system.network.slave
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
#
dma_seq = DMASequencer(version = i,
ruby_system = ruby_system)
@ -185,6 +207,11 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
# Connect the dma controller to the network
dma_cntrl.responseFromDir = ruby_system.network.master
dma_cntrl.requestToDir = ruby_system.network.slave
all_cntrls = l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \

View file

@ -94,12 +94,17 @@ def create_system(options, system, dma_ports, ruby_system):
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L1 controllers and the network
l1_cntrl.requestFromCache = ruby_system.network.slave
l1_cntrl.responseFromCache = ruby_system.network.slave
l1_cntrl.forwardToCache = ruby_system.network.master
l1_cntrl.responseToCache = ruby_system.network.master
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
@ -139,6 +144,15 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
# Connect the directory controllers and the network
dir_cntrl.requestToDir = ruby_system.network.master
dir_cntrl.dmaRequestToDir = ruby_system.network.master
dir_cntrl.responseFromDir = ruby_system.network.slave
dir_cntrl.dmaResponseFromDir = ruby_system.network.slave
dir_cntrl.forwardFromDir = ruby_system.network.slave
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
@ -155,8 +169,11 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
# Connect the directory controllers and the network
dma_cntrl.requestToDir = ruby_system.network.master
dma_cntrl.responseFromDir = ruby_system.network.slave
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)

View file

@ -104,12 +104,17 @@ def create_system(options, system, dma_ports, ruby_system):
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L1 controllers and the network
l1_cntrl.requestFromL1Cache = ruby_system.network.slave
l1_cntrl.responseFromL1Cache = ruby_system.network.slave
l1_cntrl.requestToL1Cache = ruby_system.network.master
l1_cntrl.responseToL1Cache = ruby_system.network.master
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
@ -128,10 +133,21 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
# Connect the L2 controllers and the network
l2_cntrl.GlobalRequestFromL2Cache = ruby_system.network.slave
l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
l2_cntrl.responseFromL2Cache = ruby_system.network.slave
l2_cntrl.GlobalRequestToL2Cache = ruby_system.network.master
l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
l2_cntrl.responseToL2Cache = ruby_system.network.master
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system.
# clk_divider value is a fix to pass regression.
@ -164,6 +180,13 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
# Connect the directory controllers and the network
dir_cntrl.requestToDir = ruby_system.network.master
dir_cntrl.responseToDir = ruby_system.network.master
dir_cntrl.responseFromDir = ruby_system.network.slave
dir_cntrl.forwardFromDir = ruby_system.network.slave
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
@ -180,11 +203,11 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
all_cntrls = l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \
dma_cntrl_nodes
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)

View file

@ -124,12 +124,20 @@ def create_system(options, system, dma_ports, ruby_system):
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L1 controllers and the network
l1_cntrl.requestFromL1Cache = ruby_system.network.slave
l1_cntrl.responseFromL1Cache = ruby_system.network.slave
l1_cntrl.persistentFromL1Cache = ruby_system.network.slave
l1_cntrl.requestToL1Cache = ruby_system.network.master
l1_cntrl.responseToL1Cache = ruby_system.network.master
l1_cntrl.persistentToL1Cache = ruby_system.network.master
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
@ -149,6 +157,17 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
# Connect the L2 controllers and the network
l2_cntrl.GlobalRequestFromL2Cache = ruby_system.network.slave
l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
l2_cntrl.responseFromL2Cache = ruby_system.network.slave
l2_cntrl.GlobalRequestToL2Cache = ruby_system.network.master
l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
l2_cntrl.responseToL2Cache = ruby_system.network.master
l2_cntrl.persistentToL2Cache = ruby_system.network.master
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
@ -186,6 +205,18 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
# Connect the directory controllers and the network
dir_cntrl.requestToDir = ruby_system.network.master
dir_cntrl.responseToDir = ruby_system.network.master
dir_cntrl.persistentToDir = ruby_system.network.master
dir_cntrl.dmaRequestToDir = ruby_system.network.master
dir_cntrl.requestFromDir = ruby_system.network.slave
dir_cntrl.responseFromDir = ruby_system.network.slave
dir_cntrl.persistentFromDir = ruby_system.network.slave
dir_cntrl.dmaResponseFromDir = ruby_system.network.slave
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller

View file

@ -119,12 +119,22 @@ def create_system(options, system, dma_ports, ruby_system):
l1_cntrl.recycle_latency = options.recycle_latency
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L1 controller and the network
# Connect the buffers from the controller to network
l1_cntrl.requestFromCache = ruby_system.network.slave
l1_cntrl.responseFromCache = ruby_system.network.slave
l1_cntrl.unblockFromCache = ruby_system.network.slave
# Connect the buffers from the network to the controller
l1_cntrl.forwardToCache = ruby_system.network.master
l1_cntrl.responseToCache = ruby_system.network.master
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
@ -198,6 +208,17 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
# Connect the directory controller to the network
dir_cntrl.forwardFromDir = ruby_system.network.slave
dir_cntrl.responseFromDir = ruby_system.network.slave
dir_cntrl.dmaResponseFromDir = ruby_system.network.slave
dir_cntrl.unblockToDir = ruby_system.network.master
dir_cntrl.responseToDir = ruby_system.network.master
dir_cntrl.requestToDir = ruby_system.network.master
dir_cntrl.dmaRequestToDir = ruby_system.network.master
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
@ -217,7 +238,11 @@ def create_system(options, system, dma_ports, ruby_system):
if options.recycle_latency:
dma_cntrl.recycle_latency = options.recycle_latency
# Connect the dma controller to the network
dma_cntrl.responseFromDir = ruby_system.network.slave
dma_cntrl.requestToDir = ruby_system.network.master
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)

View file

@ -91,12 +91,16 @@ def create_system(options, system, dma_ports, ruby_system):
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L1 controllers and the network
l1_cntrl.requestFromCache = ruby_system.network.slave
l1_cntrl.responseFromCache = ruby_system.network.slave
l1_cntrl.forwardFromCache = ruby_system.network.slave
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
@ -114,6 +118,12 @@ def create_system(options, system, dma_ports, ruby_system):
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
# Connect the directory controllers and the network
dir_cntrl.requestToDir = ruby_system.network.master
dir_cntrl.forwardToDir = ruby_system.network.master
dir_cntrl.responseToDir = ruby_system.network.master
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)

View file

@ -106,31 +106,7 @@ def create_system(options, system, piobus = None, dma_ports = []):
system.ruby = RubySystem(no_mem_vec = options.use_map)
ruby = system.ruby
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
try:
(cpu_sequencers, dir_cntrls, topology) = \
eval("%s.create_system(options, system, dma_ports, ruby)"
% protocol)
except:
print "Error: could not create sytem for ruby protocol %s" % protocol
raise
# Create a port proxy for connecting the system port. This is
# independent of the protocol and kept in the protocol-agnostic
# part (i.e. here).
sys_port_proxy = RubyPortProxy(ruby_system = ruby)
# Give the system port proxy a SimObject parent without creating a
# full-fledged controller
system.sys_port_proxy = sys_port_proxy
# Connect the system port for loading of binaries etc
system.system_port = system.sys_port_proxy.slave
#
# Set the network classes based on the command line options
#
if options.garnet_network == "fixed":
NetworkClass = GarnetNetwork_d
IntLinkClass = GarnetIntLink_d
@ -152,10 +128,34 @@ def create_system(options, system, piobus = None, dma_ports = []):
RouterClass = Switch
InterfaceClass = None
# Instantiate the network object so that the controllers can connect to it.
network = NetworkClass(ruby_system = ruby, topology = options.topology,
routers = [], ext_links = [], int_links = [], netifs = [])
ruby.network = network
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
try:
(cpu_sequencers, dir_cntrls, topology) = \
eval("%s.create_system(options, system, dma_ports, ruby)"
% protocol)
except:
print "Error: could not create sytem for ruby protocol %s" % protocol
raise
# Create a port proxy for connecting the system port. This is
# independent of the protocol and kept in the protocol-agnostic
# part (i.e. here).
sys_port_proxy = RubyPortProxy(ruby_system = ruby)
# Give the system port proxy a SimObject parent without creating a
# full-fledged controller
system.sys_port_proxy = sys_port_proxy
# Connect the system port for loading of binaries etc
system.system_port = system.sys_port_proxy.slave
# Create the network topology
network = NetworkClass(ruby_system = ruby, topology = topology.description,
routers = [], ext_links = [], int_links = [], netifs = [])
topology.makeTopology(options, network, IntLinkClass, ExtLinkClass,
RouterClass)
@ -168,14 +168,12 @@ def create_system(options, system, piobus = None, dma_ports = []):
network.enable_fault_model = True
network.fault_model = FaultModel()
#
# Loop through the directory controlers.
# Determine the total memory size of the ruby system and verify it is equal
# to physmem. However, if Ruby memory is using sparse memory in SE
# mode, then the system should not back-up the memory state with
# the Memory Vector and thus the memory size bytes should stay at 0.
# Also set the numa bits to the appropriate values.
#
total_mem_size = MemorySize('0B')
ruby.block_size_bytes = options.cacheline_size
@ -196,8 +194,6 @@ def create_system(options, system, piobus = None, dma_ports = []):
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(total_mem_size.value == phys_mem_size)
ruby.network = network
ruby.mem_size = total_mem_size
# Connect the cpu sequencers and the piobus

View file

@ -33,14 +33,13 @@ machine(L0Cache, "MESI Directory L0 Cache")
Cycles request_latency := 2;
Cycles response_latency := 2;
bool send_evictions;
{
// NODE L0 CACHE
// From this node's L0 cache to the network
MessageBuffer bufferToL1, network="To", physical_network="0", ordered="true";
MessageBuffer * bufferToL1, network="To", ordered="true";
// To this node's L0 cache FROM the network
MessageBuffer bufferFromL1, network="From", physical_network="0", ordered="true";
MessageBuffer * bufferFromL1, network="From", ordered="true";
{
// Message queue between this controller and the processor
MessageBuffer mandatoryQueue, ordered="false";

View file

@ -32,26 +32,30 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
Cycles l1_request_latency := 2;
Cycles l1_response_latency := 2;
Cycles to_l2_latency := 1;
{
// From this node's L1 cache TO the network
// a local L1 -> this L2 bank, currently ordered with directory forwarded requests
MessageBuffer requestToL2, network="To", virtual_network="0", ordered="false", vnet_type="request";
// a local L1 -> this L2 bank
MessageBuffer responseToL2, network="To", virtual_network="1", ordered="false", vnet_type="response";
MessageBuffer unblockToL2, network="To", virtual_network="2", ordered="false", vnet_type="unblock";
// To this node's L1 cache FROM the network
// a L2 bank -> this L1
MessageBuffer requestFromL2, network="From", virtual_network="0", ordered="false", vnet_type="request";
// a L2 bank -> this L1
MessageBuffer responseFromL2, network="From", virtual_network="1", ordered="false", vnet_type="response";
// Message Buffers between the L1 and the L0 Cache
// From the L1 cache to the L0 cache
MessageBuffer bufferToL0, network="To", physical_network="0", ordered="true";
// From the L0 cache to the L1 cache
MessageBuffer bufferFromL0, network="From", physical_network="0", ordered="true";
MessageBuffer * bufferToL0, network="To", ordered="true";
// From the L0 cache to the L1 cache
MessageBuffer * bufferFromL0, network="From", ordered="true";
// Message queue from this L1 cache TO the network / L2
MessageBuffer * requestToL2, network="To", virtual_network="0",
ordered="false", vnet_type="request";
MessageBuffer * responseToL2, network="To", virtual_network="1",
ordered="false", vnet_type="response";
MessageBuffer * unblockToL2, network="To", virtual_network="2",
ordered="false", vnet_type="unblock";
// To this L1 cache FROM the network / L2
MessageBuffer * requestFromL2, network="From", virtual_network="2",
ordered="false", vnet_type="request";
MessageBuffer * responseFromL2, network="From", virtual_network="1",
ordered="false", vnet_type="response";
{
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states

View file

@ -37,25 +37,34 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
Cycles to_l2_latency := 1;
bool send_evictions;
bool enable_prefetch := "False";
{
// NODE L1 CACHE
// Message Queues
// From this node's L1 cache TO the network
// a local L1 -> this L2 bank, currently ordered with directory forwarded requests
MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
ordered="false", vnet_type="request";
// a local L1 -> this L2 bank
MessageBuffer responseFromL1Cache, network="To", virtual_network="1", ordered="false", vnet_type="response";
MessageBuffer unblockFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="unblock";
MessageBuffer * responseFromL1Cache, network="To", virtual_network="1",
ordered="false", vnet_type="response";
MessageBuffer * unblockFromL1Cache, network="To", virtual_network="2",
ordered="false", vnet_type="unblock";
// To this node's L1 cache FROM the network
// a L2 bank -> this L1
MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
MessageBuffer * requestToL1Cache, network="From", virtual_network="2",
ordered="false", vnet_type="request";
// a L2 bank -> this L1
MessageBuffer responseToL1Cache, network="From", virtual_network="1", ordered="false", vnet_type="response";
MessageBuffer * responseToL1Cache, network="From", virtual_network="1",
ordered="false", vnet_type="response";
{
// Request Buffer for prefetches
MessageBuffer optionalQueue, ordered="false";
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states

View file

@ -26,34 +26,33 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MSI_MOSI_CMP_directory-L2cache.sm 1.12 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
*
*/
machine(L2Cache, "MESI Directory L2 Cache CMP")
: CacheMemory * L2cache;
Cycles l2_request_latency := 2;
Cycles l2_response_latency := 2;
Cycles to_l1_latency := 1;
{
// L2 BANK QUEUES
// Message Queues
// From local bank of L2 cache TO the network
MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="0",
MessageBuffer * DirRequestFromL2Cache, network="To", virtual_network="0",
ordered="false", vnet_type="request"; // this L2 bank -> Memory
MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0",
MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="2",
ordered="false", vnet_type="request"; // this L2 bank -> a local L1
MessageBuffer responseFromL2Cache, network="To", virtual_network="1",
MessageBuffer * responseFromL2Cache, network="To", virtual_network="1",
ordered="false", vnet_type="response"; // this L2 bank -> a local L1 || Memory
// FROM the network to this local bank of L2 cache
MessageBuffer unblockToL2Cache, network="From", virtual_network="2",
MessageBuffer * unblockToL2Cache, network="From", virtual_network="2",
ordered="false", vnet_type="unblock"; // a local L1 || Memory -> this L2 bank
MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0",
ordered="false", vnet_type="request"; // a local L1 -> this L2 bank
MessageBuffer responseToL2Cache, network="From", virtual_network="1",
ordered="false", vnet_type="response"; // a local L1 || Memory -> this L2 bank
MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="0",
ordered="false", vnet_type="request"; // a local L1 -> this L2 bank
MessageBuffer * responseToL2Cache, network="From", virtual_network="1",
ordered="false", vnet_type="response"; // a local L1 || Memory -> this L2 bank
{
// STATES
state_declaration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
// Base states

View file

@ -26,27 +26,19 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
*/
// This file is copied from Yasuko Watanabe's prefetch / memory protocol
// Copied here by aep 12/14/07
machine(Directory, "MESI Two Level directory protocol")
: DirectoryMemory * directory;
MemoryControl * memBuffer;
Cycles to_mem_ctrl_latency := 1;
Cycles directory_latency := 6;
{
MessageBuffer requestToDir, network="From", virtual_network="0",
ordered="false", vnet_type="request";
MessageBuffer responseToDir, network="From", virtual_network="1",
ordered="false", vnet_type="response";
MessageBuffer responseFromDir, network="To", virtual_network="1",
ordered="false", vnet_type="response";
MessageBuffer * requestToDir, network="From", virtual_network="0",
ordered="false", vnet_type="request";
MessageBuffer * responseToDir, network="From", virtual_network="1",
ordered="false", vnet_type="response";
MessageBuffer * responseFromDir, network="To", virtual_network="1",
ordered="false", vnet_type="response";
{
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states

View file

@ -30,11 +30,12 @@
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer;
Cycles request_latency := 6;
MessageBuffer * responseFromDir, network="From", virtual_network="1",
ordered="true", vnet_type="response";
MessageBuffer * requestToDir, network="To", virtual_network="0",
ordered="false", vnet_type="request";
{
MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response";
MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
@ -74,7 +75,7 @@ machine(DMA, "DMA Controller")
error("DMA does not support get data block.");
}
out_port(reqToDirectory_out, RequestMsg, reqToDirectory, desc="...");
out_port(requestToDir_out, RequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
@ -106,7 +107,7 @@ machine(DMA, "DMA Controller")
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, RequestMsg, request_latency) {
enqueue(requestToDir_out, RequestMsg, request_latency) {
out_msg.Addr := in_msg.PhysicalAddress;
out_msg.Type := CoherenceRequestType:DMA_READ;
out_msg.DataBlk := in_msg.DataBlk;
@ -119,7 +120,7 @@ machine(DMA, "DMA Controller")
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, RequestMsg, request_latency) {
enqueue(requestToDir_out, RequestMsg, request_latency) {
out_msg.Addr := in_msg.PhysicalAddress;
out_msg.Type := CoherenceRequestType:DMA_WRITE;
out_msg.DataBlk := in_msg.DataBlk;

View file

@ -28,20 +28,23 @@
*/
machine(L1Cache, "MI Example L1 Cache")
: Sequencer * sequencer;
: Sequencer * sequencer;
CacheMemory * cacheMemory;
Cycles cache_response_latency := 12;
Cycles issue_latency := 2;
bool send_evictions;
{
// NETWORK BUFFERS
MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="true", vnet_type="request";
MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="true", vnet_type="response";
MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="true", vnet_type="forward";
MessageBuffer responseToCache, network="From", virtual_network="4", ordered="true", vnet_type="response";
MessageBuffer * requestFromCache, network="To", virtual_network="2",
ordered="true", vnet_type="request";
MessageBuffer * responseFromCache, network="To", virtual_network="4",
ordered="true", vnet_type="response";
MessageBuffer * forwardToCache, network="From", virtual_network="3",
ordered="true", vnet_type="forward";
MessageBuffer * responseToCache, network="From", virtual_network="4",
ordered="true", vnet_type="response";
{
// STATES
state_declaration(State, desc="Cache states") {
I, AccessPermission:Invalid, desc="Not Present/Invalid";

View file

@ -28,18 +28,22 @@
*/
machine(Directory, "Directory protocol")
: DirectoryMemory * directory;
: DirectoryMemory * directory;
MemoryControl * memBuffer;
Cycles directory_latency := 12;
MessageBuffer * forwardFromDir, network="To", virtual_network="3",
ordered="false", vnet_type="forward";
MessageBuffer * responseFromDir, network="To", virtual_network="4",
ordered="false", vnet_type="response";
MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
ordered="true", vnet_type="response";
MessageBuffer * requestToDir, network="From", virtual_network="2",
ordered="true", vnet_type="request";
MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
ordered="true", vnet_type="request";
{
MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";
MessageBuffer requestToDir, network="From", virtual_network="2", ordered="true", vnet_type="request";
MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states

View file

@ -28,13 +28,14 @@
*/
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer;
: DMASequencer * dma_sequencer;
Cycles request_latency := 6;
MessageBuffer * responseFromDir, network="From", virtual_network="1",
ordered="true", vnet_type="response";
MessageBuffer * requestToDir, network="To", virtual_network="0",
ordered="false", vnet_type="request";
{
MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response";
MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
@ -69,7 +70,7 @@ machine(DMA, "DMA Controller")
error("DMA Controller does not support getDataBlock function.\n");
}
out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
@ -101,7 +102,7 @@ machine(DMA, "DMA Controller")
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
enqueue(requestToDir_out, DMARequestMsg, request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:READ;
@ -116,7 +117,7 @@ machine(DMA, "DMA Controller")
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
enqueue(requestToDir_out, DMARequestMsg, request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:WRITE;

View file

@ -34,25 +34,24 @@ machine(L1Cache, "Directory protocol")
Cycles request_latency := 2;
Cycles use_timeout_latency := 50;
bool send_evictions;
{
// NODE L1 CACHE
// Message Queues
// From this node's L1 cache TO the network
// a local L1 -> this L2 bank, currently ordered with directory forwarded requests
MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
ordered="false", vnet_type="request";
// a local L1 -> this L2 bank
MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="response";
// MessageBuffer writebackFromL1Cache, network="To", virtual_network="3", ordered="false", vnet_type="writeback";
MessageBuffer * responseFromL1Cache, network="To", virtual_network="2",
ordered="false", vnet_type="response";
// To this node's L1 cache FROM the network
// a L2 bank -> this L1
MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
MessageBuffer * requestToL1Cache, network="From", virtual_network="0",
ordered="false", vnet_type="request";
// a L2 bank -> this L1
MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false", vnet_type="response";
MessageBuffer * responseToL1Cache, network="From", virtual_network="2",
ordered="false", vnet_type="response";
{
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states

View file

@ -30,20 +30,25 @@ machine(L2Cache, "Token protocol")
: CacheMemory * L2cache;
Cycles response_latency := 2;
Cycles request_latency := 2;
{
// L2 BANK QUEUES
// From local bank of L2 cache TO the network
MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0", ordered="false", vnet_type="request"; // this L2 bank -> a local L1
MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="request"; // this L2 bank -> mod-directory
MessageBuffer responseFromL2Cache, network="To", virtual_network="2", ordered="false", vnet_type="response"; // this L2 bank -> a local L1 || mod-directory
MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="0",
ordered="false", vnet_type="request"; // this L2 bank -> a local L1
MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="1",
ordered="false", vnet_type="request"; // this L2 bank -> mod-directory
MessageBuffer * responseFromL2Cache, network="To", virtual_network="2",
ordered="false", vnet_type="response"; // this L2 bank -> a local L1 || mod-directory
// FROM the network to this local bank of L2 cache
MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false", vnet_type="request"; // a local L1 -> this L2 bank, Lets try this???
MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="request"; // mod-directory -> this L2 bank
MessageBuffer responseToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="response"; // a local L1 || mod-directory -> this L2 bank
// MessageBuffer L1WritebackToL2Cache, network="From", virtual_network="3", ordered="false", vnet_type="writeback";
MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="0",
ordered="false", vnet_type="request"; // a local L1 -> this L2 bank, Lets try this???
MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="1",
ordered="false", vnet_type="request"; // mod-directory -> this L2 bank
MessageBuffer * responseToL2Cache, network="From", virtual_network="2",
ordered="false", vnet_type="response"; // a local L1 || mod-directory -> this L2 bank
{
// STATES
state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {

View file

@ -30,16 +30,19 @@ machine(Directory, "Directory protocol")
: DirectoryMemory * directory;
MemoryControl * memBuffer;
Cycles directory_latency := 6;
// Message Queues
MessageBuffer * requestToDir, network="From", virtual_network="1",
ordered="false", vnet_type="request"; // a mod-L2 bank -> this Dir
MessageBuffer * responseToDir, network="From", virtual_network="2",
ordered="false", vnet_type="response"; // a mod-L2 bank -> this Dir
MessageBuffer * forwardFromDir, network="To", virtual_network="1",
ordered="false", vnet_type="forward";
MessageBuffer * responseFromDir, network="To", virtual_network="2",
ordered="false", vnet_type="response"; // Dir -> mod-L2 bank
{
// ** IN QUEUES **
MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false", vnet_type="request"; // a mod-L2 bank -> this Dir
MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false", vnet_type="response"; // a mod-L2 bank -> this Dir
MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false", vnet_type="forward";
MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false", vnet_type="response"; // Dir -> mod-L2 bank
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states

View file

@ -28,15 +28,19 @@
*/
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer;
: DMASequencer * dma_sequencer;
Cycles request_latency := 14;
Cycles response_latency := 14;
MessageBuffer * responseFromDir, network="From", virtual_network="2",
ordered="false", vnet_type="response";
MessageBuffer * reqToDir, network="To", virtual_network="1",
ordered="false", vnet_type="request";
MessageBuffer * respToDir, network="To", virtual_network="2",
ordered="false", vnet_type="dmaresponse";
{
MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false", vnet_type="response";
MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false", vnet_type="request";
MessageBuffer respToDir, network="To", virtual_network="2", ordered="false", vnet_type="dmaresponse";
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";

View file

@ -48,24 +48,32 @@ machine(L1Cache, "Token protocol")
bool dynamic_timeout_enabled := "True";
bool no_mig_atomic := "True";
bool send_evictions;
{
// Message Queues
// From this node's L1 cache TO the network
// a local L1 -> this L2 bank
MessageBuffer responseFromL1Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
MessageBuffer persistentFromL1Cache, network="To", virtual_network="3", ordered="true", vnet_type="persistent";
MessageBuffer * responseFromL1Cache, network="To", virtual_network="4",
ordered="false", vnet_type="response";
MessageBuffer * persistentFromL1Cache, network="To", virtual_network="3",
ordered="true", vnet_type="persistent";
// a local L1 -> this L2 bank, currently ordered with directory forwarded requests
MessageBuffer requestFromL1Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";
MessageBuffer * requestFromL1Cache, network="To", virtual_network="1",
ordered="false", vnet_type="request";
// To this node's L1 cache FROM the network
// a L2 bank -> this L1
MessageBuffer responseToL1Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
MessageBuffer persistentToL1Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
// a L2 bank -> this L1
MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";
// a L2 bank -> this L1
MessageBuffer * responseToL1Cache, network="From", virtual_network="4",
ordered="false", vnet_type="response";
MessageBuffer * persistentToL1Cache, network="From", virtual_network="3",
ordered="true", vnet_type="persistent";
// a L2 bank -> this L1
MessageBuffer * requestToL1Cache, network="From", virtual_network="1",
ordered="false", vnet_type="request";
{
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states

View file

@ -32,29 +32,36 @@ machine(L2Cache, "Token protocol")
Cycles l2_request_latency := 5;
Cycles l2_response_latency := 5;
bool filtering_enabled := "True";
{
// L2 BANK QUEUES
// From local bank of L2 cache TO the network
// this L2 bank -> a local L1 || mod-directory
MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
ordered="false", vnet_type="response";
// this L2 bank -> mod-directory
MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false", vnet_type="request";
MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
ordered="false", vnet_type="request";
// this L2 bank -> a local L1
MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";
MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
ordered="false", vnet_type="request";
// FROM the network to this local bank of L2 cache
// a local L1 || mod-directory -> this L2 bank
MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
ordered="false", vnet_type="response";
MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
ordered="true", vnet_type="persistent";
// mod-directory -> this L2 bank
MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="request";
MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
ordered="false", vnet_type="request";
// a local L1 -> this L2 bank
MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";
MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
ordered="false", vnet_type="request";
{
// STATES
state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
// Base states

View file

@ -34,18 +34,34 @@ machine(Directory, "Token protocol")
bool distributed_persistent := "True";
Cycles fixed_timeout_latency := 100;
Cycles reissue_wakeup_latency := 10;
// Message Queues from dir to other controllers / network
MessageBuffer * dmaResponseFromDir, network="To", virtual_network="5",
ordered="true", vnet_type="response";
MessageBuffer * responseFromDir, network="To", virtual_network="4",
ordered="false", vnet_type="response";
MessageBuffer * persistentFromDir, network="To", virtual_network="3",
ordered="true", vnet_type="persistent";
MessageBuffer * requestFromDir, network="To", virtual_network="1",
ordered="false", vnet_type="request";
// Message Queues to dir from other controllers / network
MessageBuffer * responseToDir, network="From", virtual_network="4",
ordered="false", vnet_type="response";
MessageBuffer * persistentToDir, network="From", virtual_network="3",
ordered="true", vnet_type="persistent";
MessageBuffer * requestToDir, network="From", virtual_network="2",
ordered="false", vnet_type="request";
MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
ordered="true", vnet_type="request";
{
MessageBuffer dmaResponseFromDir, network="To", virtual_network="5", ordered="true", vnet_type="response";
MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
MessageBuffer persistentFromDir, network="To", virtual_network="3", ordered="true", vnet_type="persistent";
MessageBuffer requestFromDir, network="To", virtual_network="1", ordered="false", vnet_type="request";
MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
MessageBuffer persistentToDir, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request";
MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_O") {
// Base states

View file

@ -28,13 +28,16 @@
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer;
: DMASequencer * dma_sequencer;
Cycles request_latency := 6;
// Messsage Queues
MessageBuffer * responseFromDir, network="From", virtual_network="5",
ordered="true", vnet_type="response";
MessageBuffer * reqToDirectory, network="To", virtual_network="0",
ordered="false", vnet_type="request";
{
MessageBuffer responseFromDir, network="From", virtual_network="5", ordered="true", vnet_type="response";
MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";

View file

@ -34,7 +34,7 @@
*/
machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
: Sequencer * sequencer;
: Sequencer * sequencer;
CacheMemory * L1Icache;
CacheMemory * L1Dcache;
CacheMemory * L2cache;
@ -43,17 +43,20 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
Cycles l2_cache_hit_latency := 10;
bool no_mig_atomic := "True";
bool send_evictions;
{
// NETWORK BUFFERS
MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false", vnet_type="request";
MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false", vnet_type="response";
MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false", vnet_type="unblock";
MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false", vnet_type="forward";
MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false", vnet_type="response";
MessageBuffer * requestFromCache, network="To", virtual_network="2",
ordered="false", vnet_type="request";
MessageBuffer * responseFromCache, network="To", virtual_network="4",
ordered="false", vnet_type="response";
MessageBuffer * unblockFromCache, network="To", virtual_network="5",
ordered="false", vnet_type="unblock";
MessageBuffer * forwardToCache, network="From", virtual_network="3",
ordered="false", vnet_type="forward";
MessageBuffer * responseToCache, network="From", virtual_network="4",
ordered="false", vnet_type="response";
{
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states

View file

@ -34,28 +34,37 @@
*/
machine(Directory, "AMD Hammer-like protocol")
: DirectoryMemory * directory;
: DirectoryMemory * directory;
CacheMemory * probeFilter;
MemoryControl * memBuffer;
Cycles memory_controller_latency := 2;
bool probe_filter_enabled := "False";
bool full_bit_dir_enabled := "False";
{
MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
//
MessageBuffer * forwardFromDir, network="To", virtual_network="3",
ordered="false", vnet_type="forward";
MessageBuffer * responseFromDir, network="To", virtual_network="4",
ordered="false", vnet_type="response";
// For a finite buffered network, note that the DMA response network only
// works at this relatively lower numbered (lower priority) virtual network
// because the trigger queue decouples cache responses from DMA responses.
//
MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";
MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
ordered="true", vnet_type="response";
MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false", vnet_type="unblock";
MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request", recycle_latency="1";
MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
MessageBuffer * unblockToDir, network="From", virtual_network="5",
ordered="false", vnet_type="unblock";
MessageBuffer * responseToDir, network="From", virtual_network="4",
ordered="false", vnet_type="response";
MessageBuffer * requestToDir, network="From", virtual_network="2",
ordered="false", vnet_type="request", recycle_latency="1";
MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
ordered="true", vnet_type="request";
{
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_E") {
// Base states

View file

@ -28,16 +28,15 @@
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer;
: DMASequencer * dma_sequencer;
Cycles request_latency := 6;
MessageBuffer * responseFromDir, network="From", virtual_network="1",
ordered="true", vnet_type="response";
MessageBuffer * requestToDir, network="To", virtual_network="0",
ordered="false", vnet_type="request";
{
MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response";
MessageBuffer reqToDirectory, network="To", virtual_network="0", ordered="false", vnet_type="request";
state_declaration(State,
desc="DMA states",
default="DMA_State_READY") {
state_declaration(State, desc="DMA states", default="DMA_State_READY") {
READY, AccessPermission:Invalid, desc="Ready to accept a new request";
BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
@ -71,7 +70,7 @@ machine(DMA, "DMA Controller")
error("DMA Controller does not support getDataBlock function.\n");
}
out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
@ -103,7 +102,7 @@ machine(DMA, "DMA Controller")
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
enqueue(requestToDir_out, DMARequestMsg, request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:READ;
@ -118,7 +117,7 @@ machine(DMA, "DMA Controller")
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
peek(dmaRequestQueue_in, SequencerMsg) {
enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
enqueue(requestToDir_out, DMARequestMsg, request_latency) {
out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:WRITE;

View file

@ -32,15 +32,17 @@
machine(L1Cache, "Network_test L1 Cache")
: Sequencer * sequencer;
: Sequencer * sequencer;
Cycles issue_latency := 2;
{
// NETWORK BUFFERS
MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false", vnet_type = "request";
MessageBuffer forwardFromCache, network="To", virtual_network="1", ordered="false", vnet_type = "forward";
MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false", vnet_type = "response";
MessageBuffer * requestFromCache, network="To", virtual_network="0",
ordered="false", vnet_type = "request";
MessageBuffer * forwardFromCache, network="To", virtual_network="1",
ordered="false", vnet_type = "forward";
MessageBuffer * responseFromCache, network="To", virtual_network="2",
ordered="false", vnet_type = "response";
{
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
I, AccessPermission:Invalid, desc="Not Present/Invalid";

View file

@ -32,13 +32,13 @@
machine(Directory, "Network_test Directory")
:
: MessageBuffer * requestToDir, network="From", virtual_network="0",
ordered="false", vnet_type = "request";
MessageBuffer * forwardToDir, network="From", virtual_network="1",
ordered="false", vnet_type = "forward";
MessageBuffer * responseToDir, network="From", virtual_network="2",
ordered="false", vnet_type = "response";
{
MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false", vnet_type = "request";
MessageBuffer forwardToDir, network="From", virtual_network="1", ordered="false", vnet_type = "forward";
MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false", vnet_type = "response";
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states

View file

@ -57,19 +57,6 @@ Network::Network(const Params *p)
// Queues that are feeding the protocol
m_fromNetQueues.resize(m_nodes);
for (int node = 0; node < m_nodes; node++) {
// Setting number of virtual message buffers per Network Queue
m_toNetQueues[node].resize(m_virtual_networks);
m_fromNetQueues[node].resize(m_virtual_networks);
// Instantiating the Message Buffers that
// interact with the coherence protocol
for (int j = 0; j < m_virtual_networks; j++) {
m_toNetQueues[node][j] = new MessageBuffer();
m_fromNetQueues[node][j] = new MessageBuffer();
}
}
m_in_use.resize(m_virtual_networks);
m_ordered.resize(m_virtual_networks);
@ -95,10 +82,14 @@ Network::Network(const Params *p)
Network::~Network()
{
for (int node = 0; node < m_nodes; node++) {
// Delete the Message Buffers
for (int j = 0; j < m_virtual_networks; j++) {
delete m_toNetQueues[node][j];
delete m_fromNetQueues[node][j];
for (auto& it : m_toNetQueues[node]) {
delete it.second;
}
for (auto& it : m_fromNetQueues[node]) {
delete it.second;
}
}

View file

@ -72,11 +72,10 @@ class Network : public ClockedObject
static uint32_t MessageSizeType_to_int(MessageSizeType size_type);
// returns the queue requested for the given component
virtual MessageBuffer* getToNetQueue(NodeID id, bool ordered,
int netNumber, std::string vnet_type) = 0;
virtual MessageBuffer* getFromNetQueue(NodeID id, bool ordered,
int netNumber, std::string vnet_type) = 0;
virtual void setToNetQueue(NodeID id, bool ordered, int netNumber,
std::string vnet_type, MessageBuffer *b) = 0;
virtual void setFromNetQueue(NodeID id, bool ordered, int netNumber,
std::string vnet_type, MessageBuffer *b) = 0;
virtual void makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
LinkDirection direction,
@ -113,8 +112,8 @@ class Network : public ClockedObject
static uint32_t m_data_msg_size;
// vector of queues from the components
std::vector<std::vector<MessageBuffer*> > m_toNetQueues;
std::vector<std::vector<MessageBuffer*> > m_fromNetQueues;
std::vector<std::map<int, MessageBuffer*> > m_toNetQueues;
std::vector<std::map<int, MessageBuffer*> > m_fromNetQueues;
std::vector<bool> m_in_use;
std::vector<bool> m_ordered;

View file

@ -28,7 +28,6 @@
# Brad Beckmann
from m5.params import *
from m5.SimObject import SimObject
from ClockedObject import ClockedObject
from BasicLink import BasicLink
@ -48,3 +47,6 @@ class RubyNetwork(ClockedObject):
netifs = VectorParam.ClockedObject("Network Interfaces")
ext_links = VectorParam.BasicExtLink("Links to external nodes")
int_links = VectorParam.BasicIntLink("Links between internal nodes")
slave = VectorSlavePort("CPU slave port")
master = VectorMasterPort("CPU master port")

View file

@ -66,20 +66,20 @@ BaseGarnetNetwork::init()
Network::init();
}
MessageBuffer*
BaseGarnetNetwork::getToNetQueue(NodeID id, bool ordered, int network_num,
string vnet_type)
void
BaseGarnetNetwork::setToNetQueue(NodeID id, bool ordered, int network_num,
string vnet_type, MessageBuffer *b)
{
checkNetworkAllocation(id, ordered, network_num, vnet_type);
return m_toNetQueues[id][network_num];
m_toNetQueues[id][network_num] = b;
}
MessageBuffer*
BaseGarnetNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num,
string vnet_type)
void
BaseGarnetNetwork::setFromNetQueue(NodeID id, bool ordered, int network_num,
string vnet_type, MessageBuffer *b)
{
checkNetworkAllocation(id, ordered, network_num, vnet_type);
return m_fromNetQueues[id][network_num];
m_fromNetQueues[id][network_num] = b;
}
void

View file

@ -68,12 +68,11 @@ class BaseGarnetNetwork : public Network
m_queueing_latency[vnet] += latency;
}
// returns the queue requested for the given component
MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num,
std::string vnet_type);
MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num,
std::string vnet_type);
// set the queue
void setToNetQueue(NodeID id, bool ordered, int network_num,
std::string vnet_type, MessageBuffer *b);
void setFromNetQueue(NodeID id, bool ordered, int network_num,
std::string vnet_type, MessageBuffer *b);
bool isVNetOrdered(int vnet) { return m_ordered[vnet]; }
bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }

View file

@ -53,8 +53,6 @@ NetworkInterface_d::NetworkInterface_d(const Params *p)
m_vc_round_robin = 0;
m_ni_buffers.resize(m_num_vcs);
m_ni_enqueue_time.resize(m_num_vcs);
inNode_ptr.resize(m_virtual_networks);
outNode_ptr.resize(m_virtual_networks);
creditQueue = new flitBuffer_d();
// instantiating the NI flit buffers
@ -108,18 +106,20 @@ NetworkInterface_d::addOutPort(NetworkLink_d *out_link,
}
void
NetworkInterface_d::addNode(vector<MessageBuffer *>& in,
vector<MessageBuffer *>& out)
NetworkInterface_d::addNode(map<int, MessageBuffer *>& in,
map<int, MessageBuffer *>& out)
{
assert(in.size() == m_virtual_networks);
inNode_ptr = in;
outNode_ptr = out;
for (int j = 0; j < m_virtual_networks; j++) {
for (auto& it : in) {
// the protocol injects messages into the NI
inNode_ptr[j]->setConsumer(this);
inNode_ptr[j]->setReceiver(this);
outNode_ptr[j]->setSender(this);
it.second->setConsumer(this);
it.second->setReceiver(this);
}
for (auto& it : out) {
it.second->setSender(this);
}
}
@ -223,11 +223,14 @@ NetworkInterface_d::wakeup()
// Checking for messages coming from the protocol
// can pick up a message/cycle for each virtual net
for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
while (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
msg_ptr = inNode_ptr[vnet]->peekMsgPtr();
for (auto it = inNode_ptr.begin(); it != inNode_ptr.end(); ++it) {
int vnet = (*it).first;
MessageBuffer *b = (*it).second;
while (b->isReady()) { // Is there a message waiting
msg_ptr = b->peekMsgPtr();
if (flitisizeMessage(msg_ptr, vnet)) {
inNode_ptr[vnet]->dequeue();
b->dequeue();
} else {
break;
}
@ -351,12 +354,15 @@ NetworkInterface_d::get_vnet(int vc)
void
NetworkInterface_d::checkReschedule()
{
for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
for (const auto& it : inNode_ptr) {
MessageBuffer *b = it.second;
while (b->isReady()) { // Is there a message waiting
scheduleEvent(Cycles(1));
return;
}
}
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_ni_buffers[vc]->isReady(curCycle() + Cycles(1))) {
scheduleEvent(Cycles(1));

View file

@ -60,8 +60,9 @@ class NetworkInterface_d : public ClockedObject, public Consumer
void addOutPort(NetworkLink_d *out_link, CreditLink_d *credit_link);
void wakeup();
void addNode(std::vector<MessageBuffer *> &inNode,
std::vector<MessageBuffer *> &outNode);
void addNode(std::map<int, MessageBuffer *> &inNode,
std::map<int, MessageBuffer *> &outNode);
void print(std::ostream& out) const;
int get_vnet(int vc);
void init_net_ptr(GarnetNetwork_d *net_ptr) { m_net_ptr = net_ptr; }
@ -89,9 +90,9 @@ class NetworkInterface_d : public ClockedObject, public Consumer
std::vector<Cycles> m_ni_enqueue_time;
// The Message buffers that takes messages from the protocol
std::vector<MessageBuffer *> inNode_ptr;
std::map<int, MessageBuffer *> inNode_ptr;
// The Message buffers that provides messages to the protocol
std::vector<MessageBuffer *> outNode_ptr;
std::map<int, MessageBuffer *> outNode_ptr;
bool flitisizeMessage(MsgPtr msg_ptr, int vnet);
int calculateVC(int vnet);

View file

@ -49,13 +49,10 @@ NetworkInterface::NetworkInterface(const Params *p)
m_virtual_networks = p->virt_nets;
m_vc_per_vnet = p->vcs_per_vnet;
m_num_vcs = m_vc_per_vnet*m_virtual_networks;
m_vc_round_robin = 0;
m_ni_buffers.resize(m_num_vcs);
inNode_ptr.resize(m_virtual_networks);
outNode_ptr.resize(m_virtual_networks);
// instantiating the NI flit buffers
m_ni_buffers.resize(m_num_vcs);
for (int i =0; i < m_num_vcs; i++)
m_ni_buffers[i] = new flitBuffer();
@ -93,18 +90,20 @@ NetworkInterface::addOutPort(NetworkLink *out_link)
}
void
NetworkInterface::addNode(vector<MessageBuffer*>& in,
vector<MessageBuffer*>& out)
NetworkInterface::addNode(map<int, MessageBuffer*>& in,
map<int, MessageBuffer*>& out)
{
assert(in.size() == m_virtual_networks);
inNode_ptr = in;
outNode_ptr = out;
// protocol injects messages into the NI
for (int j = 0; j < m_virtual_networks; j++) {
inNode_ptr[j]->setConsumer(this);
inNode_ptr[j]->setReceiver(this);
outNode_ptr[j]->setSender(this);
for (auto& it: in) {
// the protocol injects messages into the NI
it.second->setConsumer(this);
it.second->setReceiver(this);
}
for (auto& it : out) {
it.second->setSender(this);
}
}
@ -243,12 +242,14 @@ NetworkInterface::wakeup()
//Checking for messages coming from the protocol
// can pick up a message/cycle for each virtual net
for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
while (inNode_ptr[vnet]->isReady()) // Is there a message waiting
{
msg_ptr = inNode_ptr[vnet]->peekMsgPtr();
for (auto it = inNode_ptr.begin(); it != inNode_ptr.end(); ++it) {
int vnet = (*it).first;
MessageBuffer *b = (*it).second;
while (b->isReady()) { // Is there a message waiting
msg_ptr = b->peekMsgPtr();
if (flitisizeMessage(msg_ptr, vnet)) {
inNode_ptr[vnet]->dequeue();
b->dequeue();
} else {
break;
}
@ -324,14 +325,17 @@ NetworkInterface::scheduleOutputLink()
void
NetworkInterface::checkReschedule()
{
for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
for (const auto& it : inNode_ptr) {
MessageBuffer *b = it.second;
while (b->isReady()) { // Is there a message waiting
scheduleEvent(Cycles(1));
return;
}
}
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_ni_buffers[vc]->isReadyForNext(curCycle())) {
if (m_ni_buffers[vc]->isReady(curCycle() + Cycles(1))) {
scheduleEvent(Cycles(1));
return;
}

View file

@ -56,10 +56,10 @@ class NetworkInterface : public ClockedObject, public FlexibleConsumer
void addInPort(NetworkLink *in_link);
void addOutPort(NetworkLink *out_link);
void addNode(std::map<int, MessageBuffer *> &inNode,
std::map<int, MessageBuffer *> &outNode);
void wakeup();
void addNode(std::vector<MessageBuffer *> &inNode,
std::vector<MessageBuffer *> &outNode);
void grant_vc(int out_port, int vc, Cycles grant_time);
void release_vc(int out_port, int vc, Cycles release_time);
@ -93,10 +93,10 @@ class NetworkInterface : public ClockedObject, public FlexibleConsumer
std::vector<flitBuffer *> m_ni_buffers;
// The Message buffers that takes messages from the protocol
std::vector<MessageBuffer *> inNode_ptr;
std::map<int, MessageBuffer *> inNode_ptr;
// The Message buffers that provides messages to the protocol
std::vector<MessageBuffer *> outNode_ptr;
std::map<int, MessageBuffer *> outNode_ptr;
bool flitisizeMessage(MsgPtr msg_ptr, int vnet);
int calculateVC(int vnet);

View file

@ -387,7 +387,7 @@ Router::checkReschedule()
{
for (int port = 0; port < m_out_link.size(); port++) {
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_router_buffers[port][vc]->isReadyForNext(curCycle())) {
if (m_router_buffers[port][vc]->isReady(curCycle() + Cycles(1))) {
scheduleEvent(Cycles(1));
return;
}

View file

@ -61,17 +61,6 @@ flitBuffer::isReady(Cycles curTime)
return false;
}
bool
flitBuffer::isReadyForNext(Cycles curTime)
{
if (m_buffer.size() != 0 ) {
flit *t_flit = m_buffer.front();
if (t_flit->get_time() <= (curTime + 1))
return true;
}
return false;
}
bool
flitBuffer::isFull()
{

View file

@ -44,7 +44,6 @@ class flitBuffer
flitBuffer(int maximum_size);
bool isReady(Cycles curTime);
bool isReadyForNext(Cycles curTime);
bool isFull();
bool isEmpty();
void setMaxSize(int maximum);

View file

@ -61,36 +61,33 @@ PerfectSwitch::init(SimpleNetwork *network_ptr)
{
m_network_ptr = network_ptr;
for(int i = 0;i < m_virtual_networks;++i)
{
for(int i = 0;i < m_virtual_networks;++i) {
m_pending_message_count.push_back(0);
}
}
void
PerfectSwitch::addInPort(const vector<MessageBuffer*>& in)
PerfectSwitch::addInPort(const map<int, MessageBuffer*>& in)
{
assert(in.size() == m_virtual_networks);
NodeID port = m_in.size();
m_in.push_back(in);
for (int j = 0; j < m_virtual_networks; j++) {
m_in[port][j]->setConsumer(this);
for (auto& it : in) {
it.second->setConsumer(this);
string desc = csprintf("[Queue from port %s %s %s to PerfectSwitch]",
to_string(m_switch_id), to_string(port), to_string(j));
m_in[port][j]->setDescription(desc);
m_in[port][j]->setIncomingLink(port);
m_in[port][j]->setVnet(j);
to_string(m_switch_id), to_string(port), to_string(it.first));
it.second->setDescription(desc);
it.second->setIncomingLink(port);
it.second->setVnet(it.first);
}
}
void
PerfectSwitch::addOutPort(const vector<MessageBuffer*>& out,
PerfectSwitch::addOutPort(const map<int, MessageBuffer*>& out,
const NetDest& routing_table_entry)
{
assert(out.size() == m_virtual_networks);
// Setup link order
LinkOrder l;
l.m_value = 0;
@ -152,11 +149,16 @@ PerfectSwitch::wakeup()
vector<NetDest> output_link_destinations;
// Is there a message waiting?
while (m_in[incoming][vnet]->isReady()) {
auto it = m_in[incoming].find(vnet);
if (it == m_in[incoming].end())
continue;
MessageBuffer *buffer = (*it).second;
while (buffer->isReady()) {
DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
// Peek at message
msg_ptr = m_in[incoming][vnet]->peekMsgPtr();
msg_ptr = buffer->peekMsgPtr();
net_msg_ptr = safe_cast<NetworkMessage*>(msg_ptr.get());
DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
@ -261,7 +263,7 @@ PerfectSwitch::wakeup()
}
// Dequeue msg
m_in[incoming][vnet]->dequeue();
buffer->dequeue();
m_pending_message_count[vnet]--;
// Enqueue it - for all outgoing queues

View file

@ -65,9 +65,10 @@ class PerfectSwitch : public Consumer
{ return csprintf("PerfectSwitch-%i", m_switch_id); }
void init(SimpleNetwork *);
void addInPort(const std::vector<MessageBuffer*>& in);
void addOutPort(const std::vector<MessageBuffer*>& out,
void addInPort(const std::map<int, MessageBuffer*>& in);
void addOutPort(const std::map<int, MessageBuffer*>& out,
const NetDest& routing_table_entry);
int getInLinks() const { return m_in.size(); }
int getOutLinks() const { return m_out.size(); }
@ -86,8 +87,9 @@ class PerfectSwitch : public Consumer
SwitchID m_switch_id;
// vector of queues from the components
std::vector<std::vector<MessageBuffer*> > m_in;
std::vector<std::vector<MessageBuffer*> > m_out;
std::vector<std::map<int, MessageBuffer*> > m_in;
std::vector<std::map<int, MessageBuffer*> > m_out;
std::vector<NetDest> m_routing_table;
std::vector<LinkOrder> m_link_order;

View file

@ -93,8 +93,7 @@ SimpleNetwork::makeOutLink(SwitchID src, NodeID dest, BasicLink* link,
SimpleExtLink *simple_link = safe_cast<SimpleExtLink*>(link);
m_switches[src]->addOutPort(m_fromNetQueues[dest],
routing_table_entry,
m_switches[src]->addOutPort(m_fromNetQueues[dest], routing_table_entry,
simple_link->m_latency,
simple_link->m_bw_multiplier);
@ -118,18 +117,21 @@ SimpleNetwork::makeInternalLink(SwitchID src, SwitchID dest, BasicLink* link,
const NetDest& routing_table_entry)
{
// Create a set of new MessageBuffers
std::vector<MessageBuffer*> queues;
std::map<int, MessageBuffer*> queues;
for (int i = 0; i < m_virtual_networks; i++) {
// allocate a buffer
MessageBuffer* buffer_ptr = new MessageBuffer;
buffer_ptr->setOrdering(true);
if (m_buffer_size > 0) {
buffer_ptr->resize(m_buffer_size);
}
queues.push_back(buffer_ptr);
queues[i] = buffer_ptr;
// remember to deallocate it
m_buffers_to_free.push_back(buffer_ptr);
}
// Connect it to the two switches
SimpleIntLink *simple_link = safe_cast<SimpleIntLink*>(link);
@ -151,20 +153,20 @@ SimpleNetwork::checkNetworkAllocation(NodeID id, bool ordered, int network_num)
m_in_use[network_num] = true;
}
MessageBuffer*
SimpleNetwork::getToNetQueue(NodeID id, bool ordered, int network_num,
std::string vnet_type)
void
SimpleNetwork::setToNetQueue(NodeID id, bool ordered, int network_num,
std::string vnet_type, MessageBuffer *b)
{
checkNetworkAllocation(id, ordered, network_num);
return m_toNetQueues[id][network_num];
m_toNetQueues[id][network_num] = b;
}
MessageBuffer*
SimpleNetwork::getFromNetQueue(NodeID id, bool ordered, int network_num,
std::string vnet_type)
void
SimpleNetwork::setFromNetQueue(NodeID id, bool ordered, int network_num,
std::string vnet_type, MessageBuffer *b)
{
checkNetworkAllocation(id, ordered, network_num);
return m_fromNetQueues[id][network_num];
m_fromNetQueues[id][network_num] = b;
}
void

View file

@ -56,9 +56,11 @@ class SimpleNetwork : public Network
void collateStats();
void regStats();
// returns the queue requested for the given component
MessageBuffer* getToNetQueue(NodeID id, bool ordered, int network_num, std::string vnet_type);
MessageBuffer* getFromNetQueue(NodeID id, bool ordered, int network_num, std::string vnet_type);
// sets the queue requested
void setToNetQueue(NodeID id, bool ordered, int network_num,
std::string vnet_type, MessageBuffer *b);
void setFromNetQueue(NodeID id, bool ordered, int network_num,
std::string vnet_type, MessageBuffer *b);
bool isVNetOrdered(int vnet) { return m_ordered[vnet]; }
bool validVirtualNetwork(int vnet) { return m_in_use[vnet]; }
@ -89,6 +91,7 @@ class SimpleNetwork : public Network
// Private copy constructor and assignment operator
SimpleNetwork(const SimpleNetwork& obj);
SimpleNetwork& operator=(const SimpleNetwork& obj);
std::vector<Switch*> m_switches;
std::vector<MessageBuffer*> m_buffers_to_free;
std::vector<Switch*> m_endpoint_switches;

View file

@ -64,29 +64,33 @@ Switch::init()
}
void
Switch::addInPort(const vector<MessageBuffer*>& in)
Switch::addInPort(const map<int, MessageBuffer*>& in)
{
m_perfect_switch->addInPort(in);
for (int i = 0; i < in.size(); i++) {
in[i]->setReceiver(this);
for (auto& it : in) {
it.second->setReceiver(this);
}
}
void
Switch::addOutPort(const vector<MessageBuffer*>& out,
const NetDest& routing_table_entry, Cycles link_latency, int bw_multiplier)
Switch::addOutPort(const map<int, MessageBuffer*>& out,
const NetDest& routing_table_entry,
Cycles link_latency, int bw_multiplier)
{
// Create a throttle
Throttle* throttle_ptr = new Throttle(m_id, m_throttles.size(),
link_latency, bw_multiplier, m_network_ptr->getEndpointBandwidth(),
link_latency, bw_multiplier,
m_network_ptr->getEndpointBandwidth(),
this);
m_throttles.push_back(throttle_ptr);
// Create one buffer per vnet (these are intermediaryQueues)
vector<MessageBuffer*> intermediateBuffers;
for (int i = 0; i < out.size(); i++) {
out[i]->setSender(this);
map<int, MessageBuffer*> intermediateBuffers;
for (auto& it : out) {
it.second->setSender(this);
MessageBuffer* buffer_ptr = new MessageBuffer;
// Make these queues ordered
@ -95,7 +99,7 @@ Switch::addOutPort(const vector<MessageBuffer*>& out,
buffer_ptr->resize(m_network_ptr->getBufferSize());
}
intermediateBuffers.push_back(buffer_ptr);
intermediateBuffers[it.first] = buffer_ptr;
m_buffers_to_free.push_back(buffer_ptr);
buffer_ptr->setSender(this);

View file

@ -60,12 +60,13 @@ class Switch : public BasicRouter
typedef SwitchParams Params;
Switch(const Params *p);
~Switch();
void init();
void addInPort(const std::vector<MessageBuffer*>& in);
void addOutPort(const std::vector<MessageBuffer*>& out,
const NetDest& routing_table_entry, Cycles link_latency,
int bw_multiplier);
void addInPort(const std::map<int, MessageBuffer*>& in);
void addOutPort(const std::map<int, MessageBuffer*>& out,
const NetDest& routing_table_entry,
Cycles link_latency, int bw_multiplier);
const Throttle* getThrottle(LinkID link_number) const;
void resetStats();

View file

@ -69,83 +69,59 @@ Throttle::init(NodeID node, Cycles link_latency,
int link_bandwidth_multiplier, int endpoint_bandwidth)
{
m_node = node;
m_vnets = 0;
assert(link_bandwidth_multiplier > 0);
m_link_bandwidth_multiplier = link_bandwidth_multiplier;
m_link_latency = link_latency;
m_endpoint_bandwidth = endpoint_bandwidth;
m_wakeups_wo_switch = 0;
m_link_utilization_proxy = 0;
}
void
Throttle::addLinks(const std::vector<MessageBuffer*>& in_vec,
const std::vector<MessageBuffer*>& out_vec)
Throttle::addLinks(const map<int, MessageBuffer*>& in_vec,
const map<int, MessageBuffer*>& out_vec)
{
assert(in_vec.size() == out_vec.size());
for (int i=0; i<in_vec.size(); i++) {
addVirtualNetwork(in_vec[i], out_vec[i]);
}
}
void
Throttle::addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr)
{
m_units_remaining.push_back(0);
m_in.push_back(in_ptr);
m_out.push_back(out_ptr);
for (auto& it : in_vec) {
int vnet = it.first;
auto jt = out_vec.find(vnet);
assert(jt != out_vec.end());
MessageBuffer *in_ptr = it.second;
MessageBuffer *out_ptr = (*jt).second;
m_in[vnet] = in_ptr;
m_out[vnet] = out_ptr;
m_units_remaining[vnet] = 0;
// Set consumer and description
m_in[m_vnets]->setConsumer(this);
in_ptr->setConsumer(this);
string desc = "[Queue to Throttle " + to_string(m_sID) + " " +
to_string(m_node) + "]";
m_in[m_vnets]->setDescription(desc);
m_vnets++;
in_ptr->setDescription(desc);
}
}
void
Throttle::wakeup()
Throttle::operateVnet(int vnet, int &bw_remaining, bool &schedule_wakeup,
MessageBuffer *in, MessageBuffer *out)
{
// Limits the number of message sent to a limited number of bytes/cycle.
assert(getLinkBandwidth() > 0);
int bw_remaining = getLinkBandwidth();
// Give the highest numbered link priority most of the time
m_wakeups_wo_switch++;
int highest_prio_vnet = m_vnets-1;
int lowest_prio_vnet = 0;
int counter = 1;
bool schedule_wakeup = false;
// invert priorities to avoid starvation seen in the component network
if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
m_wakeups_wo_switch = 0;
highest_prio_vnet = 0;
lowest_prio_vnet = m_vnets-1;
counter = -1;
}
for (int vnet = highest_prio_vnet;
(vnet * counter) >= (counter * lowest_prio_vnet);
vnet -= counter) {
assert(m_out[vnet] != NULL);
assert(m_in[vnet] != NULL);
assert(out != NULL);
assert(in != NULL);
assert(m_units_remaining[vnet] >= 0);
while (bw_remaining > 0 &&
(m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
m_out[vnet]->areNSlotsAvailable(1)) {
while (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
out->areNSlotsAvailable(1)) {
// See if we are done transferring the previous message on
// this virtual network
if (m_units_remaining[vnet] == 0 && m_in[vnet]->isReady()) {
if (m_units_remaining[vnet] == 0 && in->isReady()) {
// Find the size of the message we are moving
MsgPtr msg_ptr = m_in[vnet]->peekMsgPtr();
MsgPtr msg_ptr = in->peekMsgPtr();
NetworkMessage* net_msg_ptr =
safe_cast<NetworkMessage*>(msg_ptr.get());
m_units_remaining[vnet] +=
@ -157,13 +133,12 @@ Throttle::wakeup()
g_system_ptr->curCycle());
// Move the message
m_in[vnet]->dequeue();
m_out[vnet]->enqueue(msg_ptr, m_link_latency);
in->dequeue();
out->enqueue(msg_ptr, m_link_latency);
// Count the message
m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
DPRINTF(RubyNetwork, "%s\n", *m_out[vnet]);
DPRINTF(RubyNetwork, "%s\n", *out);
}
// Calculate the amount of bandwidth we spent on this message
@ -172,14 +147,48 @@ Throttle::wakeup()
bw_remaining = max(0, -diff);
}
if (bw_remaining > 0 &&
(m_in[vnet]->isReady() || m_units_remaining[vnet] > 0) &&
!m_out[vnet]->areNSlotsAvailable(1)) {
if (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
!out->areNSlotsAvailable(1)) {
DPRINTF(RubyNetwork, "vnet: %d", vnet);
// schedule me to wakeup again because I'm waiting for my
// output queue to become available
schedule_wakeup = true;
}
}
void
Throttle::wakeup()
{
// Limits the number of message sent to a limited number of bytes/cycle.
assert(getLinkBandwidth() > 0);
int bw_remaining = getLinkBandwidth();
m_wakeups_wo_switch++;
bool schedule_wakeup = false;
// variable for deciding the direction in which to iterate
bool iteration_direction = false;
// invert priorities to avoid starvation seen in the component network
if (m_wakeups_wo_switch > PRIORITY_SWITCH_LIMIT) {
m_wakeups_wo_switch = 0;
iteration_direction = true;
}
if (iteration_direction) {
for (auto& it : m_in) {
int vnet = it.first;
operateVnet(vnet, bw_remaining, schedule_wakeup,
it.second, m_out[vnet]);
}
} else {
for (auto it = m_in.rbegin(); it != m_in.rend(); ++it) {
int vnet = (*it).first;
operateVnet(vnet, bw_remaining, schedule_wakeup,
(*it).second, m_out[vnet]);
}
}
// We should only wake up when we use the bandwidth
@ -215,7 +224,7 @@ Throttle::regStats(string parent)
for (MessageSizeType type = MessageSizeType_FIRST;
type < MessageSizeType_NUM; ++type) {
m_msg_counts[(unsigned int)type]
.init(m_vnets)
.init(Network::getNumberOfVirtualNetworks())
.name(parent + csprintf(".throttle%i", m_node) + ".msg_count." +
MessageSizeType_to_string(type))
.flags(Stats::nozero)

View file

@ -62,8 +62,8 @@ class Throttle : public Consumer
std::string name()
{ return csprintf("Throttle-%i", m_sID); }
void addLinks(const std::vector<MessageBuffer*>& in_vec,
const std::vector<MessageBuffer*>& out_vec);
void addLinks(const std::map<int, MessageBuffer*>& in_vec,
const std::map<int, MessageBuffer*>& out_vec);
void wakeup();
// The average utilization (a fraction) since last clearStats()
@ -85,16 +85,17 @@ class Throttle : public Consumer
private:
void init(NodeID node, Cycles link_latency, int link_bandwidth_multiplier,
int endpoint_bandwidth);
void addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr);
void operateVnet(int vnet, int &bw_remainin, bool &schedule_wakeup,
MessageBuffer *in, MessageBuffer *out);
// Private copy constructor and assignment operator
Throttle(const Throttle& obj);
Throttle& operator=(const Throttle& obj);
std::vector<MessageBuffer*> m_in;
std::vector<MessageBuffer*> m_out;
unsigned int m_vnets;
std::vector<int> m_units_remaining;
std::map<int, MessageBuffer*> m_in;
std::map<int, MessageBuffer*> m_out;
std::map<int, int> m_units_remaining;
int m_sID;
NodeID m_node;
int m_link_bandwidth_multiplier;

View file

@ -88,13 +88,6 @@ AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
m_delayVCHistogram[virtualNetwork]->sample(delay);
}
void
AbstractController::connectWithPeer(AbstractController *c)
{
getQueuesFromPeer(c);
c->getQueuesFromPeer(this);
}
void
AbstractController::stallBuffer(MessageBuffer* buf, Address addr)
{

View file

@ -96,6 +96,9 @@ class AbstractController : public ClockedObject, public Consumer
virtual void collateStats()
{fatal("collateStats() should be overridden!");}
//! Set the message buffer with given name.
virtual void setNetQueue(const std::string& name, MessageBuffer *b) = 0;
public:
MachineID getMachineID() const { return m_machineID; }
@ -103,25 +106,12 @@ class AbstractController : public ClockedObject, public Consumer
Stats::Histogram& getDelayVCHist(uint32_t index)
{ return *(m_delayVCHistogram[index]); }
MessageBuffer *getPeerQueue(uint32_t pid)
{
std::map<uint32_t, MessageBuffer *>::iterator it =
peerQueueMap.find(pid);
assert(it != peerQueueMap.end());
return (*it).second;
}
protected:
//! Profiles original cache requests including PUTs
void profileRequest(const std::string &request);
//! Profiles the delay associated with messages.
void profileMsgDelay(uint32_t virtualNetwork, Cycles delay);
//! Function for connecting peer controllers
void connectWithPeer(AbstractController *);
virtual void getQueuesFromPeer(AbstractController *)
{ fatal("getQueuesFromPeer() should be called only if implemented!"); }
void stallBuffer(MessageBuffer* buf, Address addr);
void wakeUpBuffers(Address addr);
void wakeUpAllBuffers(Address addr);
@ -147,9 +137,6 @@ class AbstractController : public ClockedObject, public Consumer
unsigned int m_buffer_size;
Cycles m_recycle_latency;
//! Map from physical network number to the Message Buffer.
std::map<uint32_t, MessageBuffer*> peerQueueMap;
//! Counter for the number of cycles when the transitions carried out
//! were equal to the maximum allowed
Stats::Scalar m_fully_busy_cycles;

View file

@ -51,7 +51,12 @@ class StateMachine(Symbol):
def __init__(self, symtab, ident, location, pairs, config_parameters):
super(StateMachine, self).__init__(symtab, ident, location, pairs)
self.table = None
# Data members in the State Machine that have been declared before
# the opening brace '{' of the machine. Note that these along with
# the members in self.objects form the entire set of data members.
self.config_parameters = config_parameters
self.prefetchers = []
for param in config_parameters:
@ -74,6 +79,10 @@ class StateMachine(Symbol):
self.transitions = []
self.in_ports = []
self.functions = []
# Data members in the State Machine that have been declared inside
# the {} machine. Note that these along with the config params
# form the entire set of data members of the machine.
self.objects = []
self.TBEType = None
self.EntryType = None
@ -200,7 +209,13 @@ class $py_ident(RubyController):
if param.rvalue is not None:
dflt_str = str(param.rvalue.inline()) + ', '
if python_class_map.has_key(param.type_ast.type.c_ident):
if param.type_ast.type.c_ident == "MessageBuffer":
if param["network"] == "To":
code('${{param.ident}} = MasterPort(${dflt_str}"")')
else:
code('${{param.ident}} = SlavePort(${dflt_str}"")')
elif python_class_map.has_key(param.type_ast.type.c_ident):
python_type = python_class_map[param.type_ast.type.c_ident]
code('${{param.ident}} = Param.${{python_type}}(${dflt_str}"")')
@ -241,12 +256,9 @@ class $py_ident(RubyController):
''')
seen_types = set()
has_peer = False
for var in self.objects:
if var.type.ident not in seen_types and not var.type.isPrimitive:
code('#include "mem/protocol/${{var.type.c_ident}}.hh"')
if "network" in var and "physical_network" in var:
has_peer = True
seen_types.add(var.type.ident)
# for adding information to the protocol debug trace
@ -260,7 +272,9 @@ class $c_ident : public AbstractController
$c_ident(const Params *p);
static int getNumControllers();
void init();
MessageBuffer* getMandatoryQueue() const;
void setNetQueue(const std::string& name, MessageBuffer *b);
void print(std::ostream& out) const;
void wakeup();
@ -340,8 +354,6 @@ static int m_num_controllers;
if proto:
code('$proto')
if has_peer:
code('void getQueuesFromPeer(AbstractController *);')
if self.EntryType != None:
code('''
@ -404,7 +416,6 @@ void unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr);
code = self.symtab.codeFormatter()
ident = self.ident
c_ident = "%s_Controller" % self.ident
has_peer = False
code('''
/** \\file $c_ident.cc
@ -486,10 +497,17 @@ $c_ident::$c_ident(const Params *p)
# include a sequencer, connect the it to the controller.
#
for param in self.config_parameters:
# Do not initialize messgage buffers since they are initialized
# when the port based connections are made.
if param.type_ast.type.c_ident == "MessageBuffer":
continue
if param.pointer:
code('m_${{param.ident}}_ptr = p->${{param.ident}};')
else:
code('m_${{param.ident}} = p->${{param.ident}};')
if re.compile("sequencer").search(param.ident):
code('m_${{param.ident}}_ptr->setController(this);')
@ -498,20 +516,9 @@ $c_ident::$c_ident(const Params *p)
code('''
m_${{var.ident}}_ptr = new ${{var.type.c_ident}}();
m_${{var.ident}}_ptr->setReceiver(this);
''')
else:
if "network" in var and "physical_network" in var and \
var["network"] == "To":
has_peer = True
code('''
m_${{var.ident}}_ptr = new ${{var.type.c_ident}}();
peerQueueMap[${{var["physical_network"]}}] = m_${{var.ident}}_ptr;
m_${{var.ident}}_ptr->setSender(this);
''')
code('''
if (p->peer != NULL)
connectWithPeer(p->peer);
for (int state = 0; state < ${ident}_State_NUM; state++) {
for (int event = 0; event < ${ident}_Event_NUM; event++) {
@ -528,16 +535,92 @@ for (int event = 0; event < ${ident}_Event_NUM; event++) {
}
void
$c_ident::init()
$c_ident::setNetQueue(const std::string& name, MessageBuffer *b)
{
MachineType machine_type = string_to_MachineType("${{var.machine.ident}}");
MachineType machine_type = string_to_MachineType("${{self.ident}}");
int base M5_VAR_USED = MachineType_base_number(machine_type);
''')
code.indent()
# set for maintaining the vnet, direction pairs already seen for this
# machine. This map helps in implementing the check for avoiding
# multiple message buffers being mapped to the same vnet.
vnet_dir_set = set()
for var in self.config_parameters:
if "network" in var:
vtype = var.type_ast.type
vid = "m_%s_ptr" % var.ident
code('''
if ("${{var.ident}}" == name) {
$vid = b;
assert($vid != NULL);
''')
code.indent()
# Network port object
network = var["network"]
ordered = var["ordered"]
if "virtual_network" in var:
vnet = var["virtual_network"]
vnet_type = var["vnet_type"]
assert (vnet, network) not in vnet_dir_set
vnet_dir_set.add((vnet,network))
code('''
m_net_ptr->set${network}NetQueue(m_version + base, $ordered, $vnet,
"$vnet_type", b);
''')
# Set the end
if network == "To":
code('$vid->setSender(this);')
else:
code('$vid->setReceiver(this);')
# Set ordering
code('$vid->setOrdering(${{var["ordered"]}});')
# Set randomization
if "random" in var:
# A buffer
code('$vid->setRandomization(${{var["random"]}});')
# Set Priority
if "rank" in var:
code('$vid->setPriority(${{var["rank"]}})')
# Set buffer size
code('$vid->resize(m_buffer_size);')
if "recycle_latency" in var:
code('$vid->setRecycleLatency( ' \
'Cycles(${{var["recycle_latency"]}}));')
else:
code('$vid->setRecycleLatency(m_recycle_latency);')
# set description (may be overriden later by port def)
code('''
$vid->setDescription("[Version " + to_string(m_version) + ", ${ident}, name=${{var.ident}}]");
''')
code.dedent()
code('}\n')
code.dedent()
code('''
}
void
$c_ident::init()
{
// initialize objects
''')
code.indent()
for var in self.objects:
vtype = var.type
vid = "m_%s_ptr" % var.ident
@ -589,55 +672,6 @@ $c_ident::init()
code('$vid->setSender(this);')
code('$vid->setReceiver(this);')
else:
# Network port object
network = var["network"]
ordered = var["ordered"]
if "virtual_network" in var:
vnet = var["virtual_network"]
vnet_type = var["vnet_type"]
assert var.machine is not None
code('''
$vid = m_net_ptr->get${network}NetQueue(m_version + base, $ordered, $vnet, "$vnet_type");
assert($vid != NULL);
''')
# Set the end
if network == "To":
code('$vid->setSender(this);')
else:
code('$vid->setReceiver(this);')
# Set ordering
if "ordered" in var:
# A buffer
code('$vid->setOrdering(${{var["ordered"]}});')
# Set randomization
if "random" in var:
# A buffer
code('$vid->setRandomization(${{var["random"]}});')
# Set Priority
if "rank" in var:
code('$vid->setPriority(${{var["rank"]}})')
# Set buffer size
if vtype.isBuffer:
code('''
if (m_buffer_size > 0) {
$vid->resize(m_buffer_size);
}
''')
# set description (may be overriden later by port def)
code('''
$vid->setDescription("[Version " + to_string(m_version) + ", ${ident}, name=${{var.ident}}]");
''')
if vtype.isBuffer:
if "recycle_latency" in var:
code('$vid->setRecycleLatency( ' \
@ -965,6 +999,13 @@ $c_ident::functionalReadBuffers(PacketPtr& pkt)
if vtype.isBuffer:
vid = "m_%s_ptr" % var.ident
code('if ($vid->functionalRead(pkt)) { return true; }')
for var in self.config_parameters:
vtype = var.type_ast.type
if vtype.isBuffer:
vid = "m_%s_ptr" % var.ident
code('if ($vid->functionalRead(pkt)) { return true; }')
code('''
return false;
}
@ -982,31 +1023,18 @@ $c_ident::functionalWriteBuffers(PacketPtr& pkt)
if vtype.isBuffer:
vid = "m_%s_ptr" % var.ident
code('num_functional_writes += $vid->functionalWrite(pkt);')
for var in self.config_parameters:
vtype = var.type_ast.type
if vtype.isBuffer:
vid = "m_%s_ptr" % var.ident
code('num_functional_writes += $vid->functionalWrite(pkt);')
code('''
return num_functional_writes;
}
''')
# Check if this controller has a peer, if yes then write the
# function for connecting to the peer.
if has_peer:
code('''
void
$c_ident::getQueuesFromPeer(AbstractController *peer)
{
''')
for var in self.objects:
if "network" in var and "physical_network" in var and \
var["network"] == "From":
code('''
m_${{var.ident}}_ptr = peer->getPeerQueue(${{var["physical_network"]}});
assert(m_${{var.ident}}_ptr != NULL);
m_${{var.ident}}_ptr->setReceiver(this);
''')
code('}')
code.write(path, "%s.cc" % c_ident)
def printCWakeup(self, path, includes):

View file

@ -39,6 +39,7 @@
#include "dev/etherdevice.hh"
#include "dev/etherobject.hh"
#endif
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/mem_object.hh"
#include "python/swig/pyobject.hh"
#include "sim/full_system.hh"
@ -98,6 +99,27 @@ connectPorts(SimObject *o1, const std::string &name1, int i1,
}
}
#endif
// These could be objects from the ruby memory system. If yes, then at
// least one of them should be an abstract controller. Do a type check.
AbstractController *ac1, *ac2;
ac1 = dynamic_cast<AbstractController*>(o1);
ac2 = dynamic_cast<AbstractController*>(o2);
if (ac1 || ac2) {
MessageBuffer *b = new MessageBuffer();
// set the message buffer associated with the provided names
if (ac1) {
ac1->setNetQueue(name1, b);
}
if (ac2) {
ac2->setNetQueue(name2, b);
}
return 1;
}
MemObject *mo1, *mo2;
mo1 = dynamic_cast<MemObject*>(o1);
mo2 = dynamic_cast<MemObject*>(o2);