ruby: MESI_CMP_directory updated to the new config system

parent 3e286d825d
commit faa76fc248

6 changed files with 215 additions and 105 deletions
configs/ruby/MESI_CMP_directory.py (new file, 151 lines)

@@ -0,0 +1,151 @@
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann

import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath

#
# Note: the L1 Cache latency is only used by the sequencer on fast path hits
#
class L1Cache(RubyCache):
    latency = 3

#
# Note: the L2 Cache latency is not currently used
#
class L2Cache(RubyCache):
    latency = 15

def create_system(options, phys_mem, piobus, dma_devices):

    if buildEnv['PROTOCOL'] != 'MESI_CMP_directory':
        panic("This script requires the MESI_CMP_directory protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be
    # listed before the directory nodes and directory nodes before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc)

        cpu_seq = RubySequencer(icache = l1i_cache,
                                dcache = l1d_cache,
                                physMemPort = phys_mem.port,
                                physmem = phys_mem)

        if piobus != None:
            cpu_seq.pio_port = piobus.port

        l1_cntrl = L1Cache_Controller(version = i,
                                      sequencer = cpu_seq,
                                      L1IcacheMemory = l1i_cache,
                                      L1DcacheMemory = l1d_cache,
                                      l2_select_num_bits = \
                                        math.log(options.num_l2caches, 2))
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

    for i in xrange(options.num_l2caches):
        #
        # First create the Ruby objects associated with this cpu
        #
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc)

        l2_cntrl = L2Cache_Controller(version = i,
                                      L2cacheMemory = l2_cache)

        l2_cntrl_nodes.append(l2_cntrl)

    phys_mem_size = long(phys_mem.range.second) - long(phys_mem.range.first) + 1
    mem_module_size = phys_mem_size / options.num_dirs

    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #

        mem_cntrl = RubyMemoryControl(version = i)

        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        dir_cntrl = Directory_Controller(version = i,
                                         directory = \
                                         RubyDirectoryMemory(version = i,
                                                             size = dir_size),
                                         memBuffer = mem_cntrl)

        dir_cntrl_nodes.append(dir_cntrl)

    for i, dma_device in enumerate(dma_devices):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               physMemPort = phys_mem.port,
                               physmem = phys_mem)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq)

        dma_cntrl.dma_sequencer.port = dma_device.dma
        dma_cntrl_nodes.append(dma_cntrl)

    all_cntrls = l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    return (cpu_sequencers, dir_cntrl_nodes, all_cntrls)
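The new script mirrors the other per-protocol configuration files: a caller hands it the parsed options, the physical memory, an optional I/O bus, and a list of DMA devices, and it returns the sequencers and controllers from which the Ruby network is built. A minimal caller sketch (the `options` and `system.physmem` names are illustrative assumptions, not part of this commit):

import MESI_CMP_directory

(cpu_sequencers, dir_cntrls, all_cntrls) = \
    MESI_CMP_directory.create_system(options,         # parsed option values
                                     system.physmem,  # PhysicalMemory (assumed name)
                                     None,            # no piobus in this sketch
                                     [])              # no DMA devices

Note that math.log(options.num_l2caches, 2) returns a float, so the l2_select_num_bits expression assumes the number of L2 caches is a power of two. The next two hunks register the module with the shared protocol-selection script.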
@@ -33,6 +33,7 @@ from m5.defines import buildEnv
 from m5.util import addToPath

 import MOESI_hammer
+import MESI_CMP_directory
 import MOESI_CMP_directory
 import MI_example
 import MOESI_CMP_token

@@ -47,6 +48,12 @@ def create_system(options, physmem, piobus = None, dma_devices = []):
                                          physmem, \
                                          piobus, \
                                          dma_devices)
+    elif protocol == "MESI_CMP_directory":
+        (cpu_sequencers, dir_cntrls, all_cntrls) = \
+             MESI_CMP_directory.create_system(options, \
+                                              physmem, \
+                                              piobus, \
+                                              dma_devices)
     elif protocol == "MOESI_CMP_directory":
         (cpu_sequencers, dir_cntrls, all_cntrls) = \
              MOESI_CMP_directory.create_system(options, \
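The protocol-selection script keeps every protocol behind the same interface; each branch returns the identical (cpu_sequencers, dir_cntrls, all_cntrls) triple. A condensed sketch of the pattern these hunks extend (illustrative, not the full file):

protocol = buildEnv['PROTOCOL']

if protocol == "MESI_CMP_directory":
    (cpu_sequencers, dir_cntrls, all_cntrls) = \
        MESI_CMP_directory.create_system(options, physmem, piobus, dma_devices)
elif protocol == "MOESI_CMP_directory":
    (cpu_sequencers, dir_cntrls, all_cntrls) = \
        MOESI_CMP_directory.create_system(options, physmem, piobus, dma_devices)

Because each module is named after its PROTOCOL build variable, adding a protocol amounts to an import plus one elif branch.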
@@ -28,11 +28,13 @@
  */

 machine(L1Cache, "MSI Directory L1 Cache CMP")
-: int l1_request_latency,
-  int l1_response_latency,
-  int to_l2_latency,
-  int l2_select_low_bit,
-  int l2_select_num_bits
+: Sequencer * sequencer,
+  CacheMemory * L1IcacheMemory,
+  CacheMemory * L1DcacheMemory,
+  int l2_select_num_bits,
+  int l1_request_latency = 2,
+  int l1_response_latency = 2,
+  int to_l2_latency = 1
 {


@@ -118,16 +120,6 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
     int pendingAcks, default="0", desc="number of pending acks";
   }

-  external_type(CacheMemory) {
-    bool cacheAvail(Address);
-    Address cacheProbe(Address);
-    void allocate(Address, Entry);
-    void deallocate(Address);
-    Entry lookup(Address);
-    void changePermission(Address, AccessPermission);
-    bool isTagPresent(Address);
-  }
-
   external_type(TBETable) {
     TBE lookup(Address);
     void allocate(Address);

@@ -137,30 +129,17 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")

   TBETable L1_TBEs, template_hack="<L1Cache_TBE>";

-  // CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
-  // CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
-
-  CacheMemory L1IcacheMemory, factory='RubySystem::getCache(m_cfg["icache"])';
-  CacheMemory L1DcacheMemory, factory='RubySystem::getCache(m_cfg["dcache"])';
-
-  // MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
-  // Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
-
   MessageBuffer mandatoryQueue, ordered="false";
-  Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';

   int cache_state_to_int(State state);
+  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

   // inclusive cache returns L1 entries only
   Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
     if (L1DcacheMemory.isTagPresent(addr)) {
-      return L1DcacheMemory[addr];
+      return static_cast(Entry, L1DcacheMemory[addr]);
     } else {
-      return L1IcacheMemory[addr];
+      return static_cast(Entry, L1IcacheMemory[addr]);
     }
   }
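In the L1 machine, objects that used to be constructed inside SLICC through factory hooks (the two caches and the sequencer) become machine parameters, so the Python configuration now owns their construction; the latencies become defaulted parameters at the same time. These parameters surface as keyword arguments on the generated controller, as the new config script shows. A hedged sketch of an instance that also overrides a default (the override value is illustrative):

l1_cntrl = L1Cache_Controller(version = 0,
                              sequencer = cpu_seq,         # Sequencer * sequencer
                              L1IcacheMemory = l1i_cache,  # CacheMemory * L1IcacheMemory
                              L1DcacheMemory = l1d_cache,  # CacheMemory * L1DcacheMemory
                              l2_select_num_bits = 1,      # required, no default
                              l1_request_latency = 4)      # overrides the SLICC default of 2

The getL1CacheEntry change is the matching half: entries now come back untyped from the shared CacheMemory and are narrowed with static_cast(Entry, ...).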
@@ -33,9 +33,10 @@
  */

 machine(L2Cache, "MESI Directory L2 Cache CMP")
-: int l2_request_latency,
-  int l2_response_latency,
-  int to_l1_latency
+: CacheMemory * L2cacheMemory,
+  int l2_request_latency = 2,
+  int l2_response_latency = 2,
+  int to_l1_latency = 1
 {

   // L2 BANK QUEUES

@@ -145,17 +146,6 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
     int pendingAcks, desc="number of pending acks for invalidates during writeback";
   }

-  external_type(CacheMemory) {
-    bool cacheAvail(Address);
-    Address cacheProbe(Address);
-    void allocate(Address, Entry);
-    void deallocate(Address);
-    Entry lookup(Address);
-    void changePermission(Address, AccessPermission);
-    bool isTagPresent(Address);
-    void setMRU(Address);
-  }
-
   external_type(TBETable) {
     TBE lookup(Address);
     void allocate(Address);

@@ -165,14 +155,9 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")

   TBETable L2_TBEs, template_hack="<L2Cache_TBE>";

-  // CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)';
-
-  CacheMemory L2cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';
-
   // inclusive cache, returns L2 entries only
   Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
-    return L2cacheMemory[addr];
+    return static_cast(Entry, L2cacheMemory[addr]);
   }

   void changeL2Permission(Address addr, AccessPermission permission) {

@@ -190,13 +175,13 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
   }

   bool isOneSharerLeft(Address addr, MachineID requestor) {
-    assert(L2cacheMemory[addr].Sharers.isElement(requestor));
-    return (L2cacheMemory[addr].Sharers.count() == 1);
+    assert(getL2CacheEntry(addr).Sharers.isElement(requestor));
+    return (getL2CacheEntry(addr).Sharers.count() == 1);
   }

   bool isSharer(Address addr, MachineID requestor) {
     if (L2cacheMemory.isTagPresent(addr)) {
-      return L2cacheMemory[addr].Sharers.isElement(requestor);
+      return getL2CacheEntry(addr).Sharers.isElement(requestor);
     } else {
       return false;
     }

@@ -206,7 +191,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
     //DEBUG_EXPR(machineID);
     //DEBUG_EXPR(requestor);
     //DEBUG_EXPR(addr);
-    L2cacheMemory[addr].Sharers.add(requestor);
+    getL2CacheEntry(addr).Sharers.add(requestor);
   }

   State getState(Address addr) {

@@ -361,7 +346,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
       trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
     } else {
       // No room in the L2, so we need to make room before handling the request
-      if (L2cacheMemory[ L2cacheMemory.cacheProbe(in_msg.Address) ].Dirty ) {
+      if (getL2CacheEntry( L2cacheMemory.cacheProbe(in_msg.Address) ).Dirty ) {
         trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
       } else {
         trigger(Event:L2_Replacement_clean, L2cacheMemory.cacheProbe(in_msg.Address));

@@ -393,7 +378,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
       out_msg.Address := address;
       out_msg.Type := in_msg.Type;
       out_msg.Requestor := in_msg.Requestor;
-      out_msg.Destination.add(L2cacheMemory[address].Exclusive);
+      out_msg.Destination.add(getL2CacheEntry(address).Exclusive);
       out_msg.MessageSize := MessageSizeType:Request_Control;
     }
   }

@@ -537,7 +522,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
       out_msg.Address := address;
       out_msg.Type := CoherenceRequestType:INV;
       out_msg.Requestor := machineID;
-      out_msg.Destination := L2cacheMemory[address].Sharers;
+      out_msg.Destination := getL2CacheEntry(address).Sharers;
       out_msg.MessageSize := MessageSizeType:Request_Control;
     }
   }

@@ -548,7 +533,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
       out_msg.Address := address;
       out_msg.Type := CoherenceRequestType:INV;
       out_msg.Requestor := in_msg.Requestor;
-      out_msg.Destination := L2cacheMemory[address].Sharers;
+      out_msg.Destination := getL2CacheEntry(address).Sharers;
       out_msg.MessageSize := MessageSizeType:Request_Control;
     }
   }

@@ -561,7 +546,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
       out_msg.Address := address;
       out_msg.Type := CoherenceRequestType:INV;
       out_msg.Requestor := in_msg.Requestor;
-      out_msg.Destination := L2cacheMemory[address].Sharers;
+      out_msg.Destination := getL2CacheEntry(address).Sharers;
       out_msg.Destination.remove(in_msg.Requestor);
       out_msg.MessageSize := MessageSizeType:Request_Control;
     }

@@ -713,28 +698,28 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")

   action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
     peek(L1RequestIntraChipL2Network_in, RequestMsg) {
-      L2cacheMemory[address].Sharers.remove(in_msg.Requestor);
+      getL2CacheEntry(address).Sharers.remove(in_msg.Requestor);
     }
   }

   action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
     peek(L1RequestIntraChipL2Network_in, RequestMsg) {
-      L2cacheMemory[address].Sharers.clear();
+      getL2CacheEntry(address).Sharers.clear();
     }
   }

   action(mm_markExclusive, "\m", desc="set the exclusive owner") {
     peek(L1RequestIntraChipL2Network_in, RequestMsg) {
-      L2cacheMemory[address].Sharers.clear();
-      L2cacheMemory[address].Exclusive := in_msg.Requestor;
+      getL2CacheEntry(address).Sharers.clear();
+      getL2CacheEntry(address).Exclusive := in_msg.Requestor;
       addSharer(address, in_msg.Requestor);
     }
   }

   action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
     peek(L1unblockNetwork_in, ResponseMsg) {
-      L2cacheMemory[address].Sharers.clear();
-      L2cacheMemory[address].Exclusive := in_msg.Sender;
+      getL2CacheEntry(address).Sharers.clear();
+      getL2CacheEntry(address).Exclusive := in_msg.Sender;
       addSharer(address, in_msg.Sender);
     }
   }
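The L2 machine gets the same treatment: the bank's CacheMemory arrives as a constructor argument, and every direct L2cacheMemory[address] field access is funneled through the getL2CacheEntry accessor, so the static_cast to the machine's Entry type happens in one place. A hedged Python sketch of the matching instantiation (size, associativity, and the latency override are example values):

l2_cache = L2Cache(size = '512kB', assoc = 8)
l2_cntrl = L2Cache_Controller(version = 0,
                              L2cacheMemory = l2_cache,
                              to_l1_latency = 2)  # overrides the SLICC default of 1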
@@ -36,8 +36,10 @@


 machine(Directory, "MESI_CMP_filter_directory protocol")
-: int to_mem_ctrl_latency,
-  int directory_latency
+: DirectoryMemory * directory,
+  MemoryControl * memBuffer,
+  int to_mem_ctrl_latency = 1,
+  int directory_latency = 6
 {

   MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false";

@@ -78,23 +80,13 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
   // TYPES

   // DirectoryEntry
-  structure(Entry, desc="...") {
+  structure(Entry, desc="...", interface="AbstractEntry") {
     State DirectoryState, desc="Directory state";
     DataBlock DataBlk, desc="data for the block";
     NetDest Sharers, desc="Sharers for this block";
     NetDest Owner, desc="Owner of this block";
   }

-  external_type(DirectoryMemory) {
-    Entry lookup(Address);
-    bool isPresent(Address);
-  }
-
-  // to simulate detailed DRAM
-  external_type(MemoryControl, inport="yes", outport="yes") {
-
-  }
-
   // TBE entries for DMA requests
   structure(TBE, desc="TBE entries for outstanding DMA requests") {
     Address PhysicalAddress, desc="physical address";

@@ -113,21 +105,17 @@ machine(Directory, "MESI_CMP_filter_directory protocol")

   // ** OBJECTS **

-  // DirectoryMemory directory, constructor_hack="i";
-  // MemoryControl memBuffer, constructor_hack="i";
-
-  DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory"])';
-  MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_control"])';
-
   TBETable TBEs, template_hack="<Directory_TBE>";

+  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
+    return static_cast(Entry, directory[addr]);
+  }
+
   State getState(Address addr) {
     if (TBEs.isPresent(addr)) {
       return TBEs[addr].TBEState;
     } else if (directory.isPresent(addr)) {
-      return directory[addr].DirectoryState;
+      return getDirectoryEntry(addr).DirectoryState;
     } else {
       return State:I;
     }

@@ -143,14 +131,14 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
     if (directory.isPresent(addr)) {

       if (state == State:I) {
-        assert(directory[addr].Owner.count() == 0);
-        assert(directory[addr].Sharers.count() == 0);
+        assert(getDirectoryEntry(addr).Owner.count() == 0);
+        assert(getDirectoryEntry(addr).Sharers.count() == 0);
       } else if (state == State:M) {
-        assert(directory[addr].Owner.count() == 1);
-        assert(directory[addr].Sharers.count() == 0);
+        assert(getDirectoryEntry(addr).Owner.count() == 1);
+        assert(getDirectoryEntry(addr).Sharers.count() == 0);
       }

-      directory[addr].DirectoryState := state;
+      getDirectoryEntry(addr).DirectoryState := state;
     }
   }

@@ -281,7 +269,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
       out_msg.OriginalRequestorMachId := in_msg.Requestor;
       out_msg.MessageSize := in_msg.MessageSize;
       out_msg.Prefetch := in_msg.Prefetch;
-      out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+      out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;

       DEBUG_EXPR(out_msg);
     }

@@ -306,7 +294,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol")

   action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
     peek(responseNetwork_in, ResponseMsg) {
-      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
+      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
       DEBUG_EXPR(in_msg.Address);
       DEBUG_EXPR(in_msg.DataBlk);
     }

@@ -320,7 +308,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
       out_msg.Sender := machineID;
       out_msg.OriginalRequestorMachId := machineID;
       out_msg.MessageSize := in_msg.MessageSize;
-      out_msg.DataBlk := directory[address].DataBlk;
+      out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
       DEBUG_EXPR(out_msg);
     }
   }

@@ -344,7 +332,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol")

   action(dw_writeDMAData, "dw", desc="DMA Write data to memory") {
     peek(requestNetwork_in, RequestMsg) {
-      directory[address].DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
+      getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
     }
   }

@@ -386,8 +374,8 @@ machine(Directory, "MESI_CMP_filter_directory protocol")

   action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
     peek(requestNetwork_in, RequestMsg) {
-      directory[address].Owner.clear();
-      directory[address].Owner.add(in_msg.Requestor);
+      getDirectoryEntry(address).Owner.clear();
+      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
     }
   }

@@ -398,7 +386,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
       out_msg.Address := address;
       out_msg.Type := CoherenceResponseType:INV;
       out_msg.Sender := machineID;
-      out_msg.Destination := directory[address].Owner;
+      out_msg.Destination := getDirectoryEntry(address).Owner;
       out_msg.MessageSize := MessageSizeType:Response_Control;
     }
   }

@@ -418,7 +406,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
   }

   action(c_clearOwner, "c", desc="Clear the owner field") {
-    directory[address].Owner.clear();
+    getDirectoryEntry(address).Owner.clear();
   }

   action(v_allocateTBE, "v", desc="Allocate TBE") {

@@ -431,8 +419,8 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
   }

   action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
-    //directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
-    directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
+    //getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
+    getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
   }
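The directory machine follows suit: DirectoryMemory and the MemoryControl DRAM model become machine parameters, the Entry structure is declared against the generic AbstractEntry interface, and the new getDirectoryEntry helper performs the static_cast once so every directory[addr] access is retyped consistently. A hedged Python sketch of the matching instantiation (the latency override is illustrative):

mem_cntrl = RubyMemoryControl(version = 0)
dir_cntrl = Directory_Controller(version = 0,
                                 directory = RubyDirectoryMemory(version = 0,
                                                                 size = dir_size),
                                 memBuffer = mem_cntrl,
                                 directory_latency = 8)  # overrides the SLICC default of 6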
@@ -1,6 +1,7 @@

 machine(DMA, "DMA Controller")
-: int request_latency
+: DMASequencer * dma_sequencer,
+  int request_latency = 6
 {

   MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", no_vector="true";

@@ -25,7 +26,6 @@ machine(DMA, "DMA Controller")
   }

   MessageBuffer mandatoryQueue, ordered="false", no_vector="true";
-  DMASequencer dma_sequencer, factory='RubySystem::getDMASequencer(m_cfg["dma_sequencer"])', no_vector="true";
   State cur_state, no_vector="true";

   State getState(Address addr) {
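Finally, the DMA machine's sequencer moves from a factory-built member to a constructor argument, with its request latency defaulted. A hedged sketch of the Python side (the latency override is illustrative):

dma_seq = DMASequencer(version = 0,
                       physMemPort = phys_mem.port,
                       physmem = phys_mem)
dma_cntrl = DMA_Controller(version = 0,
                           dma_sequencer = dma_seq,
                           request_latency = 10)  # overrides the SLICC default of 6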