ruby: moesi cmp directory: cosmetic changes

Updates copyright years, removes trailing whitespace at line ends, and
shortens variable names: L1IcacheMemory becomes L1Icache, L1DcacheMemory
becomes L1Dcache, and L2cacheMemory becomes L2cache.
Nilay Vaish 2013-05-21 11:32:15 -05:00
parent 9bc75e3c58
commit e7ce518168
4 changed files with 68 additions and 81 deletions
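Since the shortened names are the keyword parameters of the generated controller objects, any Ruby configuration script that instantiates these controllers has to switch to the new spellings. A minimal sketch of a post-commit call site, not a complete script; the loop variables and cache objects are assumed to come from the create_system() hunks shown below:

    # Hypothetical excerpt from a Ruby config script after this commit;
    # i, cntrl_count, l2_bits, options, ruby_system and the cache objects
    # are taken from the surrounding create_system() loop in the first diff.
    l1_cntrl = L1Cache_Controller(version = i,
                                  cntrl_id = cntrl_count,
                                  L1Icache = l1i_cache,    # was L1IcacheMemory
                                  L1Dcache = l1d_cache,    # was L1DcacheMemory
                                  l2_select_num_bits = l2_bits,
                                  send_evictions = (
                                      options.cpu_type == "detailed"))

    l2_cntrl = L2Cache_Controller(version = i,
                                  cntrl_id = cntrl_count,
                                  L2cache = l2_cache,      # was L2cacheMemory
                                  ruby_system = ruby_system)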

View file

@@ -89,8 +89,8 @@ def create_system(options, system, piobus, dma_ports, ruby_system):
         l1_cntrl = L1Cache_Controller(version = i,
                                       cntrl_id = cntrl_count,
-                                      L1IcacheMemory = l1i_cache,
-                                      L1DcacheMemory = l1d_cache,
+                                      L1Icache = l1i_cache,
+                                      L1Dcache = l1d_cache,
                                       l2_select_num_bits = l2_bits,
                                       send_evictions = (
                                           options.cpu_type == "detailed"),
@@ -127,7 +127,7 @@ def create_system(options, system, piobus, dma_ports, ruby_system):
         l2_cntrl = L2Cache_Controller(version = i,
                                       cntrl_id = cntrl_count,
-                                      L2cacheMemory = l2_cache,
+                                      L2cache = l2_cache,
                                       ruby_system = ruby_system)
         exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -26,15 +26,10 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/*
- * $Id$
- *
- */
-
 machine(L1Cache, "Directory protocol")
 : Sequencer * sequencer,
-  CacheMemory * L1IcacheMemory,
-  CacheMemory * L1DcacheMemory,
+  CacheMemory * L1Icache,
+  CacheMemory * L1Dcache,
   int l2_select_num_bits,
   Cycles request_latency = 2,
   Cycles use_timeout_latency = 50,
@@ -147,21 +142,21 @@ machine(L1Cache, "Directory protocol")
   int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
 
   Entry getCacheEntry(Address addr), return_by_pointer="yes" {
-    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
+    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
     if(is_valid(L1Dcache_entry)) {
       return L1Dcache_entry;
     }
 
-    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
+    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
     return L1Icache_entry;
   }
 
   Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
-    return static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
+    return static_cast(Entry, "pointer", L1Dcache.lookup(addr));
   }
 
   Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
-    return static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
+    return static_cast(Entry, "pointer", L1Icache.lookup(addr));
   }
 
   State getState(TBE tbe, Entry cache_entry, Address addr) {
@@ -174,7 +169,7 @@ machine(L1Cache, "Directory protocol")
   }
 
   void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
-    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
 
     if (is_valid(tbe)) {
       tbe.TBEState := state;
@@ -365,7 +360,7 @@ machine(L1Cache, "Directory protocol")
           trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
                   TBEs[in_msg.LineAddress]);
         }
-        if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
+        if (L1Icache.cacheAvail(in_msg.LineAddress)) {
           // L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
           trigger(mandatory_request_type_to_event(in_msg.Type),
                   in_msg.LineAddress, L1Icache_entry,
@@ -373,9 +368,9 @@ machine(L1Cache, "Directory protocol")
         } else {
           // No room in the L1, so we need to make room in the L1
           trigger(Event:L1_Replacement,
-                  L1IcacheMemory.cacheProbe(in_msg.LineAddress),
-                  getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
-                  TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
+                  L1Icache.cacheProbe(in_msg.LineAddress),
+                  getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
+                  TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
         }
       }
     } else {
@@ -396,7 +391,7 @@ machine(L1Cache, "Directory protocol")
           trigger(Event:L1_Replacement, in_msg.LineAddress,
                   L1Icache_entry, TBEs[in_msg.LineAddress]);
         }
-        if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
+        if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
           // L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
           trigger(mandatory_request_type_to_event(in_msg.Type),
                   in_msg.LineAddress, L1Dcache_entry,
@@ -404,9 +399,9 @@ machine(L1Cache, "Directory protocol")
         } else {
          // No room in the L1, so we need to make room in the L1
           trigger(Event:L1_Replacement,
-                  L1DcacheMemory.cacheProbe(in_msg.LineAddress),
-                  getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
-                  TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
+                  L1Dcache.cacheProbe(in_msg.LineAddress),
+                  getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
+                  TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
         }
       }
     }
@@ -831,23 +826,23 @@ machine(L1Cache, "Directory protocol")
   }
 
   action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
-    if (L1DcacheMemory.isTagPresent(address)) {
-      L1DcacheMemory.deallocate(address);
+    if (L1Dcache.isTagPresent(address)) {
+      L1Dcache.deallocate(address);
     } else {
-      L1IcacheMemory.deallocate(address);
+      L1Icache.deallocate(address);
     }
     unset_cache_entry();
   }
 
   action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
     if ((is_invalid(cache_entry))) {
-      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
+      set_cache_entry(L1Dcache.allocate(address, new Entry));
     }
   }
 
   action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
     if ((is_invalid(cache_entry))) {
-      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
+      set_cache_entry(L1Icache.allocate(address, new Entry));
     }
   }

View file

@@ -1,6 +1,5 @@
 /*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,13 +26,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/*
- * $Id$
- *
- */
-
 machine(L2Cache, "Token protocol")
-: CacheMemory * L2cacheMemory,
+: CacheMemory * L2cache,
   Cycles response_latency = 2,
   Cycles request_latency = 2
 {
@@ -233,7 +227,7 @@ machine(L2Cache, "Token protocol")
   void unset_tbe();
 
   Entry getCacheEntry(Address address), return_by_pointer="yes" {
-    return static_cast(Entry, "pointer", L2cacheMemory[address]);
+    return static_cast(Entry, "pointer", L2cache[address]);
   }
 
   bool isDirTagPresent(Address addr) {
@@ -453,7 +447,7 @@ machine(L2Cache, "Token protocol")
   }
 
   void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
-    assert((localDirectory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
+    assert((localDirectory.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
 
     if (is_valid(tbe)) {
       tbe.TBEState := state;
@@ -653,10 +647,10 @@ machine(L2Cache, "Token protocol")
       } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
         Entry cache_entry := getCacheEntry(in_msg.Address);
         if (is_invalid(cache_entry) &&
-            L2cacheMemory.cacheAvail(in_msg.Address) == false) {
-          trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address),
-                  getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)),
-                  TBEs[L2cacheMemory.cacheProbe(in_msg.Address)]);
+            L2cache.cacheAvail(in_msg.Address) == false) {
+          trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.Address),
+                  getCacheEntry(L2cache.cacheProbe(in_msg.Address)),
+                  TBEs[L2cache.cacheProbe(in_msg.Address)]);
         }
         else {
           trigger(Event:L1_WBDIRTYDATA, in_msg.Address,
@@ -665,10 +659,10 @@ machine(L2Cache, "Token protocol")
       } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_DATA) {
         Entry cache_entry := getCacheEntry(in_msg.Address);
         if (is_invalid(cache_entry) &&
-            L2cacheMemory.cacheAvail(in_msg.Address) == false) {
-          trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address),
-                  getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)),
-                  TBEs[L2cacheMemory.cacheProbe(in_msg.Address)]);
+            L2cache.cacheAvail(in_msg.Address) == false) {
+          trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.Address),
+                  getCacheEntry(L2cache.cacheProbe(in_msg.Address)),
+                  TBEs[L2cache.cacheProbe(in_msg.Address)]);
         }
         else {
           trigger(Event:L1_WBCLEANDATA, in_msg.Address,
@@ -1406,7 +1400,7 @@ machine(L2Cache, "Token protocol")
 
   action( r_setMRU, "\rrr", desc="manually set the MRU bit for cache line" ) {
     if(is_valid(cache_entry)) {
-      L2cacheMemory.setMRU(address);
+      L2cache.setMRU(address);
     }
   }
@@ -1459,11 +1453,11 @@ machine(L2Cache, "Token protocol")
   }
 
   action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
-    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
+    set_cache_entry(L2cache.allocate(address, new Entry));
   }
 
   action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
-    L2cacheMemory.deallocate(address);
+    L2cache.deallocate(address);
     unset_cache_entry();
   }

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -26,10 +26,6 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/*
- * $Id$
- */
-
 machine(Directory, "Directory protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
@@ -196,7 +192,8 @@ machine(Directory, "Directory protocol")
     return getDirectoryEntry(addr).DataBlk;
   }
 
-  // if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
+  // if no sharers, then directory can be considered
+  // both a sharer and exclusive w.r.t. coherence checking
   bool isBlockShared(Address addr) {
     if (directory.isPresent(addr)) {
       if (getDirectoryEntry(addr).DirectoryState == State:I) {
@@ -219,7 +216,6 @@ machine(Directory, "Directory protocol")
   // ** OUT_PORTS **
   out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
   out_port(responseNetwork_out, ResponseMsg, responseFromDir);
-//  out_port(requestQueue_out, ResponseMsg, requestFromDir);  // For recycling requests
   out_port(memQueue_out, MemoryMsg, memBuffer);
 
   // ** IN_PORTS **
@@ -422,7 +418,8 @@ machine(Directory, "Directory protocol")
   action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
     peek(requestQueue_in, RequestMsg) {
       if ((getDirectoryEntry(in_msg.Address).Sharers.count() > 1) ||
-          ((getDirectoryEntry(in_msg.Address).Sharers.count() > 0) && (getDirectoryEntry(in_msg.Address).Sharers.isElement(in_msg.Requestor) == false))) {
+          ((getDirectoryEntry(in_msg.Address).Sharers.count() > 0) &&
+           (getDirectoryEntry(in_msg.Address).Sharers.isElement(in_msg.Requestor) == false))) {
         enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
           out_msg.Address := address;
           out_msg.Type := CoherenceRequestType:INV;
@@ -509,7 +506,8 @@ machine(Directory, "Directory protocol")
         out_msg.MessageSize := in_msg.MessageSize;
         //out_msg.Prefetch := false;
         // These are not used by memory but are passed back here with the read data:
-        out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && getDirectoryEntry(address).Sharers.count() == 0);
+        out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS &&
+                          getDirectoryEntry(address).Sharers.count() == 0);
         out_msg.Acks := getDirectoryEntry(address).Sharers.count();
         if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
           out_msg.Acks := out_msg.Acks - 1;