ruby: moesi cmp token: cosmetic changes
Updates copyright years, removes trailing whitespace, and shortens variable names (L1IcacheMemory → L1Icache, L1DcacheMemory → L1Dcache, L2cacheMemory → L2cache).
This commit is contained in:
parent e7ce518168
commit bd3d1955da

5 changed files with 85 additions and 97 deletions
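The renames are mechanical and must be applied to the SLICC machine() declarations and to the Ruby config in lockstep, since SLICC turns each machine() argument into a controller parameter of the same name that the config passes as a keyword argument. A minimal sketch of how such a bulk rename could be scripted is shown below; the helper script and its name are assumptions for illustration, not part of this commit.

```python
# rename_params.py -- hypothetical helper, not part of this commit.
# Applies the identifier renames described in the commit message across a
# set of files (e.g. the protocol .sm sources and the Ruby config) and
# strips trailing whitespace while it is at it.
import re
import sys

RENAMES = [
    ("L1IcacheMemory", "L1Icache"),
    ("L1DcacheMemory", "L1Dcache"),
    ("L2cacheMemory", "L2cache"),
]

def rewrite(path):
    with open(path) as f:
        text = f.read()
    for old, new in RENAMES:
        # \b keeps longer identifiers that merely contain `old` untouched
        text = re.sub(r"\b%s\b" % old, new, text)
    # drop trailing spaces, mirroring the whitespace cleanup in the diff
    text = "\n".join(line.rstrip() for line in text.split("\n"))
    with open(path, "w") as f:
        f.write(text)

if __name__ == "__main__":
    for path in sys.argv[1:]:
        rewrite(path)
```

Run as `python rename_params.py <files>`, this would reproduce the rename and whitespace portions of the diff below; the copyright-year updates and the removal of the old `$Id$` header blocks would still be manual edits.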
@@ -100,8 +100,8 @@ def create_system(options, system, piobus, dma_ports, ruby_system):
         l1_cntrl = L1Cache_Controller(version = i,
                                       cntrl_id = cntrl_count,
-                                      L1IcacheMemory = l1i_cache,
-                                      L1DcacheMemory = l1d_cache,
+                                      L1Icache = l1i_cache,
+                                      L1Dcache = l1d_cache,
                                       l2_select_num_bits = l2_bits,
                                       N_tokens = n_tokens,
                                       retry_threshold = \
@@ -147,7 +147,7 @@ def create_system(options, system, piobus, dma_ports, ruby_system):
         l2_cntrl = L2Cache_Controller(version = i,
                                       cntrl_id = cntrl_count,
-                                      L2cacheMemory = l2_cache,
+                                      L2cache = l2_cache,
                                       N_tokens = n_tokens,
                                       ruby_system = ruby_system)

@@ -31,10 +31,10 @@
  *
  */

-machine(L1Cache, "Token protocol") 
+machine(L1Cache, "Token protocol")
  : Sequencer * sequencer,
-   CacheMemory * L1IcacheMemory,
-   CacheMemory * L1DcacheMemory,
+   CacheMemory * L1Icache,
+   CacheMemory * L1Dcache,
    int l2_select_num_bits,
    int N_tokens,
@@ -175,7 +175,7 @@ machine(L1Cache, "Token protocol")
   int countStarvingForAddress(Address);
   int countReadStarvingForAddress(Address);
 }
- 
+
 void set_cache_entry(AbstractCacheEntry b);
 void unset_cache_entry();
 void set_tbe(TBE b);
@@ -224,12 +224,12 @@ machine(L1Cache, "Token protocol")
   }

   Entry getCacheEntry(Address addr), return_by_pointer="yes" {
-    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
+    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
     if(is_valid(L1Dcache_entry)) {
       return L1Dcache_entry;
     }

-    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
+    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
     return L1Icache_entry;
   }
@@ -238,12 +238,12 @@ machine(L1Cache, "Token protocol")
   }

   Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
-    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
+    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
     return L1Dcache_entry;
   }

   Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
-    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
+    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
     return L1Icache_entry;
   }
@@ -271,7 +271,7 @@ machine(L1Cache, "Token protocol")
   }

   void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
-    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);

     if (is_valid(tbe)) {
       assert(state != State:I);
@@ -404,10 +404,10 @@ machine(L1Cache, "Token protocol")
       //
       // NOTE direct local hits should not call this
       //
-      return GenericMachineType:L1Cache_wCC;
+      return GenericMachineType:L1Cache_wCC;
     } else if (machineIDToMachineType(sender) == MachineType:L2Cache) {

-      if (sender == (mapAddressToRange(addr, 
+      if (sender == (mapAddressToRange(addr,
                      MachineType:L2Cache,
                      l2_select_low_bit,
                      l2_select_num_bits))) {
@@ -501,7 +501,7 @@ machine(L1Cache, "Token protocol")

       // React to the message based on the current state of the table
       Entry cache_entry := getCacheEntry(in_msg.Address);
-      TBE tbe := L1_TBEs[in_msg.Address];
+      TBE tbe := L1_TBEs[in_msg.Address];

       if (persistentTable.isLocked(in_msg.Address)) {
         if (persistentTable.findSmallest(in_msg.Address) == machineID) {
@@ -623,7 +623,7 @@ machine(L1Cache, "Token protocol")
                     cache_entry, tbe);
           }
         } else if (in_msg.Type == CoherenceRequestType:GETS) {
-          if (getTokens(cache_entry) == 1 ||
+          if (getTokens(cache_entry) == 1 ||
               getTokens(cache_entry) == (max_tokens() / 2) + 1) {
             if (in_msg.isLocal) {
               trigger(Event:Transient_Local_GETS_Last_Token, in_msg.Address,
@@ -678,16 +678,16 @@ machine(L1Cache, "Token protocol")
                     L1Dcache_entry, tbe);
           }

-          if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
+          if (L1Icache.cacheAvail(in_msg.LineAddress)) {
             // L1 does't have the line, but we have space for it in the L1
             trigger(mandatory_request_type_to_event(in_msg.Type),
                     in_msg.LineAddress, L1Icache_entry, tbe);
           } else {
             // No room in the L1, so we need to make room
             trigger(Event:L1_Replacement,
-                    L1IcacheMemory.cacheProbe(in_msg.LineAddress),
-                    getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
-                    L1_TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
+                    L1Icache.cacheProbe(in_msg.LineAddress),
+                    getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
+                    L1_TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
           }
         }
       } else {
@@ -709,16 +709,16 @@ machine(L1Cache, "Token protocol")
                     L1Icache_entry, tbe);
           }

-          if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
+          if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
             // L1 does't have the line, but we have space for it in the L1
             trigger(mandatory_request_type_to_event(in_msg.Type),
                     in_msg.LineAddress, L1Dcache_entry, tbe);
           } else {
             // No room in the L1, so we need to make room
             trigger(Event:L1_Replacement,
-                    L1DcacheMemory.cacheProbe(in_msg.LineAddress),
-                    getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
-                    L1_TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
+                    L1Dcache.cacheProbe(in_msg.LineAddress),
+                    getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
+                    L1_TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
           }
         }
       }
@@ -744,14 +744,14 @@ machine(L1Cache, "Token protocol")
       out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
       out_msg.Requestor := machineID;
       out_msg.Destination.broadcast(MachineType:L1Cache);
-      
+
       //
       // Currently the configuration system limits the system to only one
       // chip. Therefore, if we assume one shared L2 cache, then only one
       // pertinent L2 cache exist.
       //
       //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
-      
+
       out_msg.Destination.add(mapAddressToRange(address,
                               MachineType:L2Cache,
                               l2_select_low_bit,
@@ -872,7 +872,7 @@ machine(L1Cache, "Token protocol")
       // pertinent L2 cache exist.
       //
       //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
-      
+
       out_msg.Destination.add(mapAddressToRange(address,
                               MachineType:L2Cache,
                               l2_select_low_bit,
@@ -1289,8 +1289,8 @@ machine(L1Cache, "Token protocol")
       DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
               address, cache_entry.DataBlk);

-      sequencer.readCallback(address, 
-                             GenericMachineType:L1Cache,
+      sequencer.readCallback(address,
+                             GenericMachineType:L1Cache,
                              cache_entry.DataBlk);

     }
@@ -1301,7 +1301,7 @@ machine(L1Cache, "Token protocol")
               address, cache_entry.DataBlk);
       peek(responseNetwork_in, ResponseMsg) {

-        sequencer.readCallback(address, 
+        sequencer.readCallback(address,
                                getNondirectHitMachType(address, in_msg.Sender),
                                cache_entry.DataBlk);

@@ -1313,8 +1313,8 @@ machine(L1Cache, "Token protocol")
       DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
               address, cache_entry.DataBlk);

-      sequencer.writeCallback(address, 
-                              GenericMachineType:L1Cache,
+      sequencer.writeCallback(address,
+                              GenericMachineType:L1Cache,
                               cache_entry.DataBlk);

       cache_entry.Dirty := true;
@@ -1506,10 +1506,10 @@ machine(L1Cache, "Token protocol")

   action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
     assert(getTokens(cache_entry) == 0);
-    if (L1DcacheMemory.isTagPresent(address)) {
-      L1DcacheMemory.deallocate(address);
+    if (L1Dcache.isTagPresent(address)) {
+      L1Dcache.deallocate(address);
     } else {
-      L1IcacheMemory.deallocate(address);
+      L1Icache.deallocate(address);
     }
     unset_cache_entry();
   }
@@ -1517,14 +1517,14 @@ machine(L1Cache, "Token protocol")
   action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
     if (is_valid(cache_entry)) {
     } else {
-      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
+      set_cache_entry(L1Dcache.allocate(address, new Entry));
     }
   }

   action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
     if (is_valid(cache_entry)) {
     } else {
-      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
+      set_cache_entry(L1Icache.allocate(address, new Entry));
     }
   }

@@ -1536,19 +1536,19 @@ machine(L1Cache, "Token protocol")
   }

   action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
-    ++L1IcacheMemory.demand_misses;
+    ++L1Icache.demand_misses;
   }

   action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
-    ++L1IcacheMemory.demand_hits;
+    ++L1Icache.demand_hits;
   }

   action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
-    ++L1DcacheMemory.demand_misses;
+    ++L1Dcache.demand_misses;
   }

   action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
-    ++L1DcacheMemory.demand_hits;
+    ++L1Dcache.demand_hits;
   }

   action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
@@ -1561,8 +1561,8 @@ machine(L1Cache, "Token protocol")
   action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
     peek(mandatoryQueue_in, RubyRequest) {
       APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
     }
-    stall_and_wait(mandatoryQueue_in, address);
-  }
+    stall_and_wait(mandatoryQueue_in, address);
+  }

   action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
@@ -2261,7 +2261,7 @@ machine(L1Cache, "Token protocol")
     s_deallocateTBE;
     j_unsetReissueTimer;
     n_popResponseQueue;
-    kd_wakeUpDependents;
+    kd_wakeUpDependents;
   }

   transition(IS, Data_All_Tokens, M_W) {
@@ -1,6 +1,5 @@
-
 /*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,13 +26,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

-/*
- * $Id$
- *
- */
-
-machine(L2Cache, "Token protocol")
- : CacheMemory * L2cacheMemory,
+machine(L2Cache, "Token protocol")
+ : CacheMemory * L2cache,
    int N_tokens,
    Cycles l2_request_latency = 5,
    Cycles l2_response_latency = 5,
@@ -152,10 +146,10 @@ machine(L2Cache, "Token protocol")
   void unset_cache_entry();

   Entry getCacheEntry(Address address), return_by_pointer="yes" {
-    Entry cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
+    Entry cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
     return cache_entry;
   }
- 
+
   DataBlock getDataBlock(Address addr), return_by_ref="yes" {
     return getCacheEntry(addr).DataBlk;
   }
@@ -411,7 +405,7 @@ machine(L2Cache, "Token protocol")
                  in_msg.Type == CoherenceResponseType:WB_OWNED ||
                  in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {

-          if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {
+          if (L2cache.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {

             // either room is available or the block is already present

@@ -429,8 +423,8 @@ machine(L2Cache, "Token protocol")
           }
           else {
             trigger(Event:L2_Replacement,
-                    L2cacheMemory.cacheProbe(in_msg.Address),
-                    getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
+                    L2cache.cacheProbe(in_msg.Address),
+                    getCacheEntry(L2cache.cacheProbe(in_msg.Address)));
           }
         } else if (in_msg.Type == CoherenceResponseType:INV) {
           trigger(Event:L1_INV, in_msg.Address, cache_entry);
@@ -447,7 +441,7 @@ machine(L2Cache, "Token protocol")
         } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                    in_msg.Type == CoherenceResponseType:WB_OWNED ||
                    in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
-          if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {
+          if (L2cache.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {

             // either room is available or the block is already present

@@ -466,8 +460,8 @@ machine(L2Cache, "Token protocol")
           }
           else {
             trigger(Event:L2_Replacement,
-                    L2cacheMemory.cacheProbe(in_msg.Address),
-                    getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
+                    L2cache.cacheProbe(in_msg.Address),
+                    getCacheEntry(L2cache.cacheProbe(in_msg.Address)));
           }
         } else if (in_msg.Type == CoherenceResponseType:INV) {
           trigger(Event:L1_INV, in_msg.Address, cache_entry);
@@ -497,7 +491,7 @@ machine(L2Cache, "Token protocol")
       out_msg.RetryNum := in_msg.RetryNum;

       //
-      // If a statically shared L2 cache, then no other L2 caches can
+      // If a statically shared L2 cache, then no other L2 caches can
       // store the block
       //
       //out_msg.Destination.broadcast(MachineType:L2Cache);
@@ -778,7 +772,7 @@ machine(L2Cache, "Token protocol")
       enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency ) {
         out_msg.Address := in_msg.Address;
         out_msg.Requestor := in_msg.Requestor;
-        
+
         //
         // Currently assuming only one chip so all L1s are local
         //
@@ -905,7 +899,7 @@ machine(L2Cache, "Token protocol")
     peek(L1requestNetwork_in, RequestMsg) {
       if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
           (is_valid(cache_entry))) {
-        L2cacheMemory.setMRU(address);
+        L2cache.setMRU(address);
       }
     }
   }
@@ -957,20 +951,20 @@ machine(L2Cache, "Token protocol")
   }

   action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
-    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
+    set_cache_entry(L2cache.allocate(address, new Entry));
   }

   action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
-    L2cacheMemory.deallocate(address);
+    L2cache.deallocate(address);
     unset_cache_entry();
   }

   action(uu_profileMiss, "\um", desc="Profile the demand miss") {
-    ++L2cacheMemory.demand_misses;
+    ++L2cache.demand_misses;
   }

   action(uu_profileHit, "\uh", desc="Profile the demand hit") {
-    ++L2cacheMemory.demand_hits;
+    ++L2cache.demand_hits;
   }

   action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
@@ -1053,8 +1047,8 @@ machine(L2Cache, "Token protocol")
   }


-  transition(NP, 
-             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, 
+  transition(NP,
+             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
              I_L) {
     l_popPersistentQueue;
   }
@@ -1089,8 +1083,8 @@ machine(L2Cache, "Token protocol")
     m_popRequestQueue;
   }

-  transition(I, 
-             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, 
+  transition(I,
+             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
              I_L) {
     e_sendAckWithCollectedTokens;
     l_popPersistentQueue;
@@ -1,6 +1,5 @@
-
 /*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,12 +26,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

-/*
- * $Id$
- */
-
-
-machine(Directory, "Token protocol")
+machine(Directory, "Token protocol")
  : DirectoryMemory * directory,
    MemoryControl * memBuffer,
    int l2_select_num_bits,
@@ -198,7 +192,7 @@ machine(Directory, "Token protocol")

       if (state == State:L || state == State:DW_L || state == State:DR_L) {
         assert(getDirectoryEntry(addr).Tokens == 0);
-      }
+      }

       // We have one or zero owners
       assert((getDirectoryEntry(addr).Owner.count() == 0) || (getDirectoryEntry(addr).Owner.count() == 1));
@@ -245,14 +239,14 @@ machine(Directory, "Token protocol")
   out_port(persistentNetwork_out, PersistentMsg, persistentFromDir);
   out_port(requestNetwork_out, RequestMsg, requestFromDir);
   out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
-  
+
   //
   // Memory buffer for memory controller to DIMM communication
   //
   out_port(memQueue_out, MemoryMsg, memBuffer);

   // ** IN_PORTS **
-  
+
   // off-chip memory request/response is done
   in_port(memQueue_in, MemoryMsg, memBuffer) {
     if (memQueue_in.isReady()) {
@@ -346,7 +340,7 @@ machine(Directory, "Token protocol")
         } else {
           // locked
           trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
-        }
+        }
       } else {
         // unlocked
         trigger(Event:Unlockdown, in_msg.Address, TBEs[in_msg.Address]);
@@ -447,7 +441,7 @@ machine(Directory, "Token protocol")
       // pertinent L2 cache exist.
       //
       //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
-      
+
       out_msg.Destination.add(mapAddressToRange(address,
                               MachineType:L2Cache,
                               l2_select_low_bit,
@@ -517,7 +511,7 @@ machine(Directory, "Token protocol")
       // pertinent L2 cache exist.
       //
       //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
-      
+
       out_msg.Destination.add(mapAddressToRange(address,
                               MachineType:L2Cache,
                               l2_select_low_bit,
@@ -614,7 +608,7 @@ machine(Directory, "Token protocol")
         out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
         out_msg.Dirty := false;
         out_msg.MessageSize := MessageSizeType:Response_Data;
-      }
+      }
     }
     getDirectoryEntry(address).Tokens := 0;
   }
@@ -635,7 +629,7 @@ machine(Directory, "Token protocol")
   }

   action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
-    peek(requestNetwork_in, RequestMsg) {
+    peek(requestNetwork_in, RequestMsg) {
       enqueue(memQueue_out, MemoryMsg, latency="1") {
         out_msg.Address := address;
         out_msg.Type := MemoryRequestType:MEMORY_READ;
@@ -661,7 +655,7 @@ machine(Directory, "Token protocol")
   }

   action(fd_memoryDma, "fd", desc="Queue off-chip fetch request") {
-    peek(dmaRequestQueue_in, DMARequestMsg) {
+    peek(dmaRequestQueue_in, DMARequestMsg) {
       enqueue(memQueue_out, MemoryMsg, latency="1") {
         out_msg.Address := address;
         out_msg.Type := MemoryRequestType:MEMORY_READ;
@@ -893,7 +887,7 @@ machine(Directory, "Token protocol")
       out_msg.PhysicalAddress := address;
       out_msg.LineAddress := address;
       out_msg.Type := DMAResponseType:ACK;
-      out_msg.Destination.add(tbe.DmaRequestor);
+      out_msg.Destination.add(tbe.DmaRequestor);
       out_msg.MessageSize := MessageSizeType:Writeback_Control;
     }
   }
@@ -905,7 +899,7 @@ machine(Directory, "Token protocol")
       out_msg.LineAddress := address;
       out_msg.Type := DMAResponseType:DATA;
       //
-      // we send the entire data block and rely on the dma controller to
+      // we send the entire data block and rely on the dma controller to
       // split it up if need be
       //
       out_msg.DataBlk := in_msg.DataBlk;
@@ -922,7 +916,7 @@ machine(Directory, "Token protocol")
       out_msg.LineAddress := address;
       out_msg.Type := DMAResponseType:DATA;
       //
-      // we send the entire data block and rely on the dma controller to
+      // we send the entire data block and rely on the dma controller to
      // split it up if need be
       //
       out_msg.DataBlk := in_msg.DataBlk;
@@ -935,7 +929,7 @@ machine(Directory, "Token protocol")
   // TRANSITIONS

   //
-  // Trans. from base state O
+  // Trans. from base state O
   // the directory has valid data
   //
   transition(O, GETX, NO_W) {
@@ -1267,7 +1261,7 @@ machine(Directory, "Token protocol")
   // presistent request is issued and resolve before memory returns with data
   //
   transition(O_W, {Memory_Ack, Memory_Data}, O) {
-    l_popMemQueue;
+    l_popMemQueue;
   }

   transition({O, NO}, {Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
@@ -27,7 +27,7 @@
  */


-machine(DMA, "DMA Controller")
+machine(DMA, "DMA Controller")
 : DMASequencer * dma_sequencer,
   Cycles request_latency = 6
 {
@@ -108,7 +108,7 @@ machine(DMA, "DMA Controller")
     peek(dmaRequestQueue_in, SequencerMsg) {
       enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
         out_msg.PhysicalAddress := in_msg.PhysicalAddress;
-        out_msg.LineAddress := in_msg.LineAddress;
+        out_msg.LineAddress := in_msg.LineAddress;
         out_msg.Type := DMARequestType:READ;
         out_msg.Requestor := machineID;
         out_msg.DataBlk := in_msg.DataBlk;
@@ -123,7 +123,7 @@ machine(DMA, "DMA Controller")
     peek(dmaRequestQueue_in, SequencerMsg) {
       enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
         out_msg.PhysicalAddress := in_msg.PhysicalAddress;
-        out_msg.LineAddress := in_msg.LineAddress;
+        out_msg.LineAddress := in_msg.LineAddress;
         out_msg.Type := DMARequestType:WRITE;
         out_msg.Requestor := machineID;
         out_msg.DataBlk := in_msg.DataBlk;