ruby: drop the [] notation for lookup function.
This is in preparation for adding a second argument to the lookup function for the CacheMemory class. The change to *.sm files was made using the following sed command: sed -i 's/\[\([0-9A-Za-z._()]*\)\]/.lookup(\1)/' src/mem/protocol/*.sm
This commit is contained in:
parent
1a3e8a3370
commit
f391cee5e1
16 changed files with 239 additions and 243 deletions
|
@ -145,22 +145,22 @@ machine(L0Cache, "MESI Directory L0 Cache")
|
||||||
|
|
||||||
// inclusive cache returns L0 entries only
|
// inclusive cache returns L0 entries only
|
||||||
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
|
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
|
||||||
Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
|
Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
|
||||||
if(is_valid(Dcache_entry)) {
|
if(is_valid(Dcache_entry)) {
|
||||||
return Dcache_entry;
|
return Dcache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
|
Entry Icache_entry := static_cast(Entry, "pointer", Icache.lookup(addr));
|
||||||
return Icache_entry;
|
return Icache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
|
Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
|
||||||
Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
|
Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
|
||||||
return Dcache_entry;
|
return Dcache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
|
Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
|
||||||
Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
|
Entry Icache_entry := static_cast(Entry, "pointer", Icache.lookup(addr));
|
||||||
return Icache_entry;
|
return Icache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -189,7 +189,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
|
DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
|
||||||
return L0Cache_State_to_permission(tbe.TBEState);
|
return L0Cache_State_to_permission(tbe.TBEState);
|
||||||
|
@ -206,7 +206,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
|
||||||
}
|
}
|
||||||
|
|
||||||
void functionalRead(Addr addr, Packet *pkt) {
|
void functionalRead(Addr addr, Packet *pkt) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
testAndRead(addr, tbe.DataBlk, pkt);
|
testAndRead(addr, tbe.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -217,7 +217,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
|
||||||
int functionalWrite(Addr addr, Packet *pkt) {
|
int functionalWrite(Addr addr, Packet *pkt) {
|
||||||
int num_functional_writes := 0;
|
int num_functional_writes := 0;
|
||||||
|
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
num_functional_writes := num_functional_writes +
|
num_functional_writes := num_functional_writes +
|
||||||
testAndWrite(addr, tbe.DataBlk, pkt);
|
testAndWrite(addr, tbe.DataBlk, pkt);
|
||||||
|
@ -260,7 +260,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
|
||||||
assert(in_msg.Dest == machineID);
|
assert(in_msg.Dest == machineID);
|
||||||
|
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
|
if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
|
||||||
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
|
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
|
||||||
|
@ -301,7 +301,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
|
||||||
if (is_valid(Icache_entry)) {
|
if (is_valid(Icache_entry)) {
|
||||||
// The tag matches for the L0, so the L0 asks the L2 for it.
|
// The tag matches for the L0, so the L0 asks the L2 for it.
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
Icache_entry, TBEs[in_msg.LineAddress]);
|
Icache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
// Check to see if it is in the OTHER L0
|
// Check to see if it is in the OTHER L0
|
||||||
|
@ -309,19 +309,19 @@ machine(L0Cache, "MESI Directory L0 Cache")
|
||||||
if (is_valid(Dcache_entry)) {
|
if (is_valid(Dcache_entry)) {
|
||||||
// The block is in the wrong L0, put the request on the queue to the shared L2
|
// The block is in the wrong L0, put the request on the queue to the shared L2
|
||||||
trigger(Event:L0_Replacement, in_msg.LineAddress,
|
trigger(Event:L0_Replacement, in_msg.LineAddress,
|
||||||
Dcache_entry, TBEs[in_msg.LineAddress]);
|
Dcache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (Icache.cacheAvail(in_msg.LineAddress)) {
|
if (Icache.cacheAvail(in_msg.LineAddress)) {
|
||||||
// L0 does't have the line, but we have space for it
|
// L0 does't have the line, but we have space for it
|
||||||
// in the L0 so let's see if the L2 has it
|
// in the L0 so let's see if the L2 has it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
Icache_entry, TBEs[in_msg.LineAddress]);
|
Icache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
// No room in the L0, so we need to make room in the L0
|
// No room in the L0, so we need to make room in the L0
|
||||||
trigger(Event:L0_Replacement, Icache.cacheProbe(in_msg.LineAddress),
|
trigger(Event:L0_Replacement, Icache.cacheProbe(in_msg.LineAddress),
|
||||||
getICacheEntry(Icache.cacheProbe(in_msg.LineAddress)),
|
getICacheEntry(Icache.cacheProbe(in_msg.LineAddress)),
|
||||||
TBEs[Icache.cacheProbe(in_msg.LineAddress)]);
|
TBEs.lookup(Icache.cacheProbe(in_msg.LineAddress)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -331,7 +331,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
|
||||||
if (is_valid(Dcache_entry)) {
|
if (is_valid(Dcache_entry)) {
|
||||||
// The tag matches for the L0, so the L0 ask the L1 for it
|
// The tag matches for the L0, so the L0 ask the L1 for it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
Dcache_entry, TBEs[in_msg.LineAddress]);
|
Dcache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
// Check to see if it is in the OTHER L0
|
// Check to see if it is in the OTHER L0
|
||||||
|
@ -339,19 +339,19 @@ machine(L0Cache, "MESI Directory L0 Cache")
|
||||||
if (is_valid(Icache_entry)) {
|
if (is_valid(Icache_entry)) {
|
||||||
// The block is in the wrong L0, put the request on the queue to the private L1
|
// The block is in the wrong L0, put the request on the queue to the private L1
|
||||||
trigger(Event:L0_Replacement, in_msg.LineAddress,
|
trigger(Event:L0_Replacement, in_msg.LineAddress,
|
||||||
Icache_entry, TBEs[in_msg.LineAddress]);
|
Icache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (Dcache.cacheAvail(in_msg.LineAddress)) {
|
if (Dcache.cacheAvail(in_msg.LineAddress)) {
|
||||||
// L1 does't have the line, but we have space for it
|
// L1 does't have the line, but we have space for it
|
||||||
// in the L0 let's see if the L1 has it
|
// in the L0 let's see if the L1 has it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
Dcache_entry, TBEs[in_msg.LineAddress]);
|
Dcache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room in the L0
|
// No room in the L1, so we need to make room in the L0
|
||||||
trigger(Event:L0_Replacement, Dcache.cacheProbe(in_msg.LineAddress),
|
trigger(Event:L0_Replacement, Dcache.cacheProbe(in_msg.LineAddress),
|
||||||
getDCacheEntry(Dcache.cacheProbe(in_msg.LineAddress)),
|
getDCacheEntry(Dcache.cacheProbe(in_msg.LineAddress)),
|
||||||
TBEs[Dcache.cacheProbe(in_msg.LineAddress)]);
|
TBEs.lookup(Dcache.cacheProbe(in_msg.LineAddress)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -489,7 +489,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
|
||||||
check_allocate(TBEs);
|
check_allocate(TBEs);
|
||||||
assert(is_valid(cache_entry));
|
assert(is_valid(cache_entry));
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.Dirty := cache_entry.Dirty;
|
tbe.Dirty := cache_entry.Dirty;
|
||||||
tbe.DataBlk := cache_entry.DataBlk;
|
tbe.DataBlk := cache_entry.DataBlk;
|
||||||
}
|
}
|
||||||
|
|
|
@ -161,7 +161,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
|
|
||||||
// inclusive cache returns L1 entries only
|
// inclusive cache returns L1 entries only
|
||||||
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
|
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
|
||||||
Entry cache_entry := static_cast(Entry, "pointer", cache[addr]);
|
Entry cache_entry := static_cast(Entry, "pointer", cache.lookup(addr));
|
||||||
return cache_entry;
|
return cache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -186,7 +186,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
|
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
|
||||||
return L1Cache_State_to_permission(tbe.TBEState);
|
return L1Cache_State_to_permission(tbe.TBEState);
|
||||||
|
@ -203,7 +203,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
}
|
}
|
||||||
|
|
||||||
void functionalRead(Addr addr, Packet *pkt) {
|
void functionalRead(Addr addr, Packet *pkt) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
testAndRead(addr, tbe.DataBlk, pkt);
|
testAndRead(addr, tbe.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -214,7 +214,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
int functionalWrite(Addr addr, Packet *pkt) {
|
int functionalWrite(Addr addr, Packet *pkt) {
|
||||||
int num_functional_writes := 0;
|
int num_functional_writes := 0;
|
||||||
|
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
num_functional_writes := num_functional_writes +
|
num_functional_writes := num_functional_writes +
|
||||||
testAndWrite(addr, tbe.DataBlk, pkt);
|
testAndWrite(addr, tbe.DataBlk, pkt);
|
||||||
|
@ -271,7 +271,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
|
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
||||||
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
|
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
|
||||||
|
@ -307,7 +307,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceRequestType:INV) {
|
if (in_msg.Type == CoherenceRequestType:INV) {
|
||||||
if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
|
if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
|
||||||
|
@ -343,7 +343,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
if (messageBufferFromL0_in.isReady()) {
|
if (messageBufferFromL0_in.isReady()) {
|
||||||
peek(messageBufferFromL0_in, CoherenceMsg) {
|
peek(messageBufferFromL0_in, CoherenceMsg) {
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if(in_msg.Class == CoherenceClass:INV_DATA) {
|
if(in_msg.Class == CoherenceClass:INV_DATA) {
|
||||||
trigger(Event:L0_DataAck, in_msg.addr, cache_entry, tbe);
|
trigger(Event:L0_DataAck, in_msg.addr, cache_entry, tbe);
|
||||||
|
@ -363,7 +363,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
// No room in the L1, so we need to make room in the L1
|
// No room in the L1, so we need to make room in the L1
|
||||||
Entry victim_entry :=
|
Entry victim_entry :=
|
||||||
getCacheEntry(cache.cacheProbe(in_msg.addr));
|
getCacheEntry(cache.cacheProbe(in_msg.addr));
|
||||||
TBE victim_tbe := TBEs[cache.cacheProbe(in_msg.addr)];
|
TBE victim_tbe := TBEs.lookup(cache.cacheProbe(in_msg.addr));
|
||||||
|
|
||||||
if (is_valid(victim_entry) && inL0Cache(victim_entry.CacheState)) {
|
if (is_valid(victim_entry) && inL0Cache(victim_entry.CacheState)) {
|
||||||
trigger(Event:L0_Invalidate_Own,
|
trigger(Event:L0_Invalidate_Own,
|
||||||
|
@ -628,7 +628,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
check_allocate(TBEs);
|
check_allocate(TBEs);
|
||||||
assert(is_valid(cache_entry));
|
assert(is_valid(cache_entry));
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.Dirty := cache_entry.Dirty;
|
tbe.Dirty := cache_entry.Dirty;
|
||||||
tbe.DataBlk := cache_entry.DataBlk;
|
tbe.DataBlk := cache_entry.DataBlk;
|
||||||
}
|
}
|
||||||
|
|
|
@ -164,22 +164,22 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
|
|
||||||
// inclusive cache returns L1 entries only
|
// inclusive cache returns L1 entries only
|
||||||
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
|
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
|
||||||
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
|
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
|
||||||
if(is_valid(L1Dcache_entry)) {
|
if(is_valid(L1Dcache_entry)) {
|
||||||
return L1Dcache_entry;
|
return L1Dcache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
|
Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
|
||||||
return L1Icache_entry;
|
return L1Icache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
|
Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
|
||||||
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
|
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
|
||||||
return L1Dcache_entry;
|
return L1Dcache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
|
Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
|
||||||
Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
|
Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
|
||||||
return L1Icache_entry;
|
return L1Icache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -208,7 +208,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
|
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
|
||||||
return L1Cache_State_to_permission(tbe.TBEState);
|
return L1Cache_State_to_permission(tbe.TBEState);
|
||||||
|
@ -225,7 +225,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
}
|
}
|
||||||
|
|
||||||
void functionalRead(Addr addr, Packet *pkt) {
|
void functionalRead(Addr addr, Packet *pkt) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
testAndRead(addr, tbe.DataBlk, pkt);
|
testAndRead(addr, tbe.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -236,7 +236,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
int functionalWrite(Addr addr, Packet *pkt) {
|
int functionalWrite(Addr addr, Packet *pkt) {
|
||||||
int num_functional_writes := 0;
|
int num_functional_writes := 0;
|
||||||
|
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
num_functional_writes := num_functional_writes +
|
num_functional_writes := num_functional_writes +
|
||||||
testAndWrite(addr, tbe.DataBlk, pkt);
|
testAndWrite(addr, tbe.DataBlk, pkt);
|
||||||
|
@ -305,7 +305,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
// cache. We should drop this request.
|
// cache. We should drop this request.
|
||||||
trigger(prefetch_request_type_to_event(in_msg.Type),
|
trigger(prefetch_request_type_to_event(in_msg.Type),
|
||||||
in_msg.LineAddress,
|
in_msg.LineAddress,
|
||||||
L1Icache_entry, TBEs[in_msg.LineAddress]);
|
L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check to see if it is in the OTHER L1
|
// Check to see if it is in the OTHER L1
|
||||||
|
@ -315,7 +315,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
// this request.
|
// this request.
|
||||||
trigger(prefetch_request_type_to_event(in_msg.Type),
|
trigger(prefetch_request_type_to_event(in_msg.Type),
|
||||||
in_msg.LineAddress,
|
in_msg.LineAddress,
|
||||||
L1Dcache_entry, TBEs[in_msg.LineAddress]);
|
L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
|
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
|
||||||
|
@ -323,13 +323,13 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
// in the L1 so let's see if the L2 has it
|
// in the L1 so let's see if the L2 has it
|
||||||
trigger(prefetch_request_type_to_event(in_msg.Type),
|
trigger(prefetch_request_type_to_event(in_msg.Type),
|
||||||
in_msg.LineAddress,
|
in_msg.LineAddress,
|
||||||
L1Icache_entry, TBEs[in_msg.LineAddress]);
|
L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room in the L1
|
// No room in the L1, so we need to make room in the L1
|
||||||
trigger(Event:L1_Replacement,
|
trigger(Event:L1_Replacement,
|
||||||
L1Icache.cacheProbe(in_msg.LineAddress),
|
L1Icache.cacheProbe(in_msg.LineAddress),
|
||||||
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
|
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
|
||||||
TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
|
TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Data prefetch
|
// Data prefetch
|
||||||
|
@ -339,7 +339,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
// cache. We should drop this request.
|
// cache. We should drop this request.
|
||||||
trigger(prefetch_request_type_to_event(in_msg.Type),
|
trigger(prefetch_request_type_to_event(in_msg.Type),
|
||||||
in_msg.LineAddress,
|
in_msg.LineAddress,
|
||||||
L1Dcache_entry, TBEs[in_msg.LineAddress]);
|
L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check to see if it is in the OTHER L1
|
// Check to see if it is in the OTHER L1
|
||||||
|
@ -349,7 +349,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
// request.
|
// request.
|
||||||
trigger(prefetch_request_type_to_event(in_msg.Type),
|
trigger(prefetch_request_type_to_event(in_msg.Type),
|
||||||
in_msg.LineAddress,
|
in_msg.LineAddress,
|
||||||
L1Icache_entry, TBEs[in_msg.LineAddress]);
|
L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
|
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
|
||||||
|
@ -357,13 +357,13 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
// the L1 let's see if the L2 has it
|
// the L1 let's see if the L2 has it
|
||||||
trigger(prefetch_request_type_to_event(in_msg.Type),
|
trigger(prefetch_request_type_to_event(in_msg.Type),
|
||||||
in_msg.LineAddress,
|
in_msg.LineAddress,
|
||||||
L1Dcache_entry, TBEs[in_msg.LineAddress]);
|
L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room in the L1
|
// No room in the L1, so we need to make room in the L1
|
||||||
trigger(Event:L1_Replacement,
|
trigger(Event:L1_Replacement,
|
||||||
L1Dcache.cacheProbe(in_msg.LineAddress),
|
L1Dcache.cacheProbe(in_msg.LineAddress),
|
||||||
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
|
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
|
||||||
TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
|
TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -377,7 +377,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
|
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
||||||
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
|
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
|
||||||
|
@ -417,7 +417,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
|
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceRequestType:INV) {
|
if (in_msg.Type == CoherenceRequestType:INV) {
|
||||||
trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
|
trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
|
||||||
|
@ -450,7 +450,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
if (is_valid(L1Icache_entry)) {
|
if (is_valid(L1Icache_entry)) {
|
||||||
// The tag matches for the L1, so the L1 asks the L2 for it.
|
// The tag matches for the L1, so the L1 asks the L2 for it.
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
L1Icache_entry, TBEs[in_msg.LineAddress]);
|
L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
// Check to see if it is in the OTHER L1
|
// Check to see if it is in the OTHER L1
|
||||||
|
@ -458,19 +458,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
if (is_valid(L1Dcache_entry)) {
|
if (is_valid(L1Dcache_entry)) {
|
||||||
// The block is in the wrong L1, put the request on the queue to the shared L2
|
// The block is in the wrong L1, put the request on the queue to the shared L2
|
||||||
trigger(Event:L1_Replacement, in_msg.LineAddress,
|
trigger(Event:L1_Replacement, in_msg.LineAddress,
|
||||||
L1Dcache_entry, TBEs[in_msg.LineAddress]);
|
L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
|
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
|
||||||
// L1 does't have the line, but we have space for it
|
// L1 does't have the line, but we have space for it
|
||||||
// in the L1 so let's see if the L2 has it.
|
// in the L1 so let's see if the L2 has it.
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
L1Icache_entry, TBEs[in_msg.LineAddress]);
|
L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room in the L1
|
// No room in the L1, so we need to make room in the L1
|
||||||
trigger(Event:L1_Replacement, L1Icache.cacheProbe(in_msg.LineAddress),
|
trigger(Event:L1_Replacement, L1Icache.cacheProbe(in_msg.LineAddress),
|
||||||
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
|
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
|
||||||
TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
|
TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -480,7 +480,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
if (is_valid(L1Dcache_entry)) {
|
if (is_valid(L1Dcache_entry)) {
|
||||||
// The tag matches for the L1, so the L1 ask the L2 for it
|
// The tag matches for the L1, so the L1 ask the L2 for it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
L1Dcache_entry, TBEs[in_msg.LineAddress]);
|
L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
// Check to see if it is in the OTHER L1
|
// Check to see if it is in the OTHER L1
|
||||||
|
@ -488,19 +488,19 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
if (is_valid(L1Icache_entry)) {
|
if (is_valid(L1Icache_entry)) {
|
||||||
// The block is in the wrong L1, put the request on the queue to the shared L2
|
// The block is in the wrong L1, put the request on the queue to the shared L2
|
||||||
trigger(Event:L1_Replacement, in_msg.LineAddress,
|
trigger(Event:L1_Replacement, in_msg.LineAddress,
|
||||||
L1Icache_entry, TBEs[in_msg.LineAddress]);
|
L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
|
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
|
||||||
// L1 does't have the line, but we have space for it
|
// L1 does't have the line, but we have space for it
|
||||||
// in the L1 let's see if the L2 has it.
|
// in the L1 let's see if the L2 has it.
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
L1Dcache_entry, TBEs[in_msg.LineAddress]);
|
L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room in the L1
|
// No room in the L1, so we need to make room in the L1
|
||||||
trigger(Event:L1_Replacement, L1Dcache.cacheProbe(in_msg.LineAddress),
|
trigger(Event:L1_Replacement, L1Dcache.cacheProbe(in_msg.LineAddress),
|
||||||
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
|
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
|
||||||
TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
|
TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -847,7 +847,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
|
||||||
check_allocate(TBEs);
|
check_allocate(TBEs);
|
||||||
assert(is_valid(cache_entry));
|
assert(is_valid(cache_entry));
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.isPrefetch := false;
|
tbe.isPrefetch := false;
|
||||||
tbe.Dirty := cache_entry.Dirty;
|
tbe.Dirty := cache_entry.Dirty;
|
||||||
tbe.DataBlk := cache_entry.DataBlk;
|
tbe.DataBlk := cache_entry.DataBlk;
|
||||||
|
|
|
@ -157,7 +157,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
|
|
||||||
// inclusive cache, returns L2 entries only
|
// inclusive cache, returns L2 entries only
|
||||||
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
|
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
|
||||||
return static_cast(Entry, "pointer", L2cache[addr]);
|
return static_cast(Entry, "pointer", L2cache.lookup(addr));
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isSharer(Addr addr, MachineID requestor, Entry cache_entry) {
|
bool isSharer(Addr addr, MachineID requestor, Entry cache_entry) {
|
||||||
|
@ -196,7 +196,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
|
DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
|
||||||
return L2Cache_State_to_permission(tbe.TBEState);
|
return L2Cache_State_to_permission(tbe.TBEState);
|
||||||
|
@ -213,7 +213,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
}
|
}
|
||||||
|
|
||||||
void functionalRead(Addr addr, Packet *pkt) {
|
void functionalRead(Addr addr, Packet *pkt) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
testAndRead(addr, tbe.DataBlk, pkt);
|
testAndRead(addr, tbe.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -224,7 +224,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
int functionalWrite(Addr addr, Packet *pkt) {
|
int functionalWrite(Addr addr, Packet *pkt) {
|
||||||
int num_functional_writes := 0;
|
int num_functional_writes := 0;
|
||||||
|
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
num_functional_writes := num_functional_writes +
|
num_functional_writes := num_functional_writes +
|
||||||
testAndWrite(addr, tbe.DataBlk, pkt);
|
testAndWrite(addr, tbe.DataBlk, pkt);
|
||||||
|
@ -288,7 +288,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
if(L1unblockNetwork_in.isReady()) {
|
if(L1unblockNetwork_in.isReady()) {
|
||||||
peek(L1unblockNetwork_in, ResponseMsg) {
|
peek(L1unblockNetwork_in, ResponseMsg) {
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n",
|
DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n",
|
||||||
in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
|
in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
|
||||||
in_msg.Sender, in_msg.Type, in_msg.Destination);
|
in_msg.Sender, in_msg.Type, in_msg.Destination);
|
||||||
|
@ -312,7 +312,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
// test wether it's from a local L1 or an off chip source
|
// test wether it's from a local L1 or an off chip source
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
|
if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
|
||||||
if(in_msg.Type == CoherenceResponseType:DATA) {
|
if(in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
|
@ -351,7 +351,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
if(L1RequestL2Network_in.isReady()) {
|
if(L1RequestL2Network_in.isReady()) {
|
||||||
peek(L1RequestL2Network_in, RequestMsg) {
|
peek(L1RequestL2Network_in, RequestMsg) {
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n",
|
DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n",
|
||||||
in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
|
in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
|
||||||
|
@ -376,10 +376,10 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
Entry L2cache_entry := getCacheEntry(L2cache.cacheProbe(in_msg.addr));
|
Entry L2cache_entry := getCacheEntry(L2cache.cacheProbe(in_msg.addr));
|
||||||
if (isDirty(L2cache_entry)) {
|
if (isDirty(L2cache_entry)) {
|
||||||
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
|
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
|
||||||
L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
|
L2cache_entry, TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:L2_Replacement_clean, L2cache.cacheProbe(in_msg.addr),
|
trigger(Event:L2_Replacement_clean, L2cache.cacheProbe(in_msg.addr),
|
||||||
L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
|
L2cache_entry, TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -591,7 +591,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
check_allocate(TBEs);
|
check_allocate(TBEs);
|
||||||
assert(is_valid(cache_entry));
|
assert(is_valid(cache_entry));
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.L1_GetS_IDs.clear();
|
tbe.L1_GetS_IDs.clear();
|
||||||
tbe.DataBlk := cache_entry.DataBlk;
|
tbe.DataBlk := cache_entry.DataBlk;
|
||||||
tbe.Dirty := cache_entry.Dirty;
|
tbe.Dirty := cache_entry.Dirty;
|
||||||
|
|
|
@ -101,7 +101,7 @@ machine(Directory, "MESI Two Level directory protocol")
|
||||||
void wakeUpBuffers(Addr a);
|
void wakeUpBuffers(Addr a);
|
||||||
|
|
||||||
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
|
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
|
||||||
Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
|
Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
|
||||||
|
|
||||||
if (is_valid(dir_entry)) {
|
if (is_valid(dir_entry)) {
|
||||||
return dir_entry;
|
return dir_entry;
|
||||||
|
@ -133,7 +133,7 @@ machine(Directory, "MESI Two Level directory protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
|
DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
|
||||||
return Directory_State_to_permission(tbe.TBEState);
|
return Directory_State_to_permission(tbe.TBEState);
|
||||||
|
@ -149,7 +149,7 @@ machine(Directory, "MESI Two Level directory protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
void functionalRead(Addr addr, Packet *pkt) {
|
void functionalRead(Addr addr, Packet *pkt) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
testAndRead(addr, tbe.DataBlk, pkt);
|
testAndRead(addr, tbe.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -160,7 +160,7 @@ machine(Directory, "MESI Two Level directory protocol")
|
||||||
int functionalWrite(Addr addr, Packet *pkt) {
|
int functionalWrite(Addr addr, Packet *pkt) {
|
||||||
int num_functional_writes := 0;
|
int num_functional_writes := 0;
|
||||||
|
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
num_functional_writes := num_functional_writes +
|
num_functional_writes := num_functional_writes +
|
||||||
testAndWrite(addr, tbe.DataBlk, pkt);
|
testAndWrite(addr, tbe.DataBlk, pkt);
|
||||||
|
@ -194,13 +194,13 @@ machine(Directory, "MESI Two Level directory protocol")
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
if (isGETRequest(in_msg.Type)) {
|
if (isGETRequest(in_msg.Type)) {
|
||||||
trigger(Event:Fetch, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Fetch, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
|
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
|
||||||
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
|
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
|
||||||
TBEs[makeLineAddress(in_msg.addr)]);
|
TBEs.lookup(makeLineAddress(in_msg.addr)));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
||||||
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
|
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
|
||||||
TBEs[makeLineAddress(in_msg.addr)]);
|
TBEs.lookup(makeLineAddress(in_msg.addr)));
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg);
|
DPRINTF(RubySlicc, "%s\n", in_msg);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -214,9 +214,9 @@ machine(Directory, "MESI Two Level directory protocol")
|
||||||
peek(responseNetwork_in, ResponseMsg) {
|
peek(responseNetwork_in, ResponseMsg) {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
|
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
|
||||||
trigger(Event:Data, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Data, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
trigger(Event:CleanReplacement, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:CleanReplacement, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -230,9 +230,9 @@ machine(Directory, "MESI Two Level directory protocol")
|
||||||
if (memQueue_in.isReady()) {
|
if (memQueue_in.isReady()) {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
||||||
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
||||||
trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -390,7 +390,7 @@ machine(Directory, "MESI Two Level directory protocol")
|
||||||
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.DataBlk := in_msg.DataBlk;
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
tbe.PhysicalAddress := in_msg.addr;
|
tbe.PhysicalAddress := in_msg.addr;
|
||||||
tbe.Len := in_msg.Len;
|
tbe.Len := in_msg.Len;
|
||||||
|
|
|
@ -152,7 +152,7 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
return L1Cache_State_to_permission(tbe.TBEState);
|
return L1Cache_State_to_permission(tbe.TBEState);
|
||||||
}
|
}
|
||||||
|
@ -172,7 +172,7 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
}
|
}
|
||||||
|
|
||||||
void functionalRead(Addr addr, Packet *pkt) {
|
void functionalRead(Addr addr, Packet *pkt) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
testAndRead(addr, tbe.DataBlk, pkt);
|
testAndRead(addr, tbe.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -183,7 +183,7 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
int functionalWrite(Addr addr, Packet *pkt) {
|
int functionalWrite(Addr addr, Packet *pkt) {
|
||||||
int num_functional_writes := 0;
|
int num_functional_writes := 0;
|
||||||
|
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
num_functional_writes := num_functional_writes +
|
num_functional_writes := num_functional_writes +
|
||||||
testAndWrite(addr, tbe.DataBlk, pkt);
|
testAndWrite(addr, tbe.DataBlk, pkt);
|
||||||
|
@ -205,7 +205,7 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
|
peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
|
||||||
|
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceRequestType:GETX) {
|
if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
|
trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
|
||||||
|
@ -231,7 +231,7 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
|
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
|
||||||
|
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceResponseType:DATA) {
|
if (in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
trigger(Event:Data, in_msg.addr, cache_entry, tbe);
|
trigger(Event:Data, in_msg.addr, cache_entry, tbe);
|
||||||
|
@ -254,11 +254,11 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
// make room for the block
|
// make room for the block
|
||||||
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
|
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
|
||||||
getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
|
getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
|
||||||
TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
|
TBEs.lookup(cacheMemory.cacheProbe(in_msg.LineAddress)));
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
cache_entry, TBEs[in_msg.LineAddress]);
|
cache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -396,7 +396,7 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
|
|
||||||
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
}
|
}
|
||||||
|
|
||||||
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
|
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
|
||||||
|
|
|
@ -111,7 +111,7 @@ machine(Directory, "Directory protocol")
|
||||||
void unset_tbe();
|
void unset_tbe();
|
||||||
|
|
||||||
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
|
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
|
||||||
Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
|
Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
|
||||||
|
|
||||||
if (is_valid(dir_entry)) {
|
if (is_valid(dir_entry)) {
|
||||||
return dir_entry;
|
return dir_entry;
|
||||||
|
@ -155,7 +155,7 @@ machine(Directory, "Directory protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
return Directory_State_to_permission(tbe.TBEState);
|
return Directory_State_to_permission(tbe.TBEState);
|
||||||
}
|
}
|
||||||
|
@ -174,7 +174,7 @@ machine(Directory, "Directory protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
void functionalRead(Addr addr, Packet *pkt) {
|
void functionalRead(Addr addr, Packet *pkt) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
testAndRead(addr, tbe.DataBlk, pkt);
|
testAndRead(addr, tbe.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -185,7 +185,7 @@ machine(Directory, "Directory protocol")
|
||||||
int functionalWrite(Addr addr, Packet *pkt) {
|
int functionalWrite(Addr addr, Packet *pkt) {
|
||||||
int num_functional_writes := 0;
|
int num_functional_writes := 0;
|
||||||
|
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
num_functional_writes := num_functional_writes +
|
num_functional_writes := num_functional_writes +
|
||||||
testAndWrite(addr, tbe.DataBlk, pkt);
|
testAndWrite(addr, tbe.DataBlk, pkt);
|
||||||
|
@ -207,7 +207,7 @@ machine(Directory, "Directory protocol")
|
||||||
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
|
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
|
||||||
if (dmaRequestQueue_in.isReady()) {
|
if (dmaRequestQueue_in.isReady()) {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
TBE tbe := TBEs[in_msg.LineAddress];
|
TBE tbe := TBEs.lookup(in_msg.LineAddress);
|
||||||
if (in_msg.Type == DMARequestType:READ) {
|
if (in_msg.Type == DMARequestType:READ) {
|
||||||
trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
|
trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
|
||||||
} else if (in_msg.Type == DMARequestType:WRITE) {
|
} else if (in_msg.Type == DMARequestType:WRITE) {
|
||||||
|
@ -222,7 +222,7 @@ machine(Directory, "Directory protocol")
|
||||||
in_port(requestQueue_in, RequestMsg, requestToDir) {
|
in_port(requestQueue_in, RequestMsg, requestToDir) {
|
||||||
if (requestQueue_in.isReady()) {
|
if (requestQueue_in.isReady()) {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
if (in_msg.Type == CoherenceRequestType:GETS) {
|
if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
trigger(Event:GETS, in_msg.addr, tbe);
|
trigger(Event:GETS, in_msg.addr, tbe);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETX) {
|
} else if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
|
@ -245,7 +245,7 @@ machine(Directory, "Directory protocol")
|
||||||
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
|
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
|
||||||
if (memQueue_in.isReady()) {
|
if (memQueue_in.isReady()) {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
||||||
trigger(Event:Memory_Data, in_msg.addr, tbe);
|
trigger(Event:Memory_Data, in_msg.addr, tbe);
|
||||||
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
||||||
|
@ -403,7 +403,7 @@ machine(Directory, "Directory protocol")
|
||||||
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.DataBlk := in_msg.DataBlk;
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
tbe.PhysicalAddress := in_msg.PhysicalAddress;
|
tbe.PhysicalAddress := in_msg.PhysicalAddress;
|
||||||
tbe.Len := in_msg.Len;
|
tbe.Len := in_msg.Len;
|
||||||
|
@ -414,7 +414,7 @@ machine(Directory, "Directory protocol")
|
||||||
action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
|
action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.DmaRequestor := in_msg.Requestor;
|
tbe.DmaRequestor := in_msg.Requestor;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -422,7 +422,7 @@ machine(Directory, "Directory protocol")
|
||||||
action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
|
action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.DataBlk := in_msg.DataBlk;
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -190,7 +190,7 @@ machine(L1Cache, "Directory protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
|
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
|
||||||
return L1Cache_State_to_permission(tbe.TBEState);
|
return L1Cache_State_to_permission(tbe.TBEState);
|
||||||
|
@ -217,7 +217,7 @@ machine(L1Cache, "Directory protocol")
|
||||||
if(is_valid(cache_entry)) {
|
if(is_valid(cache_entry)) {
|
||||||
testAndRead(addr, cache_entry.DataBlk, pkt);
|
testAndRead(addr, cache_entry.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
testAndRead(addr, tbe.DataBlk, pkt);
|
testAndRead(addr, tbe.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -236,7 +236,7 @@ machine(L1Cache, "Directory protocol")
|
||||||
return num_functional_writes;
|
return num_functional_writes;
|
||||||
}
|
}
|
||||||
|
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
num_functional_writes := num_functional_writes +
|
num_functional_writes := num_functional_writes +
|
||||||
testAndWrite(addr, tbe.DataBlk, pkt);
|
testAndWrite(addr, tbe.DataBlk, pkt);
|
||||||
return num_functional_writes;
|
return num_functional_writes;
|
||||||
|
@ -269,7 +269,7 @@ machine(L1Cache, "Directory protocol")
|
||||||
if (useTimerTable_in.isReady()) {
|
if (useTimerTable_in.isReady()) {
|
||||||
trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
|
trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
|
||||||
getCacheEntry(useTimerTable.readyAddress()),
|
getCacheEntry(useTimerTable.readyAddress()),
|
||||||
TBEs[useTimerTable.readyAddress()]);
|
TBEs.lookup(useTimerTable.readyAddress()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -279,7 +279,7 @@ machine(L1Cache, "Directory protocol")
|
||||||
peek(triggerQueue_in, TriggerMsg) {
|
peek(triggerQueue_in, TriggerMsg) {
|
||||||
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
||||||
trigger(Event:All_acks, in_msg.addr,
|
trigger(Event:All_acks, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -299,29 +299,29 @@ machine(L1Cache, "Directory protocol")
|
||||||
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
||||||
if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
|
if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
|
||||||
trigger(Event:Own_GETX, in_msg.addr,
|
trigger(Event:Own_GETX, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Fwd_GETX, in_msg.addr,
|
trigger(Event:Fwd_GETX, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
trigger(Event:Fwd_GETS, in_msg.addr,
|
trigger(Event:Fwd_GETS, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
|
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
|
||||||
trigger(Event:Fwd_DMA, in_msg.addr,
|
trigger(Event:Fwd_DMA, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
|
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
|
||||||
trigger(Event:Writeback_Ack, in_msg.addr,
|
trigger(Event:Writeback_Ack, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
|
} else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
|
||||||
trigger(Event:Writeback_Ack_Data, in_msg.addr,
|
trigger(Event:Writeback_Ack_Data, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
|
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
|
||||||
trigger(Event:Writeback_Nack, in_msg.addr,
|
trigger(Event:Writeback_Nack, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:INV) {
|
} else if (in_msg.Type == CoherenceRequestType:INV) {
|
||||||
trigger(Event:Inv, in_msg.addr,
|
trigger(Event:Inv, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -335,13 +335,13 @@ machine(L1Cache, "Directory protocol")
|
||||||
peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
|
peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
|
||||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
trigger(Event:Ack, in_msg.addr,
|
trigger(Event:Ack, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
trigger(Event:Data, in_msg.addr,
|
trigger(Event:Data, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
||||||
trigger(Event:Exclusive_Data, in_msg.addr,
|
trigger(Event:Exclusive_Data, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -365,7 +365,7 @@ machine(L1Cache, "Directory protocol")
|
||||||
// The tag matches for the L1, so the L1 asks the L2 for it.
|
// The tag matches for the L1, so the L1 asks the L2 for it.
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type),
|
trigger(mandatory_request_type_to_event(in_msg.Type),
|
||||||
in_msg.LineAddress, L1Icache_entry,
|
in_msg.LineAddress, L1Icache_entry,
|
||||||
TBEs[in_msg.LineAddress]);
|
TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
|
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
|
||||||
|
@ -373,19 +373,19 @@ machine(L1Cache, "Directory protocol")
|
||||||
if (is_valid(L1Dcache_entry)) {
|
if (is_valid(L1Dcache_entry)) {
|
||||||
// The block is in the wrong L1, put the request on the queue to the shared L2
|
// The block is in the wrong L1, put the request on the queue to the shared L2
|
||||||
trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
|
trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
|
||||||
TBEs[in_msg.LineAddress]);
|
TBEs.lookup(in_msg.LineAddress));
|
||||||
}
|
}
|
||||||
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
|
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
|
||||||
// L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
|
// L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type),
|
trigger(mandatory_request_type_to_event(in_msg.Type),
|
||||||
in_msg.LineAddress, L1Icache_entry,
|
in_msg.LineAddress, L1Icache_entry,
|
||||||
TBEs[in_msg.LineAddress]);
|
TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room in the L1
|
// No room in the L1, so we need to make room in the L1
|
||||||
trigger(Event:L1_Replacement,
|
trigger(Event:L1_Replacement,
|
||||||
L1Icache.cacheProbe(in_msg.LineAddress),
|
L1Icache.cacheProbe(in_msg.LineAddress),
|
||||||
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
|
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
|
||||||
TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
|
TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -396,7 +396,7 @@ machine(L1Cache, "Directory protocol")
|
||||||
// The tag matches for the L1, so the L1 ask the L2 for it
|
// The tag matches for the L1, so the L1 ask the L2 for it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type),
|
trigger(mandatory_request_type_to_event(in_msg.Type),
|
||||||
in_msg.LineAddress, L1Dcache_entry,
|
in_msg.LineAddress, L1Dcache_entry,
|
||||||
TBEs[in_msg.LineAddress]);
|
TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
|
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
|
||||||
|
@ -404,19 +404,19 @@ machine(L1Cache, "Directory protocol")
|
||||||
if (is_valid(L1Icache_entry)) {
|
if (is_valid(L1Icache_entry)) {
|
||||||
// The block is in the wrong L1, put the request on the queue to the shared L2
|
// The block is in the wrong L1, put the request on the queue to the shared L2
|
||||||
trigger(Event:L1_Replacement, in_msg.LineAddress,
|
trigger(Event:L1_Replacement, in_msg.LineAddress,
|
||||||
L1Icache_entry, TBEs[in_msg.LineAddress]);
|
L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
|
||||||
}
|
}
|
||||||
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
|
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
|
||||||
// L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
|
// L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type),
|
trigger(mandatory_request_type_to_event(in_msg.Type),
|
||||||
in_msg.LineAddress, L1Dcache_entry,
|
in_msg.LineAddress, L1Dcache_entry,
|
||||||
TBEs[in_msg.LineAddress]);
|
TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room in the L1
|
// No room in the L1, so we need to make room in the L1
|
||||||
trigger(Event:L1_Replacement,
|
trigger(Event:L1_Replacement,
|
||||||
L1Dcache.cacheProbe(in_msg.LineAddress),
|
L1Dcache.cacheProbe(in_msg.LineAddress),
|
||||||
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
|
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
|
||||||
TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
|
TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -664,7 +664,7 @@ machine(L1Cache, "Directory protocol")
|
||||||
action(i_allocateTBE, "i", desc="Allocate TBE") {
|
action(i_allocateTBE, "i", desc="Allocate TBE") {
|
||||||
check_allocate(TBEs);
|
check_allocate(TBEs);
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
assert(is_valid(cache_entry));
|
assert(is_valid(cache_entry));
|
||||||
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
|
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
|
||||||
tbe.Dirty := cache_entry.Dirty;
|
tbe.Dirty := cache_entry.Dirty;
|
||||||
|
|
|
@ -232,7 +232,7 @@ machine(L2Cache, "Token protocol")
|
||||||
void unset_tbe();
|
void unset_tbe();
|
||||||
|
|
||||||
Entry getCacheEntry(Addr address), return_by_pointer="yes" {
|
Entry getCacheEntry(Addr address), return_by_pointer="yes" {
|
||||||
return static_cast(Entry, "pointer", L2cache[address]);
|
return static_cast(Entry, "pointer", L2cache.lookup(address));
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isDirTagPresent(Addr addr) {
|
bool isDirTagPresent(Addr addr) {
|
||||||
|
@ -519,7 +519,7 @@ machine(L2Cache, "Token protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
|
DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
|
||||||
return L2Cache_State_to_permission(tbe.TBEState);
|
return L2Cache_State_to_permission(tbe.TBEState);
|
||||||
|
@ -542,7 +542,7 @@ machine(L2Cache, "Token protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
void functionalRead(Addr addr, Packet *pkt) {
|
void functionalRead(Addr addr, Packet *pkt) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
testAndRead(addr, tbe.DataBlk, pkt);
|
testAndRead(addr, tbe.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -553,7 +553,7 @@ machine(L2Cache, "Token protocol")
|
||||||
int functionalWrite(Addr addr, Packet *pkt) {
|
int functionalWrite(Addr addr, Packet *pkt) {
|
||||||
int num_functional_writes := 0;
|
int num_functional_writes := 0;
|
||||||
|
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
num_functional_writes := num_functional_writes +
|
num_functional_writes := num_functional_writes +
|
||||||
testAndWrite(addr, tbe.DataBlk, pkt);
|
testAndWrite(addr, tbe.DataBlk, pkt);
|
||||||
|
@ -582,7 +582,7 @@ machine(L2Cache, "Token protocol")
|
||||||
peek(triggerQueue_in, TriggerMsg) {
|
peek(triggerQueue_in, TriggerMsg) {
|
||||||
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
||||||
trigger(Event:All_Acks, in_msg.addr,
|
trigger(Event:All_Acks, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -598,26 +598,26 @@ machine(L2Cache, "Token protocol")
|
||||||
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
||||||
if (in_msg.Requestor == machineID) {
|
if (in_msg.Requestor == machineID) {
|
||||||
trigger(Event:Own_GETX, in_msg.addr,
|
trigger(Event:Own_GETX, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Fwd_GETX, in_msg.addr,
|
trigger(Event:Fwd_GETX, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
trigger(Event:Fwd_GETS, in_msg.addr,
|
trigger(Event:Fwd_GETS, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if(in_msg.Type == CoherenceRequestType:DMA_READ) {
|
} else if(in_msg.Type == CoherenceRequestType:DMA_READ) {
|
||||||
trigger(Event:Fwd_DMA, in_msg.addr,
|
trigger(Event:Fwd_DMA, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:INV) {
|
} else if (in_msg.Type == CoherenceRequestType:INV) {
|
||||||
trigger(Event:Inv, in_msg.addr,
|
trigger(Event:Inv, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
|
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
|
||||||
trigger(Event:Writeback_Ack, in_msg.addr,
|
trigger(Event:Writeback_Ack, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
|
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
|
||||||
trigger(Event:Writeback_Nack, in_msg.addr,
|
trigger(Event:Writeback_Nack, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -631,25 +631,25 @@ machine(L2Cache, "Token protocol")
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
if (in_msg.Type == CoherenceRequestType:GETX) {
|
if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
trigger(Event:L1_GETX, in_msg.addr,
|
trigger(Event:L1_GETX, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
trigger(Event:L1_GETS, in_msg.addr,
|
trigger(Event:L1_GETS, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
|
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
|
||||||
trigger(Event:L1_PUTO, in_msg.addr,
|
trigger(Event:L1_PUTO, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
|
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
|
||||||
trigger(Event:L1_PUTX, in_msg.addr,
|
trigger(Event:L1_PUTX, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:PUTS) {
|
} else if (in_msg.Type == CoherenceRequestType:PUTS) {
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
if (isOnlySharer(cache_entry, in_msg.addr, in_msg.Requestor)) {
|
if (isOnlySharer(cache_entry, in_msg.addr, in_msg.Requestor)) {
|
||||||
trigger(Event:L1_PUTS_only, in_msg.addr,
|
trigger(Event:L1_PUTS_only, in_msg.addr,
|
||||||
cache_entry, TBEs[in_msg.addr]);
|
cache_entry, TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
trigger(Event:L1_PUTS, in_msg.addr,
|
trigger(Event:L1_PUTS, in_msg.addr,
|
||||||
cache_entry, TBEs[in_msg.addr]);
|
cache_entry, TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
|
@ -667,35 +667,35 @@ machine(L2Cache, "Token protocol")
|
||||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
if (in_msg.SenderMachine == MachineType:L2Cache) {
|
if (in_msg.SenderMachine == MachineType:L2Cache) {
|
||||||
trigger(Event:ExtAck, in_msg.addr,
|
trigger(Event:ExtAck, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
trigger(Event:IntAck, in_msg.addr,
|
trigger(Event:IntAck, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
trigger(Event:Data, in_msg.addr,
|
trigger(Event:Data, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
||||||
trigger(Event:Data_Exclusive, in_msg.addr,
|
trigger(Event:Data_Exclusive, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
|
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
|
||||||
trigger(Event:Unblock, in_msg.addr,
|
trigger(Event:Unblock, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
|
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
|
||||||
trigger(Event:Exclusive_Unblock, in_msg.addr,
|
trigger(Event:Exclusive_Unblock, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
|
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
if (is_invalid(cache_entry) &&
|
if (is_invalid(cache_entry) &&
|
||||||
L2cache.cacheAvail(in_msg.addr) == false) {
|
L2cache.cacheAvail(in_msg.addr) == false) {
|
||||||
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
|
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
|
||||||
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
|
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
|
||||||
TBEs[L2cache.cacheProbe(in_msg.addr)]);
|
TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
trigger(Event:L1_WBDIRTYDATA, in_msg.addr,
|
trigger(Event:L1_WBDIRTYDATA, in_msg.addr,
|
||||||
cache_entry, TBEs[in_msg.addr]);
|
cache_entry, TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_DATA) {
|
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_DATA) {
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
|
@ -703,15 +703,15 @@ machine(L2Cache, "Token protocol")
|
||||||
L2cache.cacheAvail(in_msg.addr) == false) {
|
L2cache.cacheAvail(in_msg.addr) == false) {
|
||||||
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
|
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
|
||||||
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
|
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
|
||||||
TBEs[L2cache.cacheProbe(in_msg.addr)]);
|
TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
trigger(Event:L1_WBCLEANDATA, in_msg.addr,
|
trigger(Event:L1_WBCLEANDATA, in_msg.addr,
|
||||||
cache_entry, TBEs[in_msg.addr]);
|
cache_entry, TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
|
||||||
trigger(Event:DmaAck, in_msg.addr,
|
trigger(Event:DmaAck, in_msg.addr,
|
||||||
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
|
getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -1223,7 +1223,7 @@ machine(L2Cache, "Token protocol")
|
||||||
action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
|
action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
|
||||||
check_allocate(TBEs);
|
check_allocate(TBEs);
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
if(is_valid(cache_entry)) {
|
if(is_valid(cache_entry)) {
|
||||||
tbe.DataBlk := cache_entry.DataBlk;
|
tbe.DataBlk := cache_entry.DataBlk;
|
||||||
tbe.Dirty := cache_entry.Dirty;
|
tbe.Dirty := cache_entry.Dirty;
|
||||||
|
|
|
@ -122,7 +122,7 @@ machine(Directory, "Directory protocol")
|
||||||
void unset_tbe();
|
void unset_tbe();
|
||||||
|
|
||||||
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
|
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
|
||||||
Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
|
Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
|
||||||
|
|
||||||
if (is_valid(dir_entry)) {
|
if (is_valid(dir_entry)) {
|
||||||
return dir_entry;
|
return dir_entry;
|
||||||
|
@ -234,26 +234,26 @@ machine(Directory, "Directory protocol")
|
||||||
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
|
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
|
||||||
if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
|
if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
|
||||||
trigger(Event:Last_Unblock, in_msg.addr,
|
trigger(Event:Last_Unblock, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Unblock, in_msg.addr,
|
trigger(Event:Unblock, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
|
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
|
||||||
trigger(Event:Exclusive_Unblock, in_msg.addr,
|
trigger(Event:Exclusive_Unblock, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
|
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
|
||||||
trigger(Event:Dirty_Writeback, in_msg.addr,
|
trigger(Event:Dirty_Writeback, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
|
||||||
trigger(Event:Clean_Writeback, in_msg.addr,
|
trigger(Event:Clean_Writeback, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
||||||
trigger(Event:Data, in_msg.addr,
|
trigger(Event:Data, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
|
||||||
trigger(Event:DMA_ACK, in_msg.addr,
|
trigger(Event:DMA_ACK, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
}
|
}
|
||||||
|
@ -265,21 +265,21 @@ machine(Directory, "Directory protocol")
|
||||||
if (requestQueue_in.isReady()) {
|
if (requestQueue_in.isReady()) {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
if (in_msg.Type == CoherenceRequestType:GETS) {
|
if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:GETS, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETX) {
|
} else if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:GETX, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
|
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
|
||||||
trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:PUTX, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
|
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
|
||||||
trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:PUTO, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
|
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
|
||||||
trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
|
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
|
||||||
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
|
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
|
||||||
TBEs[makeLineAddress(in_msg.addr)]);
|
TBEs.lookup(makeLineAddress(in_msg.addr)));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
||||||
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
|
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
|
||||||
TBEs[makeLineAddress(in_msg.addr)]);
|
TBEs.lookup(makeLineAddress(in_msg.addr)));
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
}
|
}
|
||||||
|
@ -292,9 +292,9 @@ machine(Directory, "Directory protocol")
|
||||||
if (memQueue_in.isReady()) {
|
if (memQueue_in.isReady()) {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
||||||
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
||||||
trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -540,7 +540,7 @@ machine(Directory, "Directory protocol")
|
||||||
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
|
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
|
||||||
peek (requestQueue_in, RequestMsg) {
|
peek (requestQueue_in, RequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.PhysicalAddress := in_msg.addr;
|
tbe.PhysicalAddress := in_msg.addr;
|
||||||
tbe.Len := in_msg.Len;
|
tbe.Len := in_msg.Len;
|
||||||
tbe.DataBlk := in_msg.DataBlk;
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
|
|
|
@ -108,10 +108,10 @@ machine(DMA, "DMA Controller")
|
||||||
peek(dmaRequestQueue_in, SequencerMsg) {
|
peek(dmaRequestQueue_in, SequencerMsg) {
|
||||||
if (in_msg.Type == SequencerRequestType:LD ) {
|
if (in_msg.Type == SequencerRequestType:LD ) {
|
||||||
trigger(Event:ReadRequest, in_msg.LineAddress,
|
trigger(Event:ReadRequest, in_msg.LineAddress,
|
||||||
TBEs[in_msg.LineAddress]);
|
TBEs.lookup(in_msg.LineAddress));
|
||||||
} else if (in_msg.Type == SequencerRequestType:ST) {
|
} else if (in_msg.Type == SequencerRequestType:ST) {
|
||||||
trigger(Event:WriteRequest, in_msg.LineAddress,
|
trigger(Event:WriteRequest, in_msg.LineAddress,
|
||||||
TBEs[in_msg.LineAddress]);
|
TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
error("Invalid request type");
|
error("Invalid request type");
|
||||||
}
|
}
|
||||||
|
@ -124,14 +124,14 @@ machine(DMA, "DMA Controller")
|
||||||
peek( dmaResponseQueue_in, ResponseMsg) {
|
peek( dmaResponseQueue_in, ResponseMsg) {
|
||||||
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
|
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
|
||||||
trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
|
trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
|
||||||
TBEs[makeLineAddress(in_msg.addr)]);
|
TBEs.lookup(makeLineAddress(in_msg.addr)));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
|
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
|
||||||
in_msg.Type == CoherenceResponseType:DATA) {
|
in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
trigger(Event:Data, makeLineAddress(in_msg.addr),
|
trigger(Event:Data, makeLineAddress(in_msg.addr),
|
||||||
TBEs[makeLineAddress(in_msg.addr)]);
|
TBEs.lookup(makeLineAddress(in_msg.addr)));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr),
|
trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr),
|
||||||
TBEs[makeLineAddress(in_msg.addr)]);
|
TBEs.lookup(makeLineAddress(in_msg.addr)));
|
||||||
} else {
|
} else {
|
||||||
error("Invalid response type");
|
error("Invalid response type");
|
||||||
}
|
}
|
||||||
|
@ -144,7 +144,7 @@ machine(DMA, "DMA Controller")
|
||||||
if (triggerQueue_in.isReady()) {
|
if (triggerQueue_in.isReady()) {
|
||||||
peek(triggerQueue_in, TriggerMsg) {
|
peek(triggerQueue_in, TriggerMsg) {
|
||||||
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
||||||
trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:All_Acks, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -240,7 +240,7 @@ machine(DMA, "DMA Controller")
|
||||||
|
|
||||||
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
|
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
}
|
}
|
||||||
|
|
||||||
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
|
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
|
||||||
|
|
|
@ -366,7 +366,7 @@ machine(L1Cache, "Token protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := L1_TBEs[addr];
|
TBE tbe := L1_TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
return L1Cache_State_to_permission(tbe.TBEState);
|
return L1Cache_State_to_permission(tbe.TBEState);
|
||||||
}
|
}
|
||||||
|
@ -459,7 +459,7 @@ machine(L1Cache, "Token protocol")
|
||||||
// Use Timer
|
// Use Timer
|
||||||
in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
|
in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
|
||||||
if (useTimerTable_in.isReady()) {
|
if (useTimerTable_in.isReady()) {
|
||||||
TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
|
TBE tbe := L1_TBEs.lookup(useTimerTable.readyAddress());
|
||||||
|
|
||||||
if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
|
if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
|
||||||
(persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
|
(persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
|
||||||
|
@ -487,7 +487,7 @@ machine(L1Cache, "Token protocol")
|
||||||
if (reissueTimerTable_in.isReady()) {
|
if (reissueTimerTable_in.isReady()) {
|
||||||
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
|
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
|
||||||
getCacheEntry(reissueTimerTable.readyAddress()),
|
getCacheEntry(reissueTimerTable.readyAddress()),
|
||||||
L1_TBEs[reissueTimerTable.readyAddress()]);
|
L1_TBEs.lookup(reissueTimerTable.readyAddress()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -510,7 +510,7 @@ machine(L1Cache, "Token protocol")
|
||||||
|
|
||||||
// React to the message based on the current state of the table
|
// React to the message based on the current state of the table
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := L1_TBEs[in_msg.addr];
|
TBE tbe := L1_TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if (persistentTable.isLocked(in_msg.addr)) {
|
if (persistentTable.isLocked(in_msg.addr)) {
|
||||||
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
|
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
|
||||||
|
@ -548,7 +548,7 @@ machine(L1Cache, "Token protocol")
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
|
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := L1_TBEs[in_msg.addr];
|
TBE tbe := L1_TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
// Mark TBE flag if response received off-chip. Use this to update average latency estimate
|
// Mark TBE flag if response received off-chip. Use this to update average latency estimate
|
||||||
if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
|
if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
|
||||||
|
@ -559,7 +559,7 @@ machine(L1Cache, "Token protocol")
|
||||||
|
|
||||||
// came from an off-chip L2 cache
|
// came from an off-chip L2 cache
|
||||||
if (is_valid(tbe)) {
|
if (is_valid(tbe)) {
|
||||||
// L1_TBEs[in_msg.addr].ExternalResponse := true;
|
// L1_TBEs.lookup(in_msg.addr).ExternalResponse := true;
|
||||||
// profile_offchipL2_response(in_msg.addr);
|
// profile_offchipL2_response(in_msg.addr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -619,7 +619,7 @@ machine(L1Cache, "Token protocol")
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
|
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := L1_TBEs[in_msg.addr];
|
TBE tbe := L1_TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceRequestType:GETX) {
|
if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
if (in_msg.isLocal) {
|
if (in_msg.isLocal) {
|
||||||
|
@ -665,7 +665,7 @@ machine(L1Cache, "Token protocol")
|
||||||
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
|
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
|
||||||
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
|
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
|
||||||
|
|
||||||
TBE tbe := L1_TBEs[in_msg.LineAddress];
|
TBE tbe := L1_TBEs.lookup(in_msg.LineAddress);
|
||||||
|
|
||||||
if (in_msg.Type == RubyRequestType:IFETCH) {
|
if (in_msg.Type == RubyRequestType:IFETCH) {
|
||||||
// ** INSTRUCTION ACCESS ***
|
// ** INSTRUCTION ACCESS ***
|
||||||
|
@ -695,7 +695,7 @@ machine(L1Cache, "Token protocol")
|
||||||
trigger(Event:L1_Replacement,
|
trigger(Event:L1_Replacement,
|
||||||
L1Icache.cacheProbe(in_msg.LineAddress),
|
L1Icache.cacheProbe(in_msg.LineAddress),
|
||||||
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
|
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
|
||||||
L1_TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
|
L1_TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -726,7 +726,7 @@ machine(L1Cache, "Token protocol")
|
||||||
trigger(Event:L1_Replacement,
|
trigger(Event:L1_Replacement,
|
||||||
L1Dcache.cacheProbe(in_msg.LineAddress),
|
L1Dcache.cacheProbe(in_msg.LineAddress),
|
||||||
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
|
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
|
||||||
L1_TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
|
L1_TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1332,7 +1332,7 @@ machine(L1Cache, "Token protocol")
|
||||||
action(i_allocateTBE, "i", desc="Allocate TBE") {
|
action(i_allocateTBE, "i", desc="Allocate TBE") {
|
||||||
check_allocate(L1_TBEs);
|
check_allocate(L1_TBEs);
|
||||||
L1_TBEs.allocate(address);
|
L1_TBEs.allocate(address);
|
||||||
set_tbe(L1_TBEs[address]);
|
set_tbe(L1_TBEs.lookup(address));
|
||||||
tbe.IssueCount := 0;
|
tbe.IssueCount := 0;
|
||||||
peek(mandatoryQueue_in, RubyRequest) {
|
peek(mandatoryQueue_in, RubyRequest) {
|
||||||
tbe.PC := in_msg.ProgramCounter;
|
tbe.PC := in_msg.ProgramCounter;
|
||||||
|
|
|
@ -175,7 +175,7 @@ machine(Directory, "Token protocol")
|
||||||
void unset_tbe();
|
void unset_tbe();
|
||||||
|
|
||||||
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
|
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
|
||||||
Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
|
Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
|
||||||
|
|
||||||
if (is_valid(dir_entry)) {
|
if (is_valid(dir_entry)) {
|
||||||
return dir_entry;
|
return dir_entry;
|
||||||
|
@ -218,7 +218,7 @@ machine(Directory, "Token protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
return Directory_State_to_permission(tbe.TBEState);
|
return Directory_State_to_permission(tbe.TBEState);
|
||||||
}
|
}
|
||||||
|
@ -245,7 +245,7 @@ machine(Directory, "Token protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
void functionalRead(Addr addr, Packet *pkt) {
|
void functionalRead(Addr addr, Packet *pkt) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
testAndRead(addr, tbe.DataBlk, pkt);
|
testAndRead(addr, tbe.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -256,7 +256,7 @@ machine(Directory, "Token protocol")
|
||||||
int functionalWrite(Addr addr, Packet *pkt) {
|
int functionalWrite(Addr addr, Packet *pkt) {
|
||||||
int num_functional_writes := 0;
|
int num_functional_writes := 0;
|
||||||
|
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
num_functional_writes := num_functional_writes +
|
num_functional_writes := num_functional_writes +
|
||||||
testAndWrite(addr, tbe.DataBlk, pkt);
|
testAndWrite(addr, tbe.DataBlk, pkt);
|
||||||
|
@ -280,9 +280,9 @@ machine(Directory, "Token protocol")
|
||||||
if (memQueue_in.isReady()) {
|
if (memQueue_in.isReady()) {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
||||||
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
||||||
trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -295,7 +295,7 @@ machine(Directory, "Token protocol")
|
||||||
in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
|
in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
|
||||||
if (reissueTimerTable_in.isReady()) {
|
if (reissueTimerTable_in.isReady()) {
|
||||||
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
|
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
|
||||||
TBEs[reissueTimerTable.readyAddress()]);
|
TBEs.lookup(reissueTimerTable.readyAddress()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -307,13 +307,13 @@ machine(Directory, "Token protocol")
|
||||||
if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
|
if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
|
||||||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
|
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
|
||||||
trigger(Event:Data_All_Tokens, in_msg.addr,
|
trigger(Event:Data_All_Tokens, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
|
||||||
trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
|
trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
trigger(Event:Ack_All_Tokens, in_msg.addr,
|
trigger(Event:Ack_All_Tokens, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -321,14 +321,14 @@ machine(Directory, "Token protocol")
|
||||||
} else {
|
} else {
|
||||||
if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
|
if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
|
||||||
trigger(Event:Data_Owner, in_msg.addr,
|
trigger(Event:Data_Owner, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else if ((in_msg.Type == CoherenceResponseType:ACK) ||
|
} else if ((in_msg.Type == CoherenceResponseType:ACK) ||
|
||||||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
|
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
|
||||||
trigger(Event:Tokens, in_msg.addr,
|
trigger(Event:Tokens, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
|
||||||
trigger(Event:Ack_Owner, in_msg.addr,
|
trigger(Event:Ack_Owner, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -360,38 +360,38 @@ machine(Directory, "Token protocol")
|
||||||
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
|
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
|
||||||
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
|
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
|
||||||
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
|
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
|
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// locked
|
// locked
|
||||||
trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// unlocked
|
// unlocked
|
||||||
trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Unlockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
|
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
|
||||||
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
|
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
|
||||||
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
|
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
|
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
|
||||||
TBEs[in_msg.addr]);
|
TBEs.lookup(in_msg.addr));
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
|
} else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
|
||||||
// locked
|
// locked
|
||||||
trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
|
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
|
||||||
// locked
|
// locked
|
||||||
trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
|
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
|
||||||
// unlocked
|
// unlocked
|
||||||
trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:Unlockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
}
|
}
|
||||||
|
@ -405,9 +405,9 @@ machine(Directory, "Token protocol")
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
if (in_msg.Type == CoherenceRequestType:GETS) {
|
if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:GETS, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETX) {
|
} else if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
|
trigger(Event:GETX, in_msg.addr, TBEs.lookup(in_msg.addr));
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
}
|
}
|
||||||
|
@ -419,14 +419,14 @@ machine(Directory, "Token protocol")
|
||||||
if (dmaRequestQueue_in.isReady()) {
|
if (dmaRequestQueue_in.isReady()) {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
if (in_msg.Type == DMARequestType:READ) {
|
if (in_msg.Type == DMARequestType:READ) {
|
||||||
trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
|
trigger(Event:DMA_READ, in_msg.LineAddress, TBEs.lookup(in_msg.LineAddress));
|
||||||
} else if (in_msg.Type == DMARequestType:WRITE) {
|
} else if (in_msg.Type == DMARequestType:WRITE) {
|
||||||
if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
|
if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
|
||||||
trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
|
trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
|
||||||
TBEs[in_msg.LineAddress]);
|
TBEs.lookup(in_msg.LineAddress));
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:DMA_WRITE, in_msg.LineAddress,
|
trigger(Event:DMA_WRITE, in_msg.LineAddress,
|
||||||
TBEs[in_msg.LineAddress]);
|
TBEs.lookup(in_msg.LineAddress));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -691,7 +691,7 @@ machine(Directory, "Token protocol")
|
||||||
action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
|
action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.DataBlk := in_msg.DataBlk;
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
tbe.PhysicalAddress := in_msg.PhysicalAddress;
|
tbe.PhysicalAddress := in_msg.PhysicalAddress;
|
||||||
tbe.Len := in_msg.Len;
|
tbe.Len := in_msg.Len;
|
||||||
|
|
|
@ -210,7 +210,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
if(is_valid(cache_entry)) {
|
if(is_valid(cache_entry)) {
|
||||||
testAndRead(addr, cache_entry.DataBlk, pkt);
|
testAndRead(addr, cache_entry.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
testAndRead(addr, tbe.DataBlk, pkt);
|
testAndRead(addr, tbe.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -229,7 +229,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
return num_functional_writes;
|
return num_functional_writes;
|
||||||
}
|
}
|
||||||
|
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
num_functional_writes := num_functional_writes +
|
num_functional_writes := num_functional_writes +
|
||||||
testAndWrite(addr, tbe.DataBlk, pkt);
|
testAndWrite(addr, tbe.DataBlk, pkt);
|
||||||
return num_functional_writes;
|
return num_functional_writes;
|
||||||
|
@ -274,7 +274,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
return L1Cache_State_to_permission(tbe.TBEState);
|
return L1Cache_State_to_permission(tbe.TBEState);
|
||||||
}
|
}
|
||||||
|
@ -337,7 +337,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
peek(triggerQueue_in, TriggerMsg) {
|
peek(triggerQueue_in, TriggerMsg) {
|
||||||
|
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if (in_msg.Type == TriggerType:L2_to_L1) {
|
if (in_msg.Type == TriggerType:L2_to_L1) {
|
||||||
trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
|
trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
|
||||||
|
@ -360,7 +360,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
peek(responseToCache_in, ResponseMsg, block_on="addr") {
|
peek(responseToCache_in, ResponseMsg, block_on="addr") {
|
||||||
|
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
|
trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
|
||||||
|
@ -385,7 +385,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
peek(forwardToCache_in, RequestMsg, block_on="addr") {
|
peek(forwardToCache_in, RequestMsg, block_on="addr") {
|
||||||
|
|
||||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
|
|
||||||
if ((in_msg.Type == CoherenceRequestType:GETX) ||
|
if ((in_msg.Type == CoherenceRequestType:GETX) ||
|
||||||
(in_msg.Type == CoherenceRequestType:GETF)) {
|
(in_msg.Type == CoherenceRequestType:GETF)) {
|
||||||
|
@ -429,7 +429,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
|
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
|
||||||
|
|
||||||
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
|
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
|
||||||
TBE tbe := TBEs[in_msg.LineAddress];
|
TBE tbe := TBEs.lookup(in_msg.LineAddress);
|
||||||
|
|
||||||
if (in_msg.Type == RubyRequestType:IFETCH) {
|
if (in_msg.Type == RubyRequestType:IFETCH) {
|
||||||
// ** INSTRUCTION ACCESS ***
|
// ** INSTRUCTION ACCESS ***
|
||||||
|
@ -452,7 +452,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
trigger(Event:L2_Replacement,
|
trigger(Event:L2_Replacement,
|
||||||
l2_victim_addr,
|
l2_victim_addr,
|
||||||
getL2CacheEntry(l2_victim_addr),
|
getL2CacheEntry(l2_victim_addr),
|
||||||
TBEs[l2_victim_addr]);
|
TBEs.lookup(l2_victim_addr));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -477,14 +477,14 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
trigger(Event:L1_to_L2,
|
trigger(Event:L1_to_L2,
|
||||||
l1i_victim_addr,
|
l1i_victim_addr,
|
||||||
getL1ICacheEntry(l1i_victim_addr),
|
getL1ICacheEntry(l1i_victim_addr),
|
||||||
TBEs[l1i_victim_addr]);
|
TBEs.lookup(l1i_victim_addr));
|
||||||
} else {
|
} else {
|
||||||
Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
|
Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
|
||||||
// The L2 does not have room, so we replace a line from the L2
|
// The L2 does not have room, so we replace a line from the L2
|
||||||
trigger(Event:L2_Replacement,
|
trigger(Event:L2_Replacement,
|
||||||
l2_victim_addr,
|
l2_victim_addr,
|
||||||
getL2CacheEntry(l2_victim_addr),
|
getL2CacheEntry(l2_victim_addr),
|
||||||
TBEs[l2_victim_addr]);
|
TBEs.lookup(l2_victim_addr));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -510,7 +510,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
trigger(Event:L2_Replacement,
|
trigger(Event:L2_Replacement,
|
||||||
l2_victim_addr,
|
l2_victim_addr,
|
||||||
getL2CacheEntry(l2_victim_addr),
|
getL2CacheEntry(l2_victim_addr),
|
||||||
TBEs[l2_victim_addr]);
|
TBEs.lookup(l2_victim_addr));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -534,14 +534,14 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
trigger(Event:L1_to_L2,
|
trigger(Event:L1_to_L2,
|
||||||
l1d_victim_addr,
|
l1d_victim_addr,
|
||||||
getL1DCacheEntry(l1d_victim_addr),
|
getL1DCacheEntry(l1d_victim_addr),
|
||||||
TBEs[l1d_victim_addr]);
|
TBEs.lookup(l1d_victim_addr));
|
||||||
} else {
|
} else {
|
||||||
Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
|
Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
|
||||||
// The L2 does not have room, so we replace a line from the L2
|
// The L2 does not have room, so we replace a line from the L2
|
||||||
trigger(Event:L2_Replacement,
|
trigger(Event:L2_Replacement,
|
||||||
l2_victim_addr,
|
l2_victim_addr,
|
||||||
getL2CacheEntry(l2_victim_addr),
|
getL2CacheEntry(l2_victim_addr),
|
||||||
TBEs[l2_victim_addr]);
|
TBEs.lookup(l2_victim_addr));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -926,7 +926,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
check_allocate(TBEs);
|
check_allocate(TBEs);
|
||||||
assert(is_valid(cache_entry));
|
assert(is_valid(cache_entry));
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
|
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
|
||||||
tbe.Dirty := cache_entry.Dirty;
|
tbe.Dirty := cache_entry.Dirty;
|
||||||
tbe.Sharers := false;
|
tbe.Sharers := false;
|
||||||
|
@ -935,7 +935,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
|
||||||
action(it_allocateTBE, "it", desc="Allocate TBE") {
|
action(it_allocateTBE, "it", desc="Allocate TBE") {
|
||||||
check_allocate(TBEs);
|
check_allocate(TBEs);
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.Dirty := false;
|
tbe.Dirty := false;
|
||||||
tbe.Sharers := false;
|
tbe.Sharers := false;
|
||||||
}
|
}
|
||||||
|
|
|
@ -195,7 +195,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
|
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
|
||||||
|
|
||||||
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
|
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
|
||||||
Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
|
Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
|
||||||
|
|
||||||
if (is_valid(dir_entry)) {
|
if (is_valid(dir_entry)) {
|
||||||
return dir_entry;
|
return dir_entry;
|
||||||
|
@ -250,7 +250,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
AccessPermission getAccessPermission(Addr addr) {
|
AccessPermission getAccessPermission(Addr addr) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
return Directory_State_to_permission(tbe.TBEState);
|
return Directory_State_to_permission(tbe.TBEState);
|
||||||
}
|
}
|
||||||
|
@ -267,7 +267,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
void functionalRead(Addr addr, Packet *pkt) {
|
void functionalRead(Addr addr, Packet *pkt) {
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
testAndRead(addr, tbe.DataBlk, pkt);
|
testAndRead(addr, tbe.DataBlk, pkt);
|
||||||
} else {
|
} else {
|
||||||
|
@ -278,7 +278,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
int functionalWrite(Addr addr, Packet *pkt) {
|
int functionalWrite(Addr addr, Packet *pkt) {
|
||||||
int num_functional_writes := 0;
|
int num_functional_writes := 0;
|
||||||
|
|
||||||
TBE tbe := TBEs[addr];
|
TBE tbe := TBEs.lookup(addr);
|
||||||
if(is_valid(tbe)) {
|
if(is_valid(tbe)) {
|
||||||
num_functional_writes := num_functional_writes +
|
num_functional_writes := num_functional_writes +
|
||||||
testAndWrite(addr, tbe.DataBlk, pkt);
|
testAndWrite(addr, tbe.DataBlk, pkt);
|
||||||
|
@ -317,7 +317,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
if (triggerQueue_in.isReady()) {
|
if (triggerQueue_in.isReady()) {
|
||||||
peek(triggerQueue_in, TriggerMsg) {
|
peek(triggerQueue_in, TriggerMsg) {
|
||||||
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
|
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
||||||
trigger(Event:All_acks_and_owner_data, in_msg.addr,
|
trigger(Event:All_acks_and_owner_data, in_msg.addr,
|
||||||
pf_entry, tbe);
|
pf_entry, tbe);
|
||||||
|
@ -341,7 +341,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
if (unblockNetwork_in.isReady()) {
|
if (unblockNetwork_in.isReady()) {
|
||||||
peek(unblockNetwork_in, ResponseMsg) {
|
peek(unblockNetwork_in, ResponseMsg) {
|
||||||
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
|
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
|
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
|
||||||
trigger(Event:Unblock, in_msg.addr, pf_entry, tbe);
|
trigger(Event:Unblock, in_msg.addr, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
|
} else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
|
||||||
|
@ -370,7 +370,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
if (responseToDir_in.isReady()) {
|
if (responseToDir_in.isReady()) {
|
||||||
peek(responseToDir_in, ResponseMsg) {
|
peek(responseToDir_in, ResponseMsg) {
|
||||||
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
|
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
trigger(Event:Ack, in_msg.addr, pf_entry, tbe);
|
trigger(Event:Ack, in_msg.addr, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
|
||||||
|
@ -393,7 +393,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
if (memQueue_in.isReady()) {
|
if (memQueue_in.isReady()) {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
|
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
||||||
trigger(Event:Memory_Data, in_msg.addr, pf_entry, tbe);
|
trigger(Event:Memory_Data, in_msg.addr, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
||||||
|
@ -410,7 +410,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
if (requestQueue_in.isReady()) {
|
if (requestQueue_in.isReady()) {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
|
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
|
||||||
TBE tbe := TBEs[in_msg.addr];
|
TBE tbe := TBEs.lookup(in_msg.addr);
|
||||||
if (in_msg.Type == CoherenceRequestType:PUT) {
|
if (in_msg.Type == CoherenceRequestType:PUT) {
|
||||||
trigger(Event:PUT, in_msg.addr, pf_entry, tbe);
|
trigger(Event:PUT, in_msg.addr, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:PUTF) {
|
} else if (in_msg.Type == CoherenceRequestType:PUTF) {
|
||||||
|
@ -428,7 +428,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
trigger(Event:Pf_Replacement,
|
trigger(Event:Pf_Replacement,
|
||||||
probeFilter.cacheProbe(in_msg.addr),
|
probeFilter.cacheProbe(in_msg.addr),
|
||||||
getProbeFilterEntry(probeFilter.cacheProbe(in_msg.addr)),
|
getProbeFilterEntry(probeFilter.cacheProbe(in_msg.addr)),
|
||||||
TBEs[probeFilter.cacheProbe(in_msg.addr)]);
|
TBEs.lookup(probeFilter.cacheProbe(in_msg.addr)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -444,7 +444,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
if (dmaRequestQueue_in.isReady()) {
|
if (dmaRequestQueue_in.isReady()) {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
|
PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
|
||||||
TBE tbe := TBEs[in_msg.LineAddress];
|
TBE tbe := TBEs.lookup(in_msg.LineAddress);
|
||||||
if (in_msg.Type == DMARequestType:READ) {
|
if (in_msg.Type == DMARequestType:READ) {
|
||||||
trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
|
trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == DMARequestType:WRITE) {
|
} else if (in_msg.Type == DMARequestType:WRITE) {
|
||||||
|
@ -567,7 +567,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
check_allocate(TBEs);
|
check_allocate(TBEs);
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.PhysicalAddress := address;
|
tbe.PhysicalAddress := address;
|
||||||
tbe.ResponseType := CoherenceResponseType:NULL;
|
tbe.ResponseType := CoherenceResponseType:NULL;
|
||||||
}
|
}
|
||||||
|
@ -577,7 +577,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
check_allocate(TBEs);
|
check_allocate(TBEs);
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
set_tbe(TBEs[address]);
|
set_tbe(TBEs.lookup(address));
|
||||||
tbe.DmaDataBlk := in_msg.DataBlk;
|
tbe.DmaDataBlk := in_msg.DataBlk;
|
||||||
tbe.PhysicalAddress := in_msg.PhysicalAddress;
|
tbe.PhysicalAddress := in_msg.PhysicalAddress;
|
||||||
tbe.Len := in_msg.Len;
|
tbe.Len := in_msg.Len;
|
||||||
|
|
|
@ -671,10 +671,6 @@ class SLICC(Grammar):
|
||||||
"aexpr : aexpr DOT ident '(' exprs ')'"
|
"aexpr : aexpr DOT ident '(' exprs ')'"
|
||||||
p[0] = ast.MemberMethodCallExprAST(self, p[1], p[3], p[5])
|
p[0] = ast.MemberMethodCallExprAST(self, p[1], p[3], p[5])
|
||||||
|
|
||||||
def p_expr__member_method_call_lookup(self, p):
|
|
||||||
"aexpr : aexpr '[' exprs ']'"
|
|
||||||
p[0] = ast.MemberMethodCallExprAST(self, p[1], "lookup", p[3])
|
|
||||||
|
|
||||||
def p_expr__class_method_call(self, p):
|
def p_expr__class_method_call(self, p):
|
||||||
"aexpr : type DOUBLE_COLON ident '(' exprs ')'"
|
"aexpr : type DOUBLE_COLON ident '(' exprs ')'"
|
||||||
p[0] = ast.ClassMethodCallExprAST(self, p[1], p[3], p[5])
|
p[0] = ast.ClassMethodCallExprAST(self, p[1], p[3], p[5])
|
||||||
|
|
Loading…
Reference in a new issue