Change interface between coherence protocols and CacheMemory
The purpose of this patch is to change the way CacheMemory interfaces with coherence protocols. Currently, whenever a cache controller (defined in the protocol under consideration) needs to carry out any operation on a cache block, it looks up the tag hash map and figures out whether or not the block exists in the cache. In case it does exist, the operation is carried out (which requires another lookup). As observed through profiling of different protocols, multiple such lookups take place for a given cache block. It was noted that the tag lookup takes anything from 10% to 20% of the simulation time. In order to reduce this time, this patch is being posted. I have to acknowledge that many of the thoughts that went into this patch belong to Brad. Changes to CacheMemory, TBETable and AbstractCacheEntry classes: 1. The lookup function belonging to the CacheMemory class now returns a pointer to a cache block entry, instead of a reference. The pointer is NULL in case the block being looked up is not present in the cache. A similar change has been carried out in the lookup function of the TBETable class. 2. Functions for setting and getting the access permission of a cache block have been moved from the CacheMemory class to the AbstractCacheEntry class. 3. The allocate function in the CacheMemory class now returns a pointer to the allocated cache entry. Changes to SLICC: 1. Each action now has implicit variables - cache_entry and tbe. cache_entry, if != NULL, must point to the cache entry for the address on which the action is being carried out. Similarly, tbe should also point to the transaction buffer entry of the address on which the action is being carried out. 2. If a cache entry or a transaction buffer entry is passed on as an argument to a function, it is presumed that a pointer is being passed on. 3. The cache entry and the tbe pointers received __implicitly__ by the actions are passed __explicitly__ to the trigger function. 4.
While performing an action, set/unset_cache_entry and set/unset_tbe are to be used for setting / unsetting the cache entry and tbe pointers respectively. 5. is_valid() and is_invalid() have been made available for testing whether a given pointer 'is not NULL' and 'is NULL' respectively. 6. Local variables are now available, but they are assumed to be pointers always. 7. It is now possible for an object of the derived class to make calls to a function defined in the interface. 8. An OOD token has been introduced in SLICC. It is the same as the NULL token used in C/C++. If you are wondering, OOD stands for Out Of Domain. 9. static_cast can now take an optional parameter that asks for casting the given variable to a pointer of the given type. 10. Functions can be annotated with 'return_by_pointer=yes' to return a pointer. 11. StateMachine has two new variables, EntryType and TBEType. EntryType is set to the type which inherits from 'AbstractCacheEntry'. There can only be one such type in the machine. TBEType is set to the type for which 'TBE' is used as the name. All the protocols have been modified to conform with the new interface.
This commit is contained in:
parent
6fb521faba
commit
c82a8979a3
36 changed files with 2598 additions and 1657 deletions
|
@ -36,8 +36,6 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
int l1_response_latency = 2,
|
int l1_response_latency = 2,
|
||||||
int to_l2_latency = 1
|
int to_l2_latency = 1
|
||||||
{
|
{
|
||||||
|
|
||||||
|
|
||||||
// NODE L1 CACHE
|
// NODE L1 CACHE
|
||||||
// From this node's L1 cache TO the network
|
// From this node's L1 cache TO the network
|
||||||
// a local L1 -> this L2 bank, currently ordered with directory forwarded requests
|
// a local L1 -> this L2 bank, currently ordered with directory forwarded requests
|
||||||
|
@ -135,65 +133,63 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
int cache_state_to_int(State state);
|
int cache_state_to_int(State state);
|
||||||
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
|
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
|
||||||
|
|
||||||
|
void set_cache_entry(AbstractCacheEntry a);
|
||||||
|
void unset_cache_entry();
|
||||||
|
void set_tbe(TBE a);
|
||||||
|
void unset_tbe();
|
||||||
|
|
||||||
// inclusive cache returns L1 entries only
|
// inclusive cache returns L1 entries only
|
||||||
Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
|
Entry getCacheEntry(Address addr), return_by_pointer="yes" {
|
||||||
if (L1DcacheMemory.isTagPresent(addr)) {
|
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory[addr]);
|
||||||
return static_cast(Entry, L1DcacheMemory[addr]);
|
if(is_valid(L1Dcache_entry)) {
|
||||||
} else {
|
return L1Dcache_entry;
|
||||||
return static_cast(Entry, L1IcacheMemory[addr]);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void changeL1Permission(Address addr, AccessPermission permission) {
|
Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory[addr]);
|
||||||
if (L1DcacheMemory.isTagPresent(addr)) {
|
return L1Icache_entry;
|
||||||
return L1DcacheMemory.changePermission(addr, permission);
|
|
||||||
} else if(L1IcacheMemory.isTagPresent(addr)) {
|
|
||||||
return L1IcacheMemory.changePermission(addr, permission);
|
|
||||||
} else {
|
|
||||||
error("cannot change permission, L1 block not present");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isL1CacheTagPresent(Address addr) {
|
Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
|
||||||
return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
|
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory[addr]);
|
||||||
|
return L1Dcache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
State getState(Address addr) {
|
Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
|
||||||
// if((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == true){
|
Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory[addr]);
|
||||||
// DEBUG_EXPR(id);
|
return L1Icache_entry;
|
||||||
// DEBUG_EXPR(addr);
|
}
|
||||||
// }
|
|
||||||
|
State getState(TBE tbe, Entry cache_entry, Address addr) {
|
||||||
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
|
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
|
||||||
|
|
||||||
if(L1_TBEs.isPresent(addr)) {
|
if(is_valid(tbe)) {
|
||||||
return L1_TBEs[addr].TBEState;
|
return tbe.TBEState;
|
||||||
} else if (isL1CacheTagPresent(addr)) {
|
} else if (is_valid(cache_entry)) {
|
||||||
return getL1CacheEntry(addr).CacheState;
|
return cache_entry.CacheState;
|
||||||
}
|
}
|
||||||
return State:NP;
|
return State:NP;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
|
||||||
void setState(Address addr, State state) {
|
|
||||||
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
|
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
|
||||||
|
|
||||||
// MUST CHANGE
|
// MUST CHANGE
|
||||||
if(L1_TBEs.isPresent(addr)) {
|
if(is_valid(tbe)) {
|
||||||
L1_TBEs[addr].TBEState := state;
|
tbe.TBEState := state;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (isL1CacheTagPresent(addr)) {
|
if (is_valid(cache_entry)) {
|
||||||
getL1CacheEntry(addr).CacheState := state;
|
cache_entry.CacheState := state;
|
||||||
|
|
||||||
// Set permission
|
// Set permission
|
||||||
if (state == State:I) {
|
if (state == State:I) {
|
||||||
changeL1Permission(addr, AccessPermission:Invalid);
|
cache_entry.changePermission(AccessPermission:Invalid);
|
||||||
} else if (state == State:S || state == State:E) {
|
} else if (state == State:S || state == State:E) {
|
||||||
changeL1Permission(addr, AccessPermission:Read_Only);
|
cache_entry.changePermission(AccessPermission:Read_Only);
|
||||||
} else if (state == State:M) {
|
} else if (state == State:M) {
|
||||||
changeL1Permission(addr, AccessPermission:Read_Write);
|
cache_entry.changePermission(AccessPermission:Read_Write);
|
||||||
} else {
|
} else {
|
||||||
changeL1Permission(addr, AccessPermission:Busy);
|
cache_entry.changePermission(AccessPermission:Busy);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -210,6 +206,9 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int getPendingAcks(TBE tbe) {
|
||||||
|
return tbe.pendingAcks;
|
||||||
|
}
|
||||||
|
|
||||||
out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
|
out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
|
||||||
out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
|
out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
|
||||||
|
@ -220,27 +219,32 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
if (responseIntraChipL1Network_in.isReady()) {
|
if (responseIntraChipL1Network_in.isReady()) {
|
||||||
peek(responseIntraChipL1Network_in, ResponseMsg, block_on="Address") {
|
peek(responseIntraChipL1Network_in, ResponseMsg, block_on="Address") {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
|
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
|
TBE tbe := L1_TBEs[in_msg.Address];
|
||||||
|
|
||||||
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
||||||
trigger(Event:Data_Exclusive, in_msg.Address);
|
trigger(Event:Data_Exclusive, in_msg.Address, cache_entry, tbe);
|
||||||
} else if(in_msg.Type == CoherenceResponseType:DATA) {
|
} else if(in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
if ( (getState(in_msg.Address) == State:IS || getState(in_msg.Address) == State:IS_I) &&
|
if ((getState(tbe, cache_entry, in_msg.Address) == State:IS ||
|
||||||
machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache ) {
|
getState(tbe, cache_entry, in_msg.Address) == State:IS_I) &&
|
||||||
|
machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
|
||||||
|
|
||||||
trigger(Event:DataS_fromL1, in_msg.Address);
|
trigger(Event:DataS_fromL1, in_msg.Address, cache_entry, tbe);
|
||||||
|
|
||||||
} else if ( (L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0 ) {
|
} else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
|
||||||
trigger(Event:Data_all_Acks, in_msg.Address);
|
trigger(Event:Data_all_Acks, in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Data, in_msg.Address);
|
trigger(Event:Data, in_msg.Address, cache_entry, tbe);
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
if ( (L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0 ) {
|
if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
|
||||||
trigger(Event:Ack_all, in_msg.Address);
|
trigger(Event:Ack_all, in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Ack, in_msg.Address);
|
trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
|
||||||
trigger(Event:WB_Ack, in_msg.Address);
|
trigger(Event:WB_Ack, in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
error("Invalid L1 response type");
|
error("Invalid L1 response type");
|
||||||
}
|
}
|
||||||
|
@ -253,15 +257,19 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
if(requestIntraChipL1Network_in.isReady()) {
|
if(requestIntraChipL1Network_in.isReady()) {
|
||||||
peek(requestIntraChipL1Network_in, RequestMsg, block_on="Address") {
|
peek(requestIntraChipL1Network_in, RequestMsg, block_on="Address") {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
|
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
|
TBE tbe := L1_TBEs[in_msg.Address];
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceRequestType:INV) {
|
if (in_msg.Type == CoherenceRequestType:INV) {
|
||||||
trigger(Event:Inv, in_msg.Address);
|
trigger(Event:Inv, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
|
} else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
|
||||||
// upgrade transforms to GETX due to race
|
// upgrade transforms to GETX due to race
|
||||||
trigger(Event:Fwd_GETX, in_msg.Address);
|
trigger(Event:Fwd_GETX, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
trigger(Event:Fwd_GETS, in_msg.Address);
|
trigger(Event:Fwd_GETS, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
|
} else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
|
||||||
trigger(Event:Fwd_GET_INSTR, in_msg.Address);
|
trigger(Event:Fwd_GET_INSTR, in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
error("Invalid forwarded request type");
|
error("Invalid forwarded request type");
|
||||||
}
|
}
|
||||||
|
@ -280,40 +288,55 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
// ** INSTRUCTION ACCESS ***
|
// ** INSTRUCTION ACCESS ***
|
||||||
|
|
||||||
// Check to see if it is in the OTHER L1
|
// Check to see if it is in the OTHER L1
|
||||||
if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
|
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
|
||||||
|
if (is_valid(L1Dcache_entry)) {
|
||||||
// The block is in the wrong L1, put the request on the queue to the shared L2
|
// The block is in the wrong L1, put the request on the queue to the shared L2
|
||||||
trigger(Event:L1_Replacement, in_msg.LineAddress);
|
trigger(Event:L1_Replacement, in_msg.LineAddress,
|
||||||
|
L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
|
||||||
}
|
}
|
||||||
if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
|
|
||||||
|
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
|
||||||
|
if (is_valid(L1Icache_entry)) {
|
||||||
// The tag matches for the L1, so the L1 asks the L2 for it.
|
// The tag matches for the L1, so the L1 asks the L2 for it.
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
|
L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
|
||||||
} else {
|
} else {
|
||||||
if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
|
if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
|
||||||
// L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
|
// L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
|
L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room in the L1
|
// No room in the L1, so we need to make room in the L1
|
||||||
trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
|
trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.LineAddress),
|
||||||
|
getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
|
||||||
|
L1_TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// *** DATA ACCESS ***
|
// *** DATA ACCESS ***
|
||||||
|
|
||||||
// Check to see if it is in the OTHER L1
|
// Check to see if it is in the OTHER L1
|
||||||
if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
|
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
|
||||||
|
if (is_valid(L1Icache_entry)) {
|
||||||
// The block is in the wrong L1, put the request on the queue to the shared L2
|
// The block is in the wrong L1, put the request on the queue to the shared L2
|
||||||
trigger(Event:L1_Replacement, in_msg.LineAddress);
|
trigger(Event:L1_Replacement, in_msg.LineAddress,
|
||||||
|
L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
|
||||||
}
|
}
|
||||||
if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
|
|
||||||
|
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
|
||||||
|
if (is_valid(L1Dcache_entry)) {
|
||||||
// The tag matches for the L1, so the L1 ask the L2 for it
|
// The tag matches for the L1, so the L1 ask the L2 for it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
|
L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
|
||||||
} else {
|
} else {
|
||||||
if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
|
if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
|
||||||
// L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
|
// L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
|
L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room in the L1
|
// No room in the L1, so we need to make room in the L1
|
||||||
trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
|
trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.LineAddress),
|
||||||
|
getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
|
||||||
|
L1_TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -395,10 +418,11 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
action(d_sendDataToRequestor, "d", desc="send data to requestor") {
|
action(d_sendDataToRequestor, "d", desc="send data to requestor") {
|
||||||
peek(requestIntraChipL1Network_in, RequestMsg) {
|
peek(requestIntraChipL1Network_in, RequestMsg) {
|
||||||
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
|
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL1CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
|
@ -408,10 +432,11 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
|
|
||||||
action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
|
action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
|
||||||
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
|
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL1CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
|
@ -422,10 +447,11 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
|
action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
|
||||||
peek(requestIntraChipL1Network_in, RequestMsg) {
|
peek(requestIntraChipL1Network_in, RequestMsg) {
|
||||||
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
|
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.DataBlk := L1_TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.Dirty := L1_TBEs[address].Dirty;
|
out_msg.Dirty := tbe.Dirty;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
|
@ -435,10 +461,11 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
|
|
||||||
action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
|
action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
|
||||||
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
|
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.DataBlk := L1_TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.Dirty := L1_TBEs[address].Dirty;
|
out_msg.Dirty := tbe.Dirty;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
|
@ -460,10 +487,11 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
|
|
||||||
action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
|
action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
|
||||||
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
|
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL1CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
|
@ -473,10 +501,11 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
|
|
||||||
action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
|
action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
|
||||||
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
|
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.DataBlk := L1_TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.Dirty := L1_TBEs[address].Dirty;
|
out_msg.Dirty := tbe.Dirty;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
|
@ -500,14 +529,15 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
|
|
||||||
action(g_issuePUTX, "g", desc="send data to the L2 cache") {
|
action(g_issuePUTX, "g", desc="send data to the L2 cache") {
|
||||||
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_response_latency) {
|
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceRequestType:PUTX;
|
out_msg.Type := CoherenceRequestType:PUTX;
|
||||||
out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL1CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.Requestor:= machineID;
|
out_msg.Requestor:= machineID;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
if (getL1CacheEntry(address).Dirty) {
|
if (cache_entry.Dirty) {
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Data;
|
out_msg.MessageSize := MessageSizeType:Writeback_Data;
|
||||||
} else {
|
} else {
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
||||||
|
@ -541,25 +571,27 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
|
action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
|
||||||
DPRINTF(RubySlicc, "%s\n", getL1CacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
|
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
|
||||||
|
sequencer.readCallback(address, cache_entry.DataBlk);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
|
action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
|
||||||
DPRINTF(RubySlicc, "%s\n", getL1CacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
|
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
|
||||||
getL1CacheEntry(address).Dirty := true;
|
sequencer.writeCallback(address, cache_entry.DataBlk);
|
||||||
|
cache_entry.Dirty := true;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
|
action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
|
||||||
check_allocate(L1_TBEs);
|
check_allocate(L1_TBEs);
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
L1_TBEs.allocate(address);
|
L1_TBEs.allocate(address);
|
||||||
L1_TBEs[address].isPrefetch := false;
|
set_tbe(L1_TBEs[address]);
|
||||||
L1_TBEs[address].Dirty := getL1CacheEntry(address).Dirty;
|
tbe.isPrefetch := false;
|
||||||
L1_TBEs[address].DataBlk := getL1CacheEntry(address).DataBlk;
|
tbe.Dirty := cache_entry.Dirty;
|
||||||
|
tbe.DataBlk := cache_entry.DataBlk;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
|
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
|
||||||
|
@ -576,21 +608,24 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
|
|
||||||
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
|
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
|
||||||
L1_TBEs.deallocate(address);
|
L1_TBEs.deallocate(address);
|
||||||
|
unset_tbe();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
|
action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
|
||||||
peek(responseIntraChipL1Network_in, ResponseMsg) {
|
peek(responseIntraChipL1Network_in, ResponseMsg) {
|
||||||
getL1CacheEntry(address).DataBlk := in_msg.DataBlk;
|
assert(is_valid(cache_entry));
|
||||||
getL1CacheEntry(address).Dirty := in_msg.Dirty;
|
cache_entry.DataBlk := in_msg.DataBlk;
|
||||||
|
cache_entry.Dirty := in_msg.Dirty;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(q_updateAckCount, "q", desc="Update ack count") {
|
action(q_updateAckCount, "q", desc="Update ack count") {
|
||||||
peek(responseIntraChipL1Network_in, ResponseMsg) {
|
peek(responseIntraChipL1Network_in, ResponseMsg) {
|
||||||
L1_TBEs[address].pendingAcks := L1_TBEs[address].pendingAcks - in_msg.AckCount;
|
assert(is_valid(tbe));
|
||||||
|
tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
|
||||||
APPEND_TRANSITION_COMMENT(in_msg.AckCount);
|
APPEND_TRANSITION_COMMENT(in_msg.AckCount);
|
||||||
APPEND_TRANSITION_COMMENT(" p: ");
|
APPEND_TRANSITION_COMMENT(" p: ");
|
||||||
APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
|
APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -603,17 +638,18 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
|
||||||
} else {
|
} else {
|
||||||
L1IcacheMemory.deallocate(address);
|
L1IcacheMemory.deallocate(address);
|
||||||
}
|
}
|
||||||
|
unset_cache_entry();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
|
action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
|
||||||
if (L1DcacheMemory.isTagPresent(address) == false) {
|
if (is_invalid(cache_entry)) {
|
||||||
L1DcacheMemory.allocate(address, new Entry);
|
set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
|
action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
|
||||||
if (L1IcacheMemory.isTagPresent(address) == false) {
|
if (is_invalid(cache_entry)) {
|
||||||
L1IcacheMemory.allocate(address, new Entry);
|
set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -38,7 +38,6 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
int l2_response_latency = 2,
|
int l2_response_latency = 2,
|
||||||
int to_l1_latency = 1
|
int to_l1_latency = 1
|
||||||
{
|
{
|
||||||
|
|
||||||
// L2 BANK QUEUES
|
// L2 BANK QUEUES
|
||||||
// From local bank of L2 cache TO the network
|
// From local bank of L2 cache TO the network
|
||||||
MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="0", ordered="false"; // this L2 bank -> Memory
|
MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="0", ordered="false"; // this L2 bank -> Memory
|
||||||
|
@ -155,82 +154,80 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
|
|
||||||
TBETable L2_TBEs, template_hack="<L2Cache_TBE>";
|
TBETable L2_TBEs, template_hack="<L2Cache_TBE>";
|
||||||
|
|
||||||
// inclusive cache, returns L2 entries only
|
void set_cache_entry(AbstractCacheEntry a);
|
||||||
Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
|
void unset_cache_entry();
|
||||||
return static_cast(Entry, L2cacheMemory[addr]);
|
void set_tbe(TBE a);
|
||||||
}
|
void unset_tbe();
|
||||||
|
|
||||||
void changeL2Permission(Address addr, AccessPermission permission) {
|
// inclusive cache, returns L2 entries only
|
||||||
if (L2cacheMemory.isTagPresent(addr)) {
|
Entry getCacheEntry(Address addr), return_by_pointer="yes" {
|
||||||
return L2cacheMemory.changePermission(addr, permission);
|
return static_cast(Entry, "pointer", L2cacheMemory[addr]);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string getCoherenceRequestTypeStr(CoherenceRequestType type) {
|
std::string getCoherenceRequestTypeStr(CoherenceRequestType type) {
|
||||||
return CoherenceRequestType_to_string(type);
|
return CoherenceRequestType_to_string(type);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isL2CacheTagPresent(Address addr) {
|
bool isOneSharerLeft(Address addr, MachineID requestor, Entry cache_entry) {
|
||||||
return (L2cacheMemory.isTagPresent(addr));
|
assert(is_valid(cache_entry));
|
||||||
|
assert(cache_entry.Sharers.isElement(requestor));
|
||||||
|
return (cache_entry.Sharers.count() == 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isOneSharerLeft(Address addr, MachineID requestor) {
|
bool isSharer(Address addr, MachineID requestor, Entry cache_entry) {
|
||||||
assert(getL2CacheEntry(addr).Sharers.isElement(requestor));
|
if (is_valid(cache_entry)) {
|
||||||
return (getL2CacheEntry(addr).Sharers.count() == 1);
|
return cache_entry.Sharers.isElement(requestor);
|
||||||
}
|
|
||||||
|
|
||||||
bool isSharer(Address addr, MachineID requestor) {
|
|
||||||
if (L2cacheMemory.isTagPresent(addr)) {
|
|
||||||
return getL2CacheEntry(addr).Sharers.isElement(requestor);
|
|
||||||
} else {
|
} else {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void addSharer(Address addr, MachineID requestor) {
|
void addSharer(Address addr, MachineID requestor, Entry cache_entry) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
DPRINTF(RubySlicc, "machineID: %s, requestor: %s, address: %s\n",
|
DPRINTF(RubySlicc, "machineID: %s, requestor: %s, address: %s\n",
|
||||||
machineID, requestor, addr);
|
machineID, requestor, addr);
|
||||||
getL2CacheEntry(addr).Sharers.add(requestor);
|
cache_entry.Sharers.add(requestor);
|
||||||
}
|
}
|
||||||
|
|
||||||
State getState(Address addr) {
|
State getState(TBE tbe, Entry cache_entry, Address addr) {
|
||||||
if(L2_TBEs.isPresent(addr)) {
|
if(is_valid(tbe)) {
|
||||||
return L2_TBEs[addr].TBEState;
|
return tbe.TBEState;
|
||||||
} else if (isL2CacheTagPresent(addr)) {
|
} else if (is_valid(cache_entry)) {
|
||||||
return getL2CacheEntry(addr).CacheState;
|
return cache_entry.CacheState;
|
||||||
}
|
}
|
||||||
return State:NP;
|
return State:NP;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string getStateStr(Address addr) {
|
std::string getStateStr(TBE tbe, Entry cache_entry, Address addr) {
|
||||||
return L2Cache_State_to_string(getState(addr));
|
return L2Cache_State_to_string(getState(tbe, cache_entry, addr));
|
||||||
}
|
}
|
||||||
|
|
||||||
// when is this called
|
// when is this called
|
||||||
void setState(Address addr, State state) {
|
void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
|
||||||
|
|
||||||
// MUST CHANGE
|
// MUST CHANGE
|
||||||
if (L2_TBEs.isPresent(addr)) {
|
if (is_valid(tbe)) {
|
||||||
L2_TBEs[addr].TBEState := state;
|
tbe.TBEState := state;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (isL2CacheTagPresent(addr)) {
|
if (is_valid(cache_entry)) {
|
||||||
getL2CacheEntry(addr).CacheState := state;
|
cache_entry.CacheState := state;
|
||||||
|
|
||||||
// Set permission
|
// Set permission
|
||||||
if (state == State:SS ) {
|
if (state == State:SS ) {
|
||||||
changeL2Permission(addr, AccessPermission:Read_Only);
|
cache_entry.changePermission(AccessPermission:Read_Only);
|
||||||
} else if (state == State:M) {
|
} else if (state == State:M) {
|
||||||
changeL2Permission(addr, AccessPermission:Read_Write);
|
cache_entry.changePermission(AccessPermission:Read_Write);
|
||||||
} else if (state == State:MT) {
|
} else if (state == State:MT) {
|
||||||
changeL2Permission(addr, AccessPermission:Stale);
|
cache_entry.changePermission(AccessPermission:Stale);
|
||||||
} else {
|
} else {
|
||||||
changeL2Permission(addr, AccessPermission:Busy);
|
cache_entry.changePermission(AccessPermission:Busy);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Event L1Cache_request_type_to_event(CoherenceRequestType type, Address addr, MachineID requestor) {
|
Event L1Cache_request_type_to_event(CoherenceRequestType type, Address addr,
|
||||||
|
MachineID requestor, Entry cache_entry) {
|
||||||
if(type == CoherenceRequestType:GETS) {
|
if(type == CoherenceRequestType:GETS) {
|
||||||
return Event:L1_GETS;
|
return Event:L1_GETS;
|
||||||
} else if(type == CoherenceRequestType:GET_INSTR) {
|
} else if(type == CoherenceRequestType:GET_INSTR) {
|
||||||
|
@ -238,13 +235,13 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
} else if (type == CoherenceRequestType:GETX) {
|
} else if (type == CoherenceRequestType:GETX) {
|
||||||
return Event:L1_GETX;
|
return Event:L1_GETX;
|
||||||
} else if (type == CoherenceRequestType:UPGRADE) {
|
} else if (type == CoherenceRequestType:UPGRADE) {
|
||||||
if ( isL2CacheTagPresent(addr) && getL2CacheEntry(addr).Sharers.isElement(requestor) ) {
|
if ( is_valid(cache_entry) && cache_entry.Sharers.isElement(requestor) ) {
|
||||||
return Event:L1_UPGRADE;
|
return Event:L1_UPGRADE;
|
||||||
} else {
|
} else {
|
||||||
return Event:L1_GETX;
|
return Event:L1_GETX;
|
||||||
}
|
}
|
||||||
} else if (type == CoherenceRequestType:PUTX) {
|
} else if (type == CoherenceRequestType:PUTX) {
|
||||||
if (isSharer(addr, requestor)) {
|
if (isSharer(addr, requestor, cache_entry)) {
|
||||||
return Event:L1_PUTX;
|
return Event:L1_PUTX;
|
||||||
} else {
|
} else {
|
||||||
return Event:L1_PUTX_old;
|
return Event:L1_PUTX_old;
|
||||||
|
@ -255,6 +252,15 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int getPendingAcks(TBE tbe) {
|
||||||
|
return tbe.pendingAcks;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool isDirty(Entry cache_entry) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
|
return cache_entry.Dirty;
|
||||||
|
}
|
||||||
|
|
||||||
// ** OUT_PORTS **
|
// ** OUT_PORTS **
|
||||||
|
|
||||||
out_port(L1RequestIntraChipL2Network_out, RequestMsg, L1RequestFromL2Cache);
|
out_port(L1RequestIntraChipL2Network_out, RequestMsg, L1RequestFromL2Cache);
|
||||||
|
@ -265,15 +271,17 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache) {
|
in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache) {
|
||||||
if(L1unblockNetwork_in.isReady()) {
|
if(L1unblockNetwork_in.isReady()) {
|
||||||
peek(L1unblockNetwork_in, ResponseMsg) {
|
peek(L1unblockNetwork_in, ResponseMsg) {
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
|
TBE tbe := L2_TBEs[in_msg.Address];
|
||||||
DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n",
|
DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n",
|
||||||
in_msg.Address, getState(in_msg.Address), in_msg.Sender,
|
in_msg.Address, getState(tbe, cache_entry, in_msg.Address),
|
||||||
in_msg.Type, in_msg.Destination);
|
in_msg.Sender, in_msg.Type, in_msg.Destination);
|
||||||
|
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
|
if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
|
||||||
trigger(Event:Exclusive_Unblock, in_msg.Address);
|
trigger(Event:Exclusive_Unblock, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
|
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
|
||||||
trigger(Event:Unblock, in_msg.Address);
|
trigger(Event:Unblock, in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
error("unknown unblock message");
|
error("unknown unblock message");
|
||||||
}
|
}
|
||||||
|
@ -281,26 +289,27 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
// Response IntraChip L2 Network - response msg to this particular L2 bank
|
// Response IntraChip L2 Network - response msg to this particular L2 bank
|
||||||
in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache) {
|
in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache) {
|
||||||
if (responseIntraChipL2Network_in.isReady()) {
|
if (responseIntraChipL2Network_in.isReady()) {
|
||||||
peek(responseIntraChipL2Network_in, ResponseMsg) {
|
peek(responseIntraChipL2Network_in, ResponseMsg) {
|
||||||
// test wether it's from a local L1 or an off chip source
|
// test wether it's from a local L1 or an off chip source
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
|
TBE tbe := L2_TBEs[in_msg.Address];
|
||||||
|
|
||||||
if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
|
if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
|
||||||
if(in_msg.Type == CoherenceResponseType:DATA) {
|
if(in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
if (in_msg.Dirty) {
|
if (in_msg.Dirty) {
|
||||||
trigger(Event:WB_Data, in_msg.Address);
|
trigger(Event:WB_Data, in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:WB_Data_clean, in_msg.Address);
|
trigger(Event:WB_Data_clean, in_msg.Address, cache_entry, tbe);
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
if ((L2_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
|
if ((getPendingAcks(tbe) - in_msg.AckCount) == 0) {
|
||||||
trigger(Event:Ack_all, in_msg.Address);
|
trigger(Event:Ack_all, in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Ack, in_msg.Address);
|
trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
error("unknown message type");
|
error("unknown message type");
|
||||||
|
@ -308,11 +317,14 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
|
|
||||||
} else { // external message
|
} else { // external message
|
||||||
if(in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
|
if(in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
|
||||||
trigger(Event:Mem_Data, in_msg.Address); // L2 now has data and all off-chip acks
|
// L2 now has data and all off-chip acks
|
||||||
|
trigger(Event:Mem_Data, in_msg.Address, cache_entry, tbe);
|
||||||
} else if(in_msg.Type == CoherenceResponseType:MEMORY_ACK) {
|
} else if(in_msg.Type == CoherenceResponseType:MEMORY_ACK) {
|
||||||
trigger(Event:Mem_Ack, in_msg.Address); // L2 now has data and all off-chip acks
|
// L2 now has data and all off-chip acks
|
||||||
|
trigger(Event:Mem_Ack, in_msg.Address, cache_entry, tbe);
|
||||||
} else if(in_msg.Type == CoherenceResponseType:INV) {
|
} else if(in_msg.Type == CoherenceResponseType:INV) {
|
||||||
trigger(Event:MEM_Inv, in_msg.Address); // L2 now has data and all off-chip acks
|
// L2 now has data and all off-chip acks
|
||||||
|
trigger(Event:MEM_Inv, in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
error("unknown message type");
|
error("unknown message type");
|
||||||
}
|
}
|
||||||
|
@ -325,24 +337,36 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
|
in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
|
||||||
if(L1RequestIntraChipL2Network_in.isReady()) {
|
if(L1RequestIntraChipL2Network_in.isReady()) {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
|
TBE tbe := L2_TBEs[in_msg.Address];
|
||||||
|
|
||||||
DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n",
|
DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n",
|
||||||
in_msg.Address, getState(in_msg.Address), in_msg.Requestor,
|
in_msg.Address, getState(tbe, cache_entry, in_msg.Address),
|
||||||
in_msg.Type, in_msg.Destination);
|
in_msg.Requestor, in_msg.Type, in_msg.Destination);
|
||||||
|
|
||||||
assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
|
assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
if (L2cacheMemory.isTagPresent(in_msg.Address)) {
|
|
||||||
|
if (is_valid(cache_entry)) {
|
||||||
// The L2 contains the block, so proceeded with handling the request
|
// The L2 contains the block, so proceeded with handling the request
|
||||||
trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
|
trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address,
|
||||||
|
in_msg.Requestor, cache_entry),
|
||||||
|
in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
if (L2cacheMemory.cacheAvail(in_msg.Address)) {
|
if (L2cacheMemory.cacheAvail(in_msg.Address)) {
|
||||||
// L2 does't have the line, but we have space for it in the L2
|
// L2 does't have the line, but we have space for it in the L2
|
||||||
trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
|
trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address,
|
||||||
|
in_msg.Requestor, cache_entry),
|
||||||
|
in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
// No room in the L2, so we need to make room before handling the request
|
// No room in the L2, so we need to make room before handling the request
|
||||||
if (getL2CacheEntry( L2cacheMemory.cacheProbe(in_msg.Address) ).Dirty ) {
|
Entry L2cache_entry := getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address));
|
||||||
trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
|
if (isDirty(L2cache_entry)) {
|
||||||
|
trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address),
|
||||||
|
L2cache_entry, L2_TBEs[L2cacheMemory.cacheProbe(in_msg.Address)]);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:L2_Replacement_clean, L2cacheMemory.cacheProbe(in_msg.Address));
|
trigger(Event:L2_Replacement_clean, L2cacheMemory.cacheProbe(in_msg.Address),
|
||||||
|
L2cache_entry, L2_TBEs[L2cacheMemory.cacheProbe(in_msg.Address)]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -368,10 +392,11 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
action(b_forwardRequestToExclusive, "b", desc="Forward request to the exclusive L1") {
|
action(b_forwardRequestToExclusive, "b", desc="Forward request to the exclusive L1") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
|
enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := in_msg.Type;
|
out_msg.Type := in_msg.Type;
|
||||||
out_msg.Requestor := in_msg.Requestor;
|
out_msg.Requestor := in_msg.Requestor;
|
||||||
out_msg.Destination.add(getL2CacheEntry(address).Exclusive);
|
out_msg.Destination.add(cache_entry.Exclusive);
|
||||||
out_msg.MessageSize := MessageSizeType:Request_Control;
|
out_msg.MessageSize := MessageSizeType:Request_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -379,12 +404,13 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
|
|
||||||
action(c_exclusiveReplacement, "c", desc="Send data to memory") {
|
action(c_exclusiveReplacement, "c", desc="Send data to memory") {
|
||||||
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:MEMORY_DATA;
|
out_msg.Type := CoherenceResponseType:MEMORY_DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -399,33 +425,33 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(ct_exclusiveReplacementFromTBE, "ct", desc="Send data to memory") {
|
action(ct_exclusiveReplacementFromTBE, "ct", desc="Send data to memory") {
|
||||||
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:MEMORY_DATA;
|
out_msg.Type := CoherenceResponseType:MEMORY_DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||||
out_msg.DataBlk := L2_TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.Dirty := L2_TBEs[address].Dirty;
|
out_msg.Dirty := tbe.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(d_sendDataToRequestor, "d", desc="Send data from cache to reqeustor") {
|
action(d_sendDataToRequestor, "d", desc="Send data from cache to reqeustor") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
|
|
||||||
out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
|
out_msg.AckCount := 0 - cache_entry.Sharers.count();
|
||||||
if (getL2CacheEntry(address).Sharers.isElement(in_msg.Requestor)) {
|
if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
|
||||||
out_msg.AckCount := out_msg.AckCount + 1;
|
out_msg.AckCount := out_msg.AckCount + 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -435,16 +461,17 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
action(dd_sendExclusiveDataToRequestor, "dd", desc="Send data from cache to reqeustor") {
|
action(dd_sendExclusiveDataToRequestor, "dd", desc="Send data from cache to reqeustor") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
|
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
|
|
||||||
out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
|
out_msg.AckCount := 0 - cache_entry.Sharers.count();
|
||||||
if (getL2CacheEntry(address).Sharers.isElement(in_msg.Requestor)) {
|
if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
|
||||||
out_msg.AckCount := out_msg.AckCount + 1;
|
out_msg.AckCount := out_msg.AckCount + 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -454,12 +481,13 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
action(ds_sendSharedDataToRequestor, "ds", desc="Send data from cache to reqeustor") {
|
action(ds_sendSharedDataToRequestor, "ds", desc="Send data from cache to reqeustor") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
out_msg.AckCount := 0;
|
out_msg.AckCount := 0;
|
||||||
}
|
}
|
||||||
|
@ -467,54 +495,59 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(e_sendDataToGetSRequestors, "e", desc="Send data from cache to all GetS IDs") {
|
action(e_sendDataToGetSRequestors, "e", desc="Send data from cache to all GetS IDs") {
|
||||||
assert(L2_TBEs[address].L1_GetS_IDs.count() > 0);
|
assert(is_valid(tbe));
|
||||||
|
assert(tbe.L1_GetS_IDs.count() > 0);
|
||||||
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
|
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // internal nodes
|
out_msg.Destination := tbe.L1_GetS_IDs; // internal nodes
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(ex_sendExclusiveDataToGetSRequestors, "ex", desc="Send data from cache to all GetS IDs") {
|
action(ex_sendExclusiveDataToGetSRequestors, "ex", desc="Send data from cache to all GetS IDs") {
|
||||||
assert(L2_TBEs[address].L1_GetS_IDs.count() == 1);
|
assert(is_valid(tbe));
|
||||||
|
assert(tbe.L1_GetS_IDs.count() == 1);
|
||||||
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
|
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
|
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // internal nodes
|
out_msg.Destination := tbe.L1_GetS_IDs; // internal nodes
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(ee_sendDataToGetXRequestor, "ee", desc="Send data from cache to GetX ID") {
|
action(ee_sendDataToGetXRequestor, "ee", desc="Send data from cache to GetX ID") {
|
||||||
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
|
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID);
|
out_msg.Destination.add(tbe.L1_GetX_ID);
|
||||||
DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
|
DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
DPRINTF(RubySlicc, "Address: %s, Destination: %s, DataBlock: %s\n",
|
DPRINTF(RubySlicc, "Address: %s, Destination: %s, DataBlock: %s\n",
|
||||||
out_msg.Address, out_msg.Destination, out_msg.DataBlk);
|
out_msg.Address, out_msg.Destination, out_msg.DataBlk);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(f_sendInvToSharers, "f", desc="invalidate sharers for L2 replacement") {
|
action(f_sendInvToSharers, "f", desc="invalidate sharers for L2 replacement") {
|
||||||
enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
|
enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceRequestType:INV;
|
out_msg.Type := CoherenceRequestType:INV;
|
||||||
out_msg.Requestor := machineID;
|
out_msg.Requestor := machineID;
|
||||||
out_msg.Destination := getL2CacheEntry(address).Sharers;
|
out_msg.Destination := cache_entry.Sharers;
|
||||||
out_msg.MessageSize := MessageSizeType:Request_Control;
|
out_msg.MessageSize := MessageSizeType:Request_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -522,23 +555,24 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
action(fw_sendFwdInvToSharers, "fw", desc="invalidate sharers for request") {
|
action(fw_sendFwdInvToSharers, "fw", desc="invalidate sharers for request") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
|
enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceRequestType:INV;
|
out_msg.Type := CoherenceRequestType:INV;
|
||||||
out_msg.Requestor := in_msg.Requestor;
|
out_msg.Requestor := in_msg.Requestor;
|
||||||
out_msg.Destination := getL2CacheEntry(address).Sharers;
|
out_msg.Destination := cache_entry.Sharers;
|
||||||
out_msg.MessageSize := MessageSizeType:Request_Control;
|
out_msg.MessageSize := MessageSizeType:Request_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(fwm_sendFwdInvToSharersMinusRequestor, "fwm", desc="invalidate sharers for request, requestor is sharer") {
|
action(fwm_sendFwdInvToSharersMinusRequestor, "fwm", desc="invalidate sharers for request, requestor is sharer") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
|
enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceRequestType:INV;
|
out_msg.Type := CoherenceRequestType:INV;
|
||||||
out_msg.Requestor := in_msg.Requestor;
|
out_msg.Requestor := in_msg.Requestor;
|
||||||
out_msg.Destination := getL2CacheEntry(address).Sharers;
|
out_msg.Destination := cache_entry.Sharers;
|
||||||
out_msg.Destination.remove(in_msg.Requestor);
|
out_msg.Destination.remove(in_msg.Requestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Request_Control;
|
out_msg.MessageSize := MessageSizeType:Request_Control;
|
||||||
}
|
}
|
||||||
|
@ -548,11 +582,13 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
// OTHER ACTIONS
|
// OTHER ACTIONS
|
||||||
action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
|
action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
|
||||||
check_allocate(L2_TBEs);
|
check_allocate(L2_TBEs);
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
L2_TBEs.allocate(address);
|
L2_TBEs.allocate(address);
|
||||||
L2_TBEs[address].L1_GetS_IDs.clear();
|
set_tbe(L2_TBEs[address]);
|
||||||
L2_TBEs[address].DataBlk := getL2CacheEntry(address).DataBlk;
|
tbe.L1_GetS_IDs.clear();
|
||||||
L2_TBEs[address].Dirty := getL2CacheEntry(address).Dirty;
|
tbe.DataBlk := cache_entry.DataBlk;
|
||||||
L2_TBEs[address].pendingAcks := getL2CacheEntry(address).Sharers.count();
|
tbe.Dirty := cache_entry.Dirty;
|
||||||
|
tbe.pendingAcks := cache_entry.Sharers.count();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
|
action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
|
||||||
|
@ -567,56 +603,58 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
profileMsgDelay(0, L1unblockNetwork_in.dequeue_getDelayCycles());
|
profileMsgDelay(0, L1unblockNetwork_in.dequeue_getDelayCycles());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
|
action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
|
||||||
profileMsgDelay(3, responseIntraChipL2Network_in.dequeue_getDelayCycles());
|
profileMsgDelay(3, responseIntraChipL2Network_in.dequeue_getDelayCycles());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
|
action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
|
||||||
peek(responseIntraChipL2Network_in, ResponseMsg) {
|
peek(responseIntraChipL2Network_in, ResponseMsg) {
|
||||||
getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
|
assert(is_valid(cache_entry));
|
||||||
getL2CacheEntry(address).Dirty := in_msg.Dirty;
|
cache_entry.DataBlk := in_msg.DataBlk;
|
||||||
|
cache_entry.Dirty := in_msg.Dirty;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(mr_writeDataToCacheFromRequest, "mr", desc="Write data from response queue to cache") {
|
action(mr_writeDataToCacheFromRequest, "mr", desc="Write data from response queue to cache") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
|
assert(is_valid(cache_entry));
|
||||||
getL2CacheEntry(address).Dirty := in_msg.Dirty;
|
cache_entry.DataBlk := in_msg.DataBlk;
|
||||||
|
cache_entry.Dirty := in_msg.Dirty;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(q_updateAck, "q", desc="update pending ack count") {
|
action(q_updateAck, "q", desc="update pending ack count") {
|
||||||
peek(responseIntraChipL2Network_in, ResponseMsg) {
|
peek(responseIntraChipL2Network_in, ResponseMsg) {
|
||||||
L2_TBEs[address].pendingAcks := L2_TBEs[address].pendingAcks - in_msg.AckCount;
|
assert(is_valid(tbe));
|
||||||
|
tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
|
||||||
APPEND_TRANSITION_COMMENT(in_msg.AckCount);
|
APPEND_TRANSITION_COMMENT(in_msg.AckCount);
|
||||||
APPEND_TRANSITION_COMMENT(" p: ");
|
APPEND_TRANSITION_COMMENT(" p: ");
|
||||||
APPEND_TRANSITION_COMMENT(L2_TBEs[address].pendingAcks);
|
APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(qq_writeDataToTBE, "\qq", desc="Write data from response queue to TBE") {
|
action(qq_writeDataToTBE, "\qq", desc="Write data from response queue to TBE") {
|
||||||
peek(responseIntraChipL2Network_in, ResponseMsg) {
|
peek(responseIntraChipL2Network_in, ResponseMsg) {
|
||||||
L2_TBEs[address].DataBlk := in_msg.DataBlk;
|
assert(is_valid(tbe));
|
||||||
L2_TBEs[address].Dirty := in_msg.Dirty;
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
|
tbe.Dirty := in_msg.Dirty;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(z_stall, "z", desc="Stall") {
|
action(z_stall, "z", desc="Stall") {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(ss_recordGetSL1ID, "\s", desc="Record L1 GetS for load response") {
|
action(ss_recordGetSL1ID, "\s", desc="Record L1 GetS for load response") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
L2_TBEs[address].L1_GetS_IDs.add(in_msg.Requestor);
|
assert(is_valid(tbe));
|
||||||
|
tbe.L1_GetS_IDs.add(in_msg.Requestor);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
|
action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
L2_TBEs[address].L1_GetX_ID := in_msg.Requestor;
|
assert(is_valid(tbe));
|
||||||
|
tbe.L1_GetX_ID := in_msg.Requestor;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -625,13 +663,14 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
|
action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
|
||||||
if (L2cacheMemory.isTagPresent(address) == false) {
|
if (is_invalid(cache_entry)) {
|
||||||
L2cacheMemory.allocate(address, new Entry);
|
set_cache_entry(L2cacheMemory.allocate(address, new Entry));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
|
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
|
||||||
L2cacheMemory.deallocate(address);
|
L2cacheMemory.deallocate(address);
|
||||||
|
unset_cache_entry();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(t_sendWBAck, "t", desc="Send writeback ACK") {
|
action(t_sendWBAck, "t", desc="Send writeback ACK") {
|
||||||
|
@ -649,13 +688,14 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
action(ts_sendInvAckToUpgrader, "ts", desc="Send ACK to upgrader") {
|
action(ts_sendInvAckToUpgrader, "ts", desc="Send ACK to upgrader") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
|
enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:ACK;
|
out_msg.Type := CoherenceResponseType:ACK;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Control;
|
out_msg.MessageSize := MessageSizeType:Response_Control;
|
||||||
// upgrader doesn't get ack from itself, hence the + 1
|
// upgrader doesn't get ack from itself, hence the + 1
|
||||||
out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count() + 1;
|
out_msg.AckCount := 0 - cache_entry.Sharers.count() + 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -672,47 +712,50 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
|
action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
addSharer(address, in_msg.Requestor);
|
assert(is_valid(cache_entry));
|
||||||
APPEND_TRANSITION_COMMENT( getL2CacheEntry(address).Sharers );
|
addSharer(address, in_msg.Requestor, cache_entry);
|
||||||
|
APPEND_TRANSITION_COMMENT( cache_entry.Sharers );
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(nnu_addSharerFromUnblock, "\nu", desc="Add L1 sharer to list") {
|
action(nnu_addSharerFromUnblock, "\nu", desc="Add L1 sharer to list") {
|
||||||
peek(L1unblockNetwork_in, ResponseMsg) {
|
peek(L1unblockNetwork_in, ResponseMsg) {
|
||||||
addSharer(address, in_msg.Sender);
|
assert(is_valid(cache_entry));
|
||||||
|
addSharer(address, in_msg.Sender, cache_entry);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
|
action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
getL2CacheEntry(address).Sharers.remove(in_msg.Requestor);
|
assert(is_valid(cache_entry));
|
||||||
|
cache_entry.Sharers.remove(in_msg.Requestor);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
|
action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
getL2CacheEntry(address).Sharers.clear();
|
assert(is_valid(cache_entry));
|
||||||
|
cache_entry.Sharers.clear();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(mm_markExclusive, "\m", desc="set the exclusive owner") {
|
action(mm_markExclusive, "\m", desc="set the exclusive owner") {
|
||||||
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
|
||||||
getL2CacheEntry(address).Sharers.clear();
|
assert(is_valid(cache_entry));
|
||||||
getL2CacheEntry(address).Exclusive := in_msg.Requestor;
|
cache_entry.Sharers.clear();
|
||||||
addSharer(address, in_msg.Requestor);
|
cache_entry.Exclusive := in_msg.Requestor;
|
||||||
|
addSharer(address, in_msg.Requestor, cache_entry);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
|
action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
|
||||||
peek(L1unblockNetwork_in, ResponseMsg) {
|
peek(L1unblockNetwork_in, ResponseMsg) {
|
||||||
getL2CacheEntry(address).Sharers.clear();
|
assert(is_valid(cache_entry));
|
||||||
getL2CacheEntry(address).Exclusive := in_msg.Sender;
|
cache_entry.Sharers.clear();
|
||||||
addSharer(address, in_msg.Sender);
|
cache_entry.Exclusive := in_msg.Sender;
|
||||||
|
addSharer(address, in_msg.Sender, cache_entry);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1060,6 +1103,3 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
|
||||||
o_popIncomingResponseQueue;
|
o_popIncomingResponseQueue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -107,13 +107,16 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
|
||||||
|
|
||||||
TBETable TBEs, template_hack="<Directory_TBE>";
|
TBETable TBEs, template_hack="<Directory_TBE>";
|
||||||
|
|
||||||
|
void set_tbe(TBE tbe);
|
||||||
|
void unset_tbe();
|
||||||
|
|
||||||
Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
|
Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
|
||||||
return static_cast(Entry, directory[addr]);
|
return static_cast(Entry, directory[addr]);
|
||||||
}
|
}
|
||||||
|
|
||||||
State getState(Address addr) {
|
State getState(TBE tbe, Address addr) {
|
||||||
if (TBEs.isPresent(addr)) {
|
if (is_valid(tbe)) {
|
||||||
return TBEs[addr].TBEState;
|
return tbe.TBEState;
|
||||||
} else if (directory.isPresent(addr)) {
|
} else if (directory.isPresent(addr)) {
|
||||||
return getDirectoryEntry(addr).DirectoryState;
|
return getDirectoryEntry(addr).DirectoryState;
|
||||||
} else {
|
} else {
|
||||||
|
@ -122,10 +125,10 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void setState(Address addr, State state) {
|
void setState(TBE tbe, Address addr, State state) {
|
||||||
|
|
||||||
if (TBEs.isPresent(addr)) {
|
if (is_valid(tbe)) {
|
||||||
TBEs[addr].TBEState := state;
|
tbe.TBEState := state;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (directory.isPresent(addr)) {
|
if (directory.isPresent(addr)) {
|
||||||
|
@ -161,11 +164,13 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
if (isGETRequest(in_msg.Type)) {
|
if (isGETRequest(in_msg.Type)) {
|
||||||
trigger(Event:Fetch, in_msg.Address);
|
trigger(Event:Fetch, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
|
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
|
||||||
trigger(Event:DMA_READ, makeLineAddress(in_msg.Address));
|
trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
|
||||||
|
TBEs[makeLineAddress(in_msg.Address)]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
||||||
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address));
|
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
|
||||||
|
TBEs[makeLineAddress(in_msg.Address)]);
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg);
|
DPRINTF(RubySlicc, "%s\n", in_msg);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -179,9 +184,9 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
|
||||||
peek(responseNetwork_in, ResponseMsg) {
|
peek(responseNetwork_in, ResponseMsg) {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
|
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
|
||||||
trigger(Event:Data, in_msg.Address);
|
trigger(Event:Data, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
trigger(Event:CleanReplacement, in_msg.Address);
|
trigger(Event:CleanReplacement, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -195,9 +200,9 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
|
||||||
if (memQueue_in.isReady()) {
|
if (memQueue_in.isReady()) {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
||||||
trigger(Event:Memory_Data, in_msg.Address);
|
trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
||||||
trigger(Event:Memory_Ack, in_msg.Address);
|
trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -412,15 +417,17 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
|
||||||
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
TBEs[address].DataBlk := in_msg.DataBlk;
|
set_tbe(TBEs[address]);
|
||||||
TBEs[address].PhysicalAddress := in_msg.Address;
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
TBEs[address].Len := in_msg.Len;
|
tbe.PhysicalAddress := in_msg.Address;
|
||||||
|
tbe.Len := in_msg.Len;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
|
action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
|
||||||
//getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
|
assert(is_valid(tbe));
|
||||||
getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
|
//getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
|
||||||
|
getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -429,12 +436,13 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
|
||||||
action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
|
action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
|
||||||
peek(responseNetwork_in, ResponseMsg) {
|
peek(responseNetwork_in, ResponseMsg) {
|
||||||
enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
|
enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := MemoryRequestType:MEMORY_WB;
|
out_msg.Type := MemoryRequestType:MEMORY_WB;
|
||||||
out_msg.OriginalRequestorMachId := in_msg.Sender;
|
out_msg.OriginalRequestorMachId := in_msg.Sender;
|
||||||
//out_msg.DataBlk := in_msg.DataBlk;
|
//out_msg.DataBlk := in_msg.DataBlk;
|
||||||
//out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
|
//out_msg.DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
|
||||||
out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
|
out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
|
||||||
|
|
||||||
out_msg.MessageSize := in_msg.MessageSize;
|
out_msg.MessageSize := in_msg.MessageSize;
|
||||||
//out_msg.Prefetch := in_msg.Prefetch;
|
//out_msg.Prefetch := in_msg.Prefetch;
|
||||||
|
@ -446,6 +454,7 @@ machine(Directory, "MESI_CMP_filter_directory protocol")
|
||||||
|
|
||||||
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
|
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
|
||||||
TBEs.deallocate(address);
|
TBEs.deallocate(address);
|
||||||
|
unset_tbe();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -73,7 +73,15 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
|
|
||||||
TBETable TBEs, template_hack="<L1Cache_TBE>";
|
TBETable TBEs, template_hack="<L1Cache_TBE>";
|
||||||
|
|
||||||
|
// PROTOTYPES
|
||||||
|
void set_cache_entry(AbstractCacheEntry a);
|
||||||
|
void unset_cache_entry();
|
||||||
|
void set_tbe(TBE b);
|
||||||
|
void unset_tbe();
|
||||||
|
|
||||||
|
Entry getCacheEntry(Address address), return_by_pointer="yes" {
|
||||||
|
return static_cast(Entry, "pointer", cacheMemory.lookup(address));
|
||||||
|
}
|
||||||
|
|
||||||
// FUNCTIONS
|
// FUNCTIONS
|
||||||
Event mandatory_request_type_to_event(CacheRequestType type) {
|
Event mandatory_request_type_to_event(CacheRequestType type) {
|
||||||
|
@ -88,35 +96,31 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Entry getCacheEntry(Address addr), return_by_ref="yes" {
|
State getState(TBE tbe, Entry cache_entry, Address addr) {
|
||||||
return static_cast(Entry, cacheMemory[addr]);
|
|
||||||
}
|
|
||||||
|
|
||||||
State getState(Address addr) {
|
if (is_valid(tbe)) {
|
||||||
|
return tbe.TBEState;
|
||||||
if (TBEs.isPresent(addr)) {
|
|
||||||
return TBEs[addr].TBEState;
|
|
||||||
}
|
}
|
||||||
else if (cacheMemory.isTagPresent(addr)) {
|
else if (is_valid(cache_entry)) {
|
||||||
return getCacheEntry(addr).CacheState;
|
return cache_entry.CacheState;
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
return State:I;
|
return State:I;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void setState(Address addr, State state) {
|
void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
|
||||||
|
|
||||||
if (TBEs.isPresent(addr)) {
|
if (is_valid(tbe)) {
|
||||||
TBEs[addr].TBEState := state;
|
tbe.TBEState := state;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cacheMemory.isTagPresent(addr)) {
|
if (is_valid(cache_entry)) {
|
||||||
getCacheEntry(addr).CacheState := state;
|
cache_entry.CacheState := state;
|
||||||
if (state == State:M) {
|
if (state == State:M) {
|
||||||
cacheMemory.changePermission(addr, AccessPermission:Read_Write);
|
cache_entry.changePermission(AccessPermission:Read_Write);
|
||||||
} else {
|
} else {
|
||||||
cacheMemory.changePermission(addr, AccessPermission:Invalid);
|
cache_entry.changePermission(AccessPermission:Invalid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -141,17 +145,21 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
|
in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
|
||||||
if (forwardRequestNetwork_in.isReady()) {
|
if (forwardRequestNetwork_in.isReady()) {
|
||||||
peek(forwardRequestNetwork_in, RequestMsg, block_on="Address") {
|
peek(forwardRequestNetwork_in, RequestMsg, block_on="Address") {
|
||||||
|
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
|
TBE tbe := TBEs[in_msg.Address];
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceRequestType:GETX) {
|
if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
trigger(Event:Fwd_GETX, in_msg.Address);
|
trigger(Event:Fwd_GETX, in_msg.Address, cache_entry, tbe);
|
||||||
}
|
}
|
||||||
else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
|
else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
|
||||||
trigger(Event:Writeback_Ack, in_msg.Address);
|
trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
|
||||||
}
|
}
|
||||||
else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
|
else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
|
||||||
trigger(Event:Writeback_Nack, in_msg.Address);
|
trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
|
||||||
}
|
}
|
||||||
else if (in_msg.Type == CoherenceRequestType:INV) {
|
else if (in_msg.Type == CoherenceRequestType:INV) {
|
||||||
trigger(Event:Inv, in_msg.Address);
|
trigger(Event:Inv, in_msg.Address, cache_entry, tbe);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
|
@ -163,8 +171,12 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
in_port(responseNetwork_in, ResponseMsg, responseToCache) {
|
in_port(responseNetwork_in, ResponseMsg, responseToCache) {
|
||||||
if (responseNetwork_in.isReady()) {
|
if (responseNetwork_in.isReady()) {
|
||||||
peek(responseNetwork_in, ResponseMsg, block_on="Address") {
|
peek(responseNetwork_in, ResponseMsg, block_on="Address") {
|
||||||
|
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
|
TBE tbe := TBEs[in_msg.Address];
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceResponseType:DATA) {
|
if (in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
trigger(Event:Data, in_msg.Address);
|
trigger(Event:Data, in_msg.Address, cache_entry, tbe);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
|
@ -178,14 +190,17 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
if (mandatoryQueue_in.isReady()) {
|
if (mandatoryQueue_in.isReady()) {
|
||||||
peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
|
peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
|
||||||
|
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.LineAddress);
|
||||||
if (cacheMemory.isTagPresent(in_msg.LineAddress) == false &&
|
if (is_invalid(cache_entry) &&
|
||||||
cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
|
cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
|
||||||
// make room for the block
|
// make room for the block
|
||||||
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress));
|
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
|
||||||
|
getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
|
||||||
|
TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
|
||||||
|
cache_entry, TBEs[in_msg.LineAddress]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -205,11 +220,12 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
|
|
||||||
action(b_issuePUT, "b", desc="Issue a PUT request") {
|
action(b_issuePUT, "b", desc="Issue a PUT request") {
|
||||||
enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
|
enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceRequestType:PUTX;
|
out_msg.Type := CoherenceRequestType:PUTX;
|
||||||
out_msg.Requestor := machineID;
|
out_msg.Requestor := machineID;
|
||||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||||
out_msg.DataBlk := getCacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.MessageSize := MessageSizeType:Data;
|
out_msg.MessageSize := MessageSizeType:Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -218,11 +234,12 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
action(e_sendData, "e", desc="Send data from cache to requestor") {
|
action(e_sendData, "e", desc="Send data from cache to requestor") {
|
||||||
peek(forwardRequestNetwork_in, RequestMsg) {
|
peek(forwardRequestNetwork_in, RequestMsg) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := getCacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -231,26 +248,28 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
|
action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
|
||||||
peek(forwardRequestNetwork_in, RequestMsg) {
|
peek(forwardRequestNetwork_in, RequestMsg) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
|
action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
|
||||||
if (cacheMemory.isTagPresent(address) == false) {
|
if (is_valid(cache_entry)) {
|
||||||
cacheMemory.allocate(address, new Entry);
|
} else {
|
||||||
|
set_cache_entry(cacheMemory.allocate(address, new Entry));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
|
action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
|
||||||
if (cacheMemory.isTagPresent(address) == true) {
|
if (is_valid(cache_entry)) {
|
||||||
cacheMemory.deallocate(address);
|
cacheMemory.deallocate(address);
|
||||||
|
unset_cache_entry();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -273,55 +292,64 @@ machine(L1Cache, "MI Example L1 Cache")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
|
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
|
||||||
DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
|
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
|
||||||
sequencer.readCallback(address,
|
sequencer.readCallback(address,
|
||||||
GenericMachineType:L1Cache,
|
GenericMachineType:L1Cache,
|
||||||
getCacheEntry(address).DataBlk);
|
cache_entry.DataBlk);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(rx_load_hit, "rx", desc="External load completed.") {
|
action(rx_load_hit, "rx", desc="External load completed.") {
|
||||||
peek(responseNetwork_in, ResponseMsg) {
|
peek(responseNetwork_in, ResponseMsg) {
|
||||||
DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
|
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
|
||||||
sequencer.readCallback(address,
|
sequencer.readCallback(address,
|
||||||
getNondirectHitMachType(in_msg.Sender),
|
getNondirectHitMachType(in_msg.Sender),
|
||||||
getCacheEntry(address).DataBlk);
|
cache_entry.DataBlk);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
|
action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
|
||||||
DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
|
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
|
||||||
sequencer.writeCallback(address,
|
sequencer.writeCallback(address,
|
||||||
GenericMachineType:L1Cache,
|
GenericMachineType:L1Cache,
|
||||||
getCacheEntry(address).DataBlk);
|
cache_entry.DataBlk);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(sx_store_hit, "sx", desc="External store completed.") {
|
action(sx_store_hit, "sx", desc="External store completed.") {
|
||||||
peek(responseNetwork_in, ResponseMsg) {
|
peek(responseNetwork_in, ResponseMsg) {
|
||||||
DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
|
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
|
||||||
sequencer.writeCallback(address,
|
sequencer.writeCallback(address,
|
||||||
getNondirectHitMachType(in_msg.Sender),
|
getNondirectHitMachType(in_msg.Sender),
|
||||||
getCacheEntry(address).DataBlk);
|
cache_entry.DataBlk);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(u_writeDataToCache, "u", desc="Write data to the cache") {
|
action(u_writeDataToCache, "u", desc="Write data to the cache") {
|
||||||
peek(responseNetwork_in, ResponseMsg) {
|
peek(responseNetwork_in, ResponseMsg) {
|
||||||
getCacheEntry(address).DataBlk := in_msg.DataBlk;
|
assert(is_valid(cache_entry));
|
||||||
|
cache_entry.DataBlk := in_msg.DataBlk;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
|
set_tbe(TBEs[address]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
|
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
|
||||||
TBEs.deallocate(address);
|
TBEs.deallocate(address);
|
||||||
|
unset_tbe();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
|
action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
|
||||||
TBEs[address].DataBlk := getCacheEntry(address).DataBlk;
|
assert(is_valid(cache_entry));
|
||||||
|
assert(is_valid(tbe));
|
||||||
|
tbe.DataBlk := cache_entry.DataBlk;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(z_stall, "z", desc="stall") {
|
action(z_stall, "z", desc="stall") {
|
||||||
|
|
|
@ -76,13 +76,16 @@ machine(Directory, "Directory protocol")
|
||||||
// ** OBJECTS **
|
// ** OBJECTS **
|
||||||
TBETable TBEs, template_hack="<Directory_TBE>";
|
TBETable TBEs, template_hack="<Directory_TBE>";
|
||||||
|
|
||||||
|
void set_tbe(TBE b);
|
||||||
|
void unset_tbe();
|
||||||
|
|
||||||
Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
|
Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
|
||||||
return static_cast(Entry, directory[addr]);
|
return static_cast(Entry, directory[addr]);
|
||||||
}
|
}
|
||||||
|
|
||||||
State getState(Address addr) {
|
State getState(TBE tbe, Address addr) {
|
||||||
if (TBEs.isPresent(addr)) {
|
if (is_valid(tbe)) {
|
||||||
return TBEs[addr].TBEState;
|
return tbe.TBEState;
|
||||||
} else if (directory.isPresent(addr)) {
|
} else if (directory.isPresent(addr)) {
|
||||||
return getDirectoryEntry(addr).DirectoryState;
|
return getDirectoryEntry(addr).DirectoryState;
|
||||||
} else {
|
} else {
|
||||||
|
@ -90,10 +93,10 @@ machine(Directory, "Directory protocol")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void setState(Address addr, State state) {
|
void setState(TBE tbe, Address addr, State state) {
|
||||||
|
|
||||||
if (TBEs.isPresent(addr)) {
|
if (is_valid(tbe)) {
|
||||||
TBEs[addr].TBEState := state;
|
tbe.TBEState := state;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (directory.isPresent(addr)) {
|
if (directory.isPresent(addr)) {
|
||||||
|
@ -126,10 +129,11 @@ machine(Directory, "Directory protocol")
|
||||||
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
|
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
|
||||||
if (dmaRequestQueue_in.isReady()) {
|
if (dmaRequestQueue_in.isReady()) {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
|
TBE tbe := TBEs[in_msg.LineAddress];
|
||||||
if (in_msg.Type == DMARequestType:READ) {
|
if (in_msg.Type == DMARequestType:READ) {
|
||||||
trigger(Event:DMA_READ, in_msg.LineAddress);
|
trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
|
||||||
} else if (in_msg.Type == DMARequestType:WRITE) {
|
} else if (in_msg.Type == DMARequestType:WRITE) {
|
||||||
trigger(Event:DMA_WRITE, in_msg.LineAddress);
|
trigger(Event:DMA_WRITE, in_msg.LineAddress, tbe);
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
}
|
}
|
||||||
|
@ -140,15 +144,16 @@ machine(Directory, "Directory protocol")
|
||||||
in_port(requestQueue_in, RequestMsg, requestToDir) {
|
in_port(requestQueue_in, RequestMsg, requestToDir) {
|
||||||
if (requestQueue_in.isReady()) {
|
if (requestQueue_in.isReady()) {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
|
TBE tbe := TBEs[in_msg.Address];
|
||||||
if (in_msg.Type == CoherenceRequestType:GETS) {
|
if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
trigger(Event:GETS, in_msg.Address);
|
trigger(Event:GETS, in_msg.Address, tbe);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETX) {
|
} else if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
trigger(Event:GETX, in_msg.Address);
|
trigger(Event:GETX, in_msg.Address, tbe);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
|
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
|
||||||
if (getDirectoryEntry(in_msg.Address).Owner.isElement(in_msg.Requestor)) {
|
if (getDirectoryEntry(in_msg.Address).Owner.isElement(in_msg.Requestor)) {
|
||||||
trigger(Event:PUTX, in_msg.Address);
|
trigger(Event:PUTX, in_msg.Address, tbe);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:PUTX_NotOwner, in_msg.Address);
|
trigger(Event:PUTX_NotOwner, in_msg.Address, tbe);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -162,10 +167,11 @@ machine(Directory, "Directory protocol")
|
||||||
in_port(memQueue_in, MemoryMsg, memBuffer) {
|
in_port(memQueue_in, MemoryMsg, memBuffer) {
|
||||||
if (memQueue_in.isReady()) {
|
if (memQueue_in.isReady()) {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
|
TBE tbe := TBEs[in_msg.Address];
|
||||||
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
||||||
trigger(Event:Memory_Data, in_msg.Address);
|
trigger(Event:Memory_Data, in_msg.Address, tbe);
|
||||||
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
||||||
trigger(Event:Memory_Ack, in_msg.Address);
|
trigger(Event:Memory_Ack, in_msg.Address, tbe);
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc,"%s\n", in_msg.Type);
|
DPRINTF(RubySlicc,"%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -232,11 +238,12 @@ machine(Directory, "Directory protocol")
|
||||||
action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
|
action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
|
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.PhysicalAddress := address;
|
out_msg.PhysicalAddress := address;
|
||||||
out_msg.LineAddress := address;
|
out_msg.LineAddress := address;
|
||||||
out_msg.Type := DMAResponseType:DATA;
|
out_msg.Type := DMAResponseType:DATA;
|
||||||
out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
|
out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
|
||||||
out_msg.Destination.add(TBEs[address].DmaRequestor);
|
out_msg.Destination.add(tbe.DmaRequestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -247,11 +254,12 @@ machine(Directory, "Directory protocol")
|
||||||
action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
|
action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
|
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.PhysicalAddress := address;
|
out_msg.PhysicalAddress := address;
|
||||||
out_msg.LineAddress := address;
|
out_msg.LineAddress := address;
|
||||||
out_msg.Type := DMAResponseType:DATA;
|
out_msg.Type := DMAResponseType:DATA;
|
||||||
out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
|
out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
|
||||||
out_msg.Destination.add(TBEs[address].DmaRequestor);
|
out_msg.Destination.add(tbe.DmaRequestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -259,10 +267,11 @@ machine(Directory, "Directory protocol")
|
||||||
|
|
||||||
action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
|
action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
|
||||||
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
|
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.PhysicalAddress := address;
|
out_msg.PhysicalAddress := address;
|
||||||
out_msg.LineAddress := address;
|
out_msg.LineAddress := address;
|
||||||
out_msg.Type := DMAResponseType:ACK;
|
out_msg.Type := DMAResponseType:ACK;
|
||||||
out_msg.Destination.add(TBEs[address].DmaRequestor);
|
out_msg.Destination.add(tbe.DmaRequestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -320,35 +329,40 @@ machine(Directory, "Directory protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
|
action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
|
||||||
getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
|
assert(is_valid(tbe));
|
||||||
|
getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
TBEs[address].DataBlk := in_msg.DataBlk;
|
set_tbe(TBEs[address]);
|
||||||
TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
TBEs[address].Len := in_msg.Len;
|
tbe.PhysicalAddress := in_msg.PhysicalAddress;
|
||||||
TBEs[address].DmaRequestor := in_msg.Requestor;
|
tbe.Len := in_msg.Len;
|
||||||
|
tbe.DmaRequestor := in_msg.Requestor;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
|
action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
TBEs[address].DmaRequestor := in_msg.Requestor;
|
set_tbe(TBEs[address]);
|
||||||
|
tbe.DmaRequestor := in_msg.Requestor;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
|
action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
TBEs[address].DataBlk := in_msg.DataBlk;
|
set_tbe(TBEs[address]);
|
||||||
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
|
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
|
||||||
TBEs.deallocate(address);
|
TBEs.deallocate(address);
|
||||||
|
unset_tbe();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(z_recycleRequestQueue, "z", desc="recycle request queue") {
|
action(z_recycleRequestQueue, "z", desc="recycle request queue") {
|
||||||
|
@ -407,12 +421,13 @@ machine(Directory, "Directory protocol")
|
||||||
action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
|
action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
enqueue(memQueue_out, MemoryMsg, latency="1") {
|
enqueue(memQueue_out, MemoryMsg, latency="1") {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := MemoryRequestType:MEMORY_WB;
|
out_msg.Type := MemoryRequestType:MEMORY_WB;
|
||||||
out_msg.OriginalRequestorMachId := in_msg.Requestor;
|
out_msg.OriginalRequestorMachId := in_msg.Requestor;
|
||||||
// get incoming data
|
// get incoming data
|
||||||
// out_msg.DataBlk := in_msg.DataBlk;
|
// out_msg.DataBlk := in_msg.DataBlk;
|
||||||
out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
|
out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
|
||||||
out_msg.MessageSize := in_msg.MessageSize;
|
out_msg.MessageSize := in_msg.MessageSize;
|
||||||
//out_msg.Prefetch := in_msg.Prefetch;
|
//out_msg.Prefetch := in_msg.Prefetch;
|
||||||
|
|
||||||
|
@ -445,9 +460,10 @@ machine(Directory, "Directory protocol")
|
||||||
|
|
||||||
action(w_writeDataToMemoryFromTBE, "\w", desc="Write date to directory memory from TBE") {
|
action(w_writeDataToMemoryFromTBE, "\w", desc="Write date to directory memory from TBE") {
|
||||||
//getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
|
//getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
|
||||||
getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk,
|
assert(is_valid(tbe));
|
||||||
addressOffset(TBEs[address].PhysicalAddress),
|
getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
|
||||||
TBEs[address].Len);
|
addressOffset(tbe.PhysicalAddress),
|
||||||
|
tbe.Len);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -136,6 +136,10 @@ machine(L1Cache, "Directory protocol")
|
||||||
bool isPresent(Address);
|
bool isPresent(Address);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void set_cache_entry(AbstractCacheEntry b);
|
||||||
|
void unset_cache_entry();
|
||||||
|
void set_tbe(TBE b);
|
||||||
|
void unset_tbe();
|
||||||
|
|
||||||
MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
|
MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
|
||||||
|
|
||||||
|
@ -143,102 +147,69 @@ machine(L1Cache, "Directory protocol")
|
||||||
TimerTable useTimerTable;
|
TimerTable useTimerTable;
|
||||||
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
|
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
|
||||||
|
|
||||||
Entry getCacheEntry(Address addr), return_by_ref="yes" {
|
Entry getCacheEntry(Address addr), return_by_pointer="yes" {
|
||||||
if (L1DcacheMemory.isTagPresent(addr)) {
|
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
|
||||||
return static_cast(Entry, L1DcacheMemory[addr]);
|
if(is_valid(L1Dcache_entry)) {
|
||||||
} else {
|
return L1Dcache_entry;
|
||||||
return static_cast(Entry, L1IcacheMemory[addr]);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void changePermission(Address addr, AccessPermission permission) {
|
Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
|
||||||
if (L1DcacheMemory.isTagPresent(addr)) {
|
return L1Icache_entry;
|
||||||
return L1DcacheMemory.changePermission(addr, permission);
|
|
||||||
} else {
|
|
||||||
return L1IcacheMemory.changePermission(addr, permission);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isCacheTagPresent(Address addr) {
|
Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
|
||||||
return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
|
return static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
|
||||||
}
|
}
|
||||||
|
|
||||||
State getState(Address addr) {
|
Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
|
||||||
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
|
return static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
|
||||||
|
}
|
||||||
|
|
||||||
if(TBEs.isPresent(addr)) {
|
State getState(TBE tbe, Entry cache_entry, Address addr) {
|
||||||
return TBEs[addr].TBEState;
|
if(is_valid(tbe)) {
|
||||||
} else if (isCacheTagPresent(addr)) {
|
return tbe.TBEState;
|
||||||
return getCacheEntry(addr).CacheState;
|
} else if (is_valid(cache_entry)) {
|
||||||
|
return cache_entry.CacheState;
|
||||||
}
|
}
|
||||||
return State:I;
|
return State:I;
|
||||||
}
|
}
|
||||||
|
|
||||||
void setState(Address addr, State state) {
|
void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
|
||||||
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
|
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
|
||||||
|
|
||||||
if (TBEs.isPresent(addr)) {
|
if (is_valid(tbe)) {
|
||||||
TBEs[addr].TBEState := state;
|
tbe.TBEState := state;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (isCacheTagPresent(addr)) {
|
if (is_valid(cache_entry)) {
|
||||||
if ( ((getCacheEntry(addr).CacheState != State:M) && (state == State:M)) ||
|
if ( ((cache_entry.CacheState != State:M) && (state == State:M)) ||
|
||||||
((getCacheEntry(addr).CacheState != State:MM) && (state == State:MM)) ||
|
((cache_entry.CacheState != State:MM) && (state == State:MM)) ||
|
||||||
((getCacheEntry(addr).CacheState != State:S) && (state == State:S)) ||
|
((cache_entry.CacheState != State:S) && (state == State:S)) ||
|
||||||
((getCacheEntry(addr).CacheState != State:O) && (state == State:O)) ) {
|
((cache_entry.CacheState != State:O) && (state == State:O)) ) {
|
||||||
|
|
||||||
getCacheEntry(addr).CacheState := state;
|
cache_entry.CacheState := state;
|
||||||
sequencer.checkCoherence(addr);
|
sequencer.checkCoherence(addr);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
getCacheEntry(addr).CacheState := state;
|
cache_entry.CacheState := state;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set permission
|
// Set permission
|
||||||
if (state == State:MM || state == State:MM_W) {
|
if (state == State:MM || state == State:MM_W) {
|
||||||
changePermission(addr, AccessPermission:Read_Write);
|
cache_entry.changePermission(AccessPermission:Read_Write);
|
||||||
} else if ((state == State:S) ||
|
} else if ((state == State:S) ||
|
||||||
(state == State:O) ||
|
(state == State:O) ||
|
||||||
(state == State:M) ||
|
(state == State:M) ||
|
||||||
(state == State:M_W) ||
|
(state == State:M_W) ||
|
||||||
(state == State:SM) ||
|
(state == State:SM) ||
|
||||||
(state == State:OM)) {
|
(state == State:OM)) {
|
||||||
changePermission(addr, AccessPermission:Read_Only);
|
cache_entry.changePermission(AccessPermission:Read_Only);
|
||||||
} else {
|
} else {
|
||||||
changePermission(addr, AccessPermission:Invalid);
|
cache_entry.changePermission(AccessPermission:Invalid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isBlockExclusive(Address addr) {
|
|
||||||
|
|
||||||
if (isCacheTagPresent(addr)) {
|
|
||||||
if ( (getCacheEntry(addr).CacheState == State:M) || (getCacheEntry(addr).CacheState == State:MM)
|
|
||||||
|| (getCacheEntry(addr).CacheState == State:MI) || (getCacheEntry(addr).CacheState == State:MM_W)
|
|
||||||
) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool isBlockShared(Address addr) {
|
|
||||||
if (isCacheTagPresent(addr)) {
|
|
||||||
if ( (getCacheEntry(addr).CacheState == State:S) || (getCacheEntry(addr).CacheState == State:O)
|
|
||||||
|| (getCacheEntry(addr).CacheState == State:SM)
|
|
||||||
|| (getCacheEntry(addr).CacheState == State:OI)
|
|
||||||
|| (getCacheEntry(addr).CacheState == State:SI)
|
|
||||||
|| (getCacheEntry(addr).CacheState == State:OM)
|
|
||||||
) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
Event mandatory_request_type_to_event(CacheRequestType type) {
|
Event mandatory_request_type_to_event(CacheRequestType type) {
|
||||||
if (type == CacheRequestType:LD) {
|
if (type == CacheRequestType:LD) {
|
||||||
return Event:Load;
|
return Event:Load;
|
||||||
|
@ -265,7 +236,9 @@ machine(L1Cache, "Directory protocol")
|
||||||
// Use Timer
|
// Use Timer
|
||||||
in_port(useTimerTable_in, Address, useTimerTable) {
|
in_port(useTimerTable_in, Address, useTimerTable) {
|
||||||
if (useTimerTable_in.isReady()) {
|
if (useTimerTable_in.isReady()) {
|
||||||
trigger(Event:Use_Timeout, useTimerTable.readyAddress());
|
trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
|
||||||
|
getCacheEntry(useTimerTable.readyAddress()),
|
||||||
|
TBEs[useTimerTable.readyAddress()]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -283,7 +256,8 @@ machine(L1Cache, "Directory protocol")
|
||||||
if (triggerQueue_in.isReady()) {
|
if (triggerQueue_in.isReady()) {
|
||||||
peek(triggerQueue_in, TriggerMsg) {
|
peek(triggerQueue_in, TriggerMsg) {
|
||||||
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
||||||
trigger(Event:All_acks, in_msg.Address);
|
trigger(Event:All_acks, in_msg.Address,
|
||||||
|
getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -299,24 +273,33 @@ machine(L1Cache, "Directory protocol")
|
||||||
peek(requestNetwork_in, RequestMsg, block_on="Address") {
|
peek(requestNetwork_in, RequestMsg, block_on="Address") {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
|
||||||
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
|
||||||
|
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
||||||
if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
|
if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
|
||||||
trigger(Event:Own_GETX, in_msg.Address);
|
trigger(Event:Own_GETX, in_msg.Address,
|
||||||
|
getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Fwd_GETX, in_msg.Address);
|
trigger(Event:Fwd_GETX, in_msg.Address,
|
||||||
|
getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
trigger(Event:Fwd_GETS, in_msg.Address);
|
trigger(Event:Fwd_GETS, in_msg.Address,
|
||||||
|
getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
|
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
|
||||||
trigger(Event:Fwd_DMA, in_msg.Address);
|
trigger(Event:Fwd_DMA, in_msg.Address,
|
||||||
|
getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
|
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
|
||||||
trigger(Event:Writeback_Ack, in_msg.Address);
|
trigger(Event:Writeback_Ack, in_msg.Address,
|
||||||
|
getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
|
} else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
|
||||||
trigger(Event:Writeback_Ack_Data, in_msg.Address);
|
trigger(Event:Writeback_Ack_Data, in_msg.Address,
|
||||||
|
getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
|
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
|
||||||
trigger(Event:Writeback_Nack, in_msg.Address);
|
trigger(Event:Writeback_Nack, in_msg.Address,
|
||||||
|
getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:INV) {
|
} else if (in_msg.Type == CoherenceRequestType:INV) {
|
||||||
trigger(Event:Inv, in_msg.Address);
|
trigger(Event:Inv, in_msg.Address,
|
||||||
|
getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -329,11 +312,14 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
if (responseToL1Cache_in.isReady()) {
|
if (responseToL1Cache_in.isReady()) {
|
||||||
peek(responseToL1Cache_in, ResponseMsg, block_on="Address") {
|
peek(responseToL1Cache_in, ResponseMsg, block_on="Address") {
|
||||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
trigger(Event:Ack, in_msg.Address);
|
trigger(Event:Ack, in_msg.Address,
|
||||||
|
getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
trigger(Event:Data, in_msg.Address);
|
trigger(Event:Data, in_msg.Address,
|
||||||
|
getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
||||||
trigger(Event:Exclusive_Data, in_msg.Address);
|
trigger(Event:Exclusive_Data, in_msg.Address,
|
||||||
|
getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -352,41 +338,63 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
if (in_msg.Type == CacheRequestType:IFETCH) {
|
if (in_msg.Type == CacheRequestType:IFETCH) {
|
||||||
// ** INSTRUCTION ACCESS ***
|
// ** INSTRUCTION ACCESS ***
|
||||||
|
|
||||||
|
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
|
||||||
// Check to see if it is in the OTHER L1
|
// Check to see if it is in the OTHER L1
|
||||||
if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
|
if (is_valid(L1Dcache_entry)) {
|
||||||
// The block is in the wrong L1, put the request on the queue to the shared L2
|
// The block is in the wrong L1, put the request on the queue to the shared L2
|
||||||
trigger(Event:L1_Replacement, in_msg.LineAddress);
|
trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
|
||||||
|
TBEs[in_msg.LineAddress]);
|
||||||
}
|
}
|
||||||
if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
|
|
||||||
|
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
|
||||||
|
if (is_valid(L1Icache_entry)) {
|
||||||
// The tag matches for the L1, so the L1 asks the L2 for it.
|
// The tag matches for the L1, so the L1 asks the L2 for it.
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type),
|
||||||
|
in_msg.LineAddress, L1Icache_entry,
|
||||||
|
TBEs[in_msg.LineAddress]);
|
||||||
} else {
|
} else {
|
||||||
if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
|
if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
|
||||||
// L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
|
// L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type),
|
||||||
|
in_msg.LineAddress, L1Icache_entry,
|
||||||
|
TBEs[in_msg.LineAddress]);
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room in the L1
|
// No room in the L1, so we need to make room in the L1
|
||||||
trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
|
trigger(Event:L1_Replacement,
|
||||||
|
L1IcacheMemory.cacheProbe(in_msg.LineAddress),
|
||||||
|
getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
|
||||||
|
TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// *** DATA ACCESS ***
|
// *** DATA ACCESS ***
|
||||||
|
|
||||||
|
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
|
||||||
// Check to see if it is in the OTHER L1
|
// Check to see if it is in the OTHER L1
|
||||||
if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
|
if (is_valid(L1Icache_entry)) {
|
||||||
// The block is in the wrong L1, put the request on the queue to the shared L2
|
// The block is in the wrong L1, put the request on the queue to the shared L2
|
||||||
trigger(Event:L1_Replacement, in_msg.LineAddress);
|
trigger(Event:L1_Replacement, in_msg.LineAddress,
|
||||||
|
L1Icache_entry, TBEs[in_msg.LineAddress]);
|
||||||
}
|
}
|
||||||
if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
|
|
||||||
|
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
|
||||||
|
if (is_valid(L1Dcache_entry)) {
|
||||||
// The tag matches for the L1, so the L1 ask the L2 for it
|
// The tag matches for the L1, so the L1 ask the L2 for it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type),
|
||||||
|
in_msg.LineAddress, L1Dcache_entry,
|
||||||
|
TBEs[in_msg.LineAddress]);
|
||||||
} else {
|
} else {
|
||||||
if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
|
if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
|
||||||
// L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
|
// L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type),
|
||||||
|
in_msg.LineAddress, L1Dcache_entry,
|
||||||
|
TBEs[in_msg.LineAddress]);
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room in the L1
|
// No room in the L1, so we need to make room in the L1
|
||||||
trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
|
trigger(Event:L1_Replacement,
|
||||||
|
L1DcacheMemory.cacheProbe(in_msg.LineAddress),
|
||||||
|
getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
|
||||||
|
TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -418,6 +426,7 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceRequestType:GETX;
|
out_msg.Type := CoherenceRequestType:GETX;
|
||||||
out_msg.Requestor := machineID;
|
out_msg.Requestor := machineID;
|
||||||
|
out_msg.RequestorMachine := MachineType:L1Cache;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
out_msg.MessageSize := MessageSizeType:Request_Control;
|
out_msg.MessageSize := MessageSizeType:Request_Control;
|
||||||
|
@ -433,6 +442,7 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceRequestType:PUTX;
|
out_msg.Type := CoherenceRequestType:PUTX;
|
||||||
out_msg.Requestor := machineID;
|
out_msg.Requestor := machineID;
|
||||||
|
out_msg.RequestorMachine := MachineType:L1Cache;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
||||||
|
@ -465,6 +475,7 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
|
|
||||||
action(e_sendData, "e", desc="Send data from cache to requestor") {
|
action(e_sendData, "e", desc="Send data from cache to requestor") {
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
if (in_msg.RequestorMachine == MachineType:L2Cache) {
|
if (in_msg.RequestorMachine == MachineType:L2Cache) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
|
@ -472,8 +483,8 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
out_msg.DataBlk := getCacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
// out_msg.Dirty := getCacheEntry(address).Dirty;
|
// out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.Dirty := false;
|
out_msg.Dirty := false;
|
||||||
out_msg.Acks := in_msg.Acks;
|
out_msg.Acks := in_msg.Acks;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
|
@ -486,8 +497,8 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := getCacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
// out_msg.Dirty := getCacheEntry(address).Dirty;
|
// out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.Dirty := false;
|
out_msg.Dirty := false;
|
||||||
out_msg.Acks := in_msg.Acks;
|
out_msg.Acks := in_msg.Acks;
|
||||||
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
|
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
|
||||||
|
@ -499,13 +510,14 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
|
|
||||||
action(e_sendDataToL2, "ee", desc="Send data from cache to requestor") {
|
action(e_sendDataToL2, "ee", desc="Send data from cache to requestor") {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
out_msg.DataBlk := getCacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getCacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.Acks := 0; // irrelevant
|
out_msg.Acks := 0; // irrelevant
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
|
@ -514,6 +526,7 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
|
|
||||||
action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
|
action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
if (in_msg.RequestorMachine == MachineType:L2Cache) {
|
if (in_msg.RequestorMachine == MachineType:L2Cache) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
|
@ -522,8 +535,8 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
out_msg.SenderMachine := MachineType:L1Cache;
|
out_msg.SenderMachine := MachineType:L1Cache;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
out_msg.DataBlk := getCacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getCacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.Acks := in_msg.Acks;
|
out_msg.Acks := in_msg.Acks;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
|
@ -536,8 +549,8 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.SenderMachine := MachineType:L1Cache;
|
out_msg.SenderMachine := MachineType:L1Cache;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := getCacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getCacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.Acks := in_msg.Acks;
|
out_msg.Acks := in_msg.Acks;
|
||||||
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
|
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
|
||||||
}
|
}
|
||||||
|
@ -590,6 +603,7 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
|
out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
|
out_msg.SenderMachine := MachineType:L1Cache;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
out_msg.MessageSize := MessageSizeType:Unblock_Control;
|
out_msg.MessageSize := MessageSizeType:Unblock_Control;
|
||||||
|
@ -597,21 +611,25 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
}
|
}
|
||||||
|
|
||||||
action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
|
action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
|
||||||
DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
sequencer.readCallback(address, getCacheEntry(address).DataBlk);
|
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
|
||||||
|
sequencer.readCallback(address, cache_entry.DataBlk);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
|
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
|
||||||
DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
sequencer.writeCallback(address, getCacheEntry(address).DataBlk);
|
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
|
||||||
getCacheEntry(address).Dirty := true;
|
sequencer.writeCallback(address, cache_entry.DataBlk);
|
||||||
|
cache_entry.Dirty := true;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(i_allocateTBE, "i", desc="Allocate TBE") {
|
action(i_allocateTBE, "i", desc="Allocate TBE") {
|
||||||
check_allocate(TBEs);
|
check_allocate(TBEs);
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
|
set_tbe(TBEs[address]);
|
||||||
TBEs[address].Dirty := getCacheEntry(address).Dirty;
|
assert(is_valid(cache_entry));
|
||||||
|
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
|
||||||
|
tbe.Dirty := cache_entry.Dirty;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
|
action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
|
||||||
|
@ -632,14 +650,16 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
|
|
||||||
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
|
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
|
||||||
peek(responseToL1Cache_in, ResponseMsg) {
|
peek(responseToL1Cache_in, ResponseMsg) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
|
DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
|
||||||
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
|
tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
|
action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
|
assert(is_valid(tbe));
|
||||||
|
tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -648,7 +668,8 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
}
|
}
|
||||||
|
|
||||||
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
|
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
|
||||||
if (TBEs[address].NumPendingMsgs == 0) {
|
assert(is_valid(tbe));
|
||||||
|
if (tbe.NumPendingMsgs == 0) {
|
||||||
enqueue(triggerQueue_out, TriggerMsg) {
|
enqueue(triggerQueue_out, TriggerMsg) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := TriggerType:ALL_ACKS;
|
out_msg.Type := TriggerType:ALL_ACKS;
|
||||||
|
@ -663,14 +684,15 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
|
|
||||||
action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
|
action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
if (in_msg.RequestorMachine == MachineType:L1Cache) {
|
if (in_msg.RequestorMachine == MachineType:L1Cache) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
// out_msg.Dirty := TBEs[address].Dirty;
|
// out_msg.Dirty := tbe.Dirty;
|
||||||
out_msg.Dirty := false;
|
out_msg.Dirty := false;
|
||||||
out_msg.Acks := in_msg.Acks;
|
out_msg.Acks := in_msg.Acks;
|
||||||
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
|
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
|
||||||
|
@ -683,8 +705,8 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
// out_msg.Dirty := TBEs[address].Dirty;
|
// out_msg.Dirty := tbe.Dirty;
|
||||||
out_msg.Dirty := false;
|
out_msg.Dirty := false;
|
||||||
out_msg.Acks := in_msg.Acks;
|
out_msg.Acks := in_msg.Acks;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
|
@ -695,14 +717,15 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
|
|
||||||
action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send data from TBE to cache") {
|
action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send data from TBE to cache") {
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
if (in_msg.RequestorMachine == MachineType:L1Cache) {
|
if (in_msg.RequestorMachine == MachineType:L1Cache) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
|
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.Dirty := TBEs[address].Dirty;
|
out_msg.Dirty := tbe.Dirty;
|
||||||
out_msg.Acks := in_msg.Acks;
|
out_msg.Acks := in_msg.Acks;
|
||||||
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
|
out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
|
||||||
}
|
}
|
||||||
|
@ -714,8 +737,8 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.Dirty := TBEs[address].Dirty;
|
out_msg.Dirty := tbe.Dirty;
|
||||||
out_msg.Acks := in_msg.Acks;
|
out_msg.Acks := in_msg.Acks;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
|
@ -727,30 +750,33 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
// L2 will usually request data for a writeback
|
// L2 will usually request data for a writeback
|
||||||
action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
|
action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.SenderMachine := MachineType:L1Cache;
|
out_msg.SenderMachine := MachineType:L1Cache;
|
||||||
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
|
||||||
l2_select_low_bit, l2_select_num_bits));
|
l2_select_low_bit, l2_select_num_bits));
|
||||||
out_msg.Dirty := TBEs[address].Dirty;
|
out_msg.Dirty := tbe.Dirty;
|
||||||
if (TBEs[address].Dirty) {
|
if (tbe.Dirty) {
|
||||||
out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY_DATA;
|
out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY_DATA;
|
||||||
} else {
|
} else {
|
||||||
out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN_DATA;
|
out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN_DATA;
|
||||||
}
|
}
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Data;
|
out_msg.MessageSize := MessageSizeType:Writeback_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
|
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
|
||||||
TBEs.deallocate(address);
|
TBEs.deallocate(address);
|
||||||
|
unset_tbe();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(u_writeDataToCache, "u", desc="Write data to cache") {
|
action(u_writeDataToCache, "u", desc="Write data to cache") {
|
||||||
peek(responseToL1Cache_in, ResponseMsg) {
|
peek(responseToL1Cache_in, ResponseMsg) {
|
||||||
getCacheEntry(address).DataBlk := in_msg.DataBlk;
|
assert(is_valid(cache_entry));
|
||||||
getCacheEntry(address).Dirty := in_msg.Dirty;
|
cache_entry.DataBlk := in_msg.DataBlk;
|
||||||
|
cache_entry.Dirty := in_msg.Dirty;
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceResponseType:DATA) {
|
if (in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
//assert(in_msg.Dirty == false);
|
//assert(in_msg.Dirty == false);
|
||||||
|
@ -761,9 +787,10 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
|
|
||||||
action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
|
action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
|
||||||
peek(responseToL1Cache_in, ResponseMsg) {
|
peek(responseToL1Cache_in, ResponseMsg) {
|
||||||
assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
getCacheEntry(address).DataBlk := in_msg.DataBlk;
|
assert(cache_entry.DataBlk == in_msg.DataBlk);
|
||||||
getCacheEntry(address).Dirty := in_msg.Dirty;
|
cache_entry.DataBlk := in_msg.DataBlk;
|
||||||
|
cache_entry.Dirty := in_msg.Dirty;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -773,17 +800,18 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
} else {
|
} else {
|
||||||
L1IcacheMemory.deallocate(address);
|
L1IcacheMemory.deallocate(address);
|
||||||
}
|
}
|
||||||
|
unset_cache_entry();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
|
action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
|
||||||
if (L1DcacheMemory.isTagPresent(address) == false) {
|
if ((is_invalid(cache_entry))) {
|
||||||
L1DcacheMemory.allocate(address, new Entry);
|
set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
|
action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
|
||||||
if (L1IcacheMemory.isTagPresent(address) == false) {
|
if ((is_invalid(cache_entry))) {
|
||||||
L1IcacheMemory.allocate(address, new Entry);
|
set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1173,4 +1201,3 @@ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestT
|
||||||
l_popForwardQueue;
|
l_popForwardQueue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -119,15 +119,18 @@ machine(Directory, "Directory protocol")
|
||||||
// ** OBJECTS **
|
// ** OBJECTS **
|
||||||
TBETable TBEs, template_hack="<Directory_TBE>";
|
TBETable TBEs, template_hack="<Directory_TBE>";
|
||||||
|
|
||||||
|
void set_tbe(TBE b);
|
||||||
|
void unset_tbe();
|
||||||
|
|
||||||
Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
|
Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
|
||||||
return static_cast(Entry, directory[addr]);
|
return static_cast(Entry, directory[addr]);
|
||||||
}
|
}
|
||||||
|
|
||||||
State getState(Address addr) {
|
State getState(TBE tbe, Address addr) {
|
||||||
return getDirectoryEntry(addr).DirectoryState;
|
return getDirectoryEntry(addr).DirectoryState;
|
||||||
}
|
}
|
||||||
|
|
||||||
void setState(Address addr, State state) {
|
void setState(TBE tbe, Address addr, State state) {
|
||||||
if (directory.isPresent(addr)) {
|
if (directory.isPresent(addr)) {
|
||||||
|
|
||||||
if (state == State:I) {
|
if (state == State:I) {
|
||||||
|
@ -204,18 +207,24 @@ machine(Directory, "Directory protocol")
|
||||||
peek(unblockNetwork_in, ResponseMsg) {
|
peek(unblockNetwork_in, ResponseMsg) {
|
||||||
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
|
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
|
||||||
if (getDirectoryEntry(in_msg.Address).WaitingUnblocks == 1) {
|
if (getDirectoryEntry(in_msg.Address).WaitingUnblocks == 1) {
|
||||||
trigger(Event:Last_Unblock, in_msg.Address);
|
trigger(Event:Last_Unblock, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Unblock, in_msg.Address);
|
trigger(Event:Unblock, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
|
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
|
||||||
trigger(Event:Exclusive_Unblock, in_msg.Address);
|
trigger(Event:Exclusive_Unblock, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
|
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
|
||||||
trigger(Event:Dirty_Writeback, in_msg.Address);
|
trigger(Event:Dirty_Writeback, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
|
||||||
trigger(Event:Clean_Writeback, in_msg.Address);
|
trigger(Event:Clean_Writeback, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
||||||
trigger(Event:Data, in_msg.Address);
|
trigger(Event:Data, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
}
|
}
|
||||||
|
@ -227,19 +236,21 @@ machine(Directory, "Directory protocol")
|
||||||
if (requestQueue_in.isReady()) {
|
if (requestQueue_in.isReady()) {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
if (in_msg.Type == CoherenceRequestType:GETS) {
|
if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
trigger(Event:GETS, in_msg.Address);
|
trigger(Event:GETS, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETX) {
|
} else if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
trigger(Event:GETX, in_msg.Address);
|
trigger(Event:GETX, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
|
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
|
||||||
trigger(Event:PUTX, in_msg.Address);
|
trigger(Event:PUTX, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
|
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
|
||||||
trigger(Event:PUTO, in_msg.Address);
|
trigger(Event:PUTO, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
|
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
|
||||||
trigger(Event:PUTO_SHARERS, in_msg.Address);
|
trigger(Event:PUTO_SHARERS, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
|
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
|
||||||
trigger(Event:DMA_READ, makeLineAddress(in_msg.Address));
|
trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
|
||||||
|
TBEs[makeLineAddress(in_msg.Address)]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
|
||||||
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address));
|
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
|
||||||
|
TBEs[makeLineAddress(in_msg.Address)]);
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
}
|
}
|
||||||
|
@ -252,9 +263,9 @@ machine(Directory, "Directory protocol")
|
||||||
if (memQueue_in.isReady()) {
|
if (memQueue_in.isReady()) {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
||||||
trigger(Event:Memory_Data, in_msg.Address);
|
trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
||||||
trigger(Event:Memory_Ack, in_msg.Address);
|
trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -271,6 +282,7 @@ machine(Directory, "Directory protocol")
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceRequestType:WB_ACK;
|
out_msg.Type := CoherenceRequestType:WB_ACK;
|
||||||
out_msg.Requestor := in_msg.Requestor;
|
out_msg.Requestor := in_msg.Requestor;
|
||||||
|
out_msg.RequestorMachine := MachineType:Directory;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
||||||
}
|
}
|
||||||
|
@ -481,8 +493,8 @@ machine(Directory, "Directory protocol")
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := MemoryRequestType:MEMORY_WB;
|
out_msg.Type := MemoryRequestType:MEMORY_WB;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
if (TBEs.isPresent(address)) {
|
if (is_valid(tbe)) {
|
||||||
out_msg.OriginalRequestorMachId := TBEs[address].Requestor;
|
out_msg.OriginalRequestorMachId := tbe.Requestor;
|
||||||
}
|
}
|
||||||
out_msg.DataBlk := in_msg.DataBlk;
|
out_msg.DataBlk := in_msg.DataBlk;
|
||||||
out_msg.MessageSize := in_msg.MessageSize;
|
out_msg.MessageSize := in_msg.MessageSize;
|
||||||
|
@ -543,8 +555,8 @@ machine(Directory, "Directory protocol")
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.SenderMachine := MachineType:Directory;
|
out_msg.SenderMachine := MachineType:Directory;
|
||||||
if (TBEs.isPresent(address)) {
|
if (is_valid(tbe)) {
|
||||||
out_msg.Destination.add(TBEs[address].Requestor);
|
out_msg.Destination.add(tbe.Requestor);
|
||||||
}
|
}
|
||||||
out_msg.DataBlk := in_msg.DataBlk;
|
out_msg.DataBlk := in_msg.DataBlk;
|
||||||
out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
|
out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
|
||||||
|
@ -561,23 +573,25 @@ machine(Directory, "Directory protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write data from a DMA_WRITE to memory") {
|
action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write data from a DMA_WRITE to memory") {
|
||||||
getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk,
|
assert(is_valid(tbe));
|
||||||
addressOffset(TBEs[address].PhysicalAddress),
|
getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
|
||||||
TBEs[address].Len);
|
addressOffset(tbe.PhysicalAddress), tbe.Len);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
|
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
|
||||||
peek (requestQueue_in, RequestMsg) {
|
peek (requestQueue_in, RequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
TBEs[address].PhysicalAddress := in_msg.Address;
|
set_tbe(TBEs[address]);
|
||||||
TBEs[address].Len := in_msg.Len;
|
tbe.PhysicalAddress := in_msg.Address;
|
||||||
TBEs[address].DataBlk := in_msg.DataBlk;
|
tbe.Len := in_msg.Len;
|
||||||
TBEs[address].Requestor := in_msg.Requestor;
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
|
tbe.Requestor := in_msg.Requestor;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
|
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
|
||||||
TBEs.deallocate(address);
|
TBEs.deallocate(address);
|
||||||
|
unset_tbe();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -51,10 +51,13 @@ machine(DMA, "DMA Controller")
|
||||||
TBETable TBEs, template_hack="<DMA_TBE>";
|
TBETable TBEs, template_hack="<DMA_TBE>";
|
||||||
State cur_state;
|
State cur_state;
|
||||||
|
|
||||||
State getState(Address addr) {
|
void set_tbe(TBE b);
|
||||||
|
void unset_tbe();
|
||||||
|
|
||||||
|
State getState(TBE tbe, Address addr) {
|
||||||
return cur_state;
|
return cur_state;
|
||||||
}
|
}
|
||||||
void setState(Address addr, State state) {
|
void setState(TBE tbe, Address addr, State state) {
|
||||||
cur_state := state;
|
cur_state := state;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -83,9 +86,11 @@ machine(DMA, "DMA Controller")
|
||||||
if (dmaRequestQueue_in.isReady()) {
|
if (dmaRequestQueue_in.isReady()) {
|
||||||
peek(dmaRequestQueue_in, SequencerMsg) {
|
peek(dmaRequestQueue_in, SequencerMsg) {
|
||||||
if (in_msg.Type == SequencerRequestType:LD ) {
|
if (in_msg.Type == SequencerRequestType:LD ) {
|
||||||
trigger(Event:ReadRequest, in_msg.LineAddress);
|
trigger(Event:ReadRequest, in_msg.LineAddress,
|
||||||
|
TBEs[in_msg.LineAddress]);
|
||||||
} else if (in_msg.Type == SequencerRequestType:ST) {
|
} else if (in_msg.Type == SequencerRequestType:ST) {
|
||||||
trigger(Event:WriteRequest, in_msg.LineAddress);
|
trigger(Event:WriteRequest, in_msg.LineAddress,
|
||||||
|
TBEs[in_msg.LineAddress]);
|
||||||
} else {
|
} else {
|
||||||
error("Invalid request type");
|
error("Invalid request type");
|
||||||
}
|
}
|
||||||
|
@ -97,12 +102,15 @@ machine(DMA, "DMA Controller")
|
||||||
if (dmaResponseQueue_in.isReady()) {
|
if (dmaResponseQueue_in.isReady()) {
|
||||||
peek( dmaResponseQueue_in, ResponseMsg) {
|
peek( dmaResponseQueue_in, ResponseMsg) {
|
||||||
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
|
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
|
||||||
trigger(Event:DMA_Ack, makeLineAddress(in_msg.Address));
|
trigger(Event:DMA_Ack, makeLineAddress(in_msg.Address),
|
||||||
|
TBEs[makeLineAddress(in_msg.Address)]);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
|
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
|
||||||
in_msg.Type == CoherenceResponseType:DATA) {
|
in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
trigger(Event:Data, makeLineAddress(in_msg.Address));
|
trigger(Event:Data, makeLineAddress(in_msg.Address),
|
||||||
|
TBEs[makeLineAddress(in_msg.Address)]);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
trigger(Event:Inv_Ack, makeLineAddress(in_msg.Address));
|
trigger(Event:Inv_Ack, makeLineAddress(in_msg.Address),
|
||||||
|
TBEs[makeLineAddress(in_msg.Address)]);
|
||||||
} else {
|
} else {
|
||||||
error("Invalid response type");
|
error("Invalid response type");
|
||||||
}
|
}
|
||||||
|
@ -115,7 +123,7 @@ machine(DMA, "DMA Controller")
|
||||||
if (triggerQueue_in.isReady()) {
|
if (triggerQueue_in.isReady()) {
|
||||||
peek(triggerQueue_in, TriggerMsg) {
|
peek(triggerQueue_in, TriggerMsg) {
|
||||||
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
||||||
trigger(Event:All_Acks, in_msg.Address);
|
trigger(Event:All_Acks, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -156,7 +164,8 @@ machine(DMA, "DMA Controller")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
|
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
|
||||||
if (TBEs[address].NumAcks == 0) {
|
assert(is_valid(tbe));
|
||||||
|
if (tbe.NumAcks == 0) {
|
||||||
enqueue(triggerQueue_out, TriggerMsg) {
|
enqueue(triggerQueue_out, TriggerMsg) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := TriggerType:ALL_ACKS;
|
out_msg.Type := TriggerType:ALL_ACKS;
|
||||||
|
@ -166,7 +175,8 @@ machine(DMA, "DMA Controller")
|
||||||
|
|
||||||
action(u_updateAckCount, "u", desc="Update ack count") {
|
action(u_updateAckCount, "u", desc="Update ack count") {
|
||||||
peek(dmaResponseQueue_in, ResponseMsg) {
|
peek(dmaResponseQueue_in, ResponseMsg) {
|
||||||
TBEs[address].NumAcks := TBEs[address].NumAcks - in_msg.Acks;
|
assert(is_valid(tbe));
|
||||||
|
tbe.NumAcks := tbe.NumAcks - in_msg.Acks;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -193,20 +203,24 @@ machine(DMA, "DMA Controller")
|
||||||
|
|
||||||
action(t_updateTBEData, "t", desc="Update TBE Data") {
|
action(t_updateTBEData, "t", desc="Update TBE Data") {
|
||||||
peek(dmaResponseQueue_in, ResponseMsg) {
|
peek(dmaResponseQueue_in, ResponseMsg) {
|
||||||
TBEs[address].DataBlk := in_msg.DataBlk;
|
assert(is_valid(tbe));
|
||||||
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
|
action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
|
||||||
dma_sequencer.dataCallback(TBEs[address].DataBlk);
|
assert(is_valid(tbe));
|
||||||
|
dma_sequencer.dataCallback(tbe.DataBlk);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
|
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
|
set_tbe(TBEs[address]);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
|
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
|
||||||
TBEs.deallocate(address);
|
TBEs.deallocate(address);
|
||||||
|
unset_tbe();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(z_stall, "z", desc="dma is busy..stall") {
|
action(z_stall, "z", desc="dma is busy..stall") {
|
||||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -148,35 +148,25 @@ machine(L2Cache, "Token protocol")
|
||||||
PersistentTable persistentTable;
|
PersistentTable persistentTable;
|
||||||
PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";
|
PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";
|
||||||
|
|
||||||
Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
|
void set_cache_entry(AbstractCacheEntry b);
|
||||||
if (L2cacheMemory.isTagPresent(addr)) {
|
void unset_cache_entry();
|
||||||
return static_cast(Entry, L2cacheMemory[addr]);
|
|
||||||
}
|
Entry getCacheEntry(Address address), return_by_pointer="yes" {
|
||||||
assert(false);
|
Entry cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
|
||||||
return static_cast(Entry, L2cacheMemory[addr]);
|
return cache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
int getTokens(Address addr) {
|
int getTokens(Entry cache_entry) {
|
||||||
if (L2cacheMemory.isTagPresent(addr)) {
|
if (is_valid(cache_entry)) {
|
||||||
return getL2CacheEntry(addr).Tokens;
|
return cache_entry.Tokens;
|
||||||
} else {
|
} else {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void changePermission(Address addr, AccessPermission permission) {
|
State getState(Entry cache_entry, Address addr) {
|
||||||
if (L2cacheMemory.isTagPresent(addr)) {
|
if (is_valid(cache_entry)) {
|
||||||
return L2cacheMemory.changePermission(addr, permission);
|
return cache_entry.CacheState;
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bool isCacheTagPresent(Address addr) {
|
|
||||||
return (L2cacheMemory.isTagPresent(addr) );
|
|
||||||
}
|
|
||||||
|
|
||||||
State getState(Address addr) {
|
|
||||||
if (isCacheTagPresent(addr)) {
|
|
||||||
return getL2CacheEntry(addr).CacheState;
|
|
||||||
} else if (persistentTable.isLocked(addr) == true) {
|
} else if (persistentTable.isLocked(addr) == true) {
|
||||||
return State:I_L;
|
return State:I_L;
|
||||||
} else {
|
} else {
|
||||||
|
@ -184,57 +174,50 @@ machine(L2Cache, "Token protocol")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string getStateStr(Address addr) {
|
void setState(Entry cache_entry, Address addr, State state) {
|
||||||
return L2Cache_State_to_string(getState(addr));
|
|
||||||
}
|
|
||||||
|
|
||||||
void setState(Address addr, State state) {
|
if (is_valid(cache_entry)) {
|
||||||
|
|
||||||
|
|
||||||
if (isCacheTagPresent(addr)) {
|
|
||||||
// Make sure the token count is in range
|
// Make sure the token count is in range
|
||||||
assert(getL2CacheEntry(addr).Tokens >= 0);
|
assert(cache_entry.Tokens >= 0);
|
||||||
assert(getL2CacheEntry(addr).Tokens <= max_tokens());
|
assert(cache_entry.Tokens <= max_tokens());
|
||||||
assert(getL2CacheEntry(addr).Tokens != (max_tokens() / 2));
|
assert(cache_entry.Tokens != (max_tokens() / 2));
|
||||||
|
|
||||||
// Make sure we have no tokens in L
|
// Make sure we have no tokens in L
|
||||||
if ((state == State:I_L) ) {
|
if ((state == State:I_L) ) {
|
||||||
if (isCacheTagPresent(addr)) {
|
assert(cache_entry.Tokens == 0);
|
||||||
assert(getL2CacheEntry(addr).Tokens == 0);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// in M and E you have all the tokens
|
// in M and E you have all the tokens
|
||||||
if (state == State:M ) {
|
if (state == State:M ) {
|
||||||
assert(getL2CacheEntry(addr).Tokens == max_tokens());
|
assert(cache_entry.Tokens == max_tokens());
|
||||||
}
|
}
|
||||||
|
|
||||||
// in NP you have no tokens
|
// in NP you have no tokens
|
||||||
if (state == State:NP) {
|
if (state == State:NP) {
|
||||||
assert(getL2CacheEntry(addr).Tokens == 0);
|
assert(cache_entry.Tokens == 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
// You have at least one token in S-like states
|
// You have at least one token in S-like states
|
||||||
if (state == State:S ) {
|
if (state == State:S ) {
|
||||||
assert(getL2CacheEntry(addr).Tokens > 0);
|
assert(cache_entry.Tokens > 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
// You have at least half the token in O-like states
|
// You have at least half the token in O-like states
|
||||||
if (state == State:O ) {
|
if (state == State:O ) {
|
||||||
assert(getL2CacheEntry(addr).Tokens > (max_tokens() / 2));
|
assert(cache_entry.Tokens > (max_tokens() / 2));
|
||||||
}
|
}
|
||||||
|
|
||||||
getL2CacheEntry(addr).CacheState := state;
|
cache_entry.CacheState := state;
|
||||||
|
|
||||||
// Set permission
|
// Set permission
|
||||||
if (state == State:I) {
|
if (state == State:I) {
|
||||||
changePermission(addr, AccessPermission:Invalid);
|
cache_entry.changePermission(AccessPermission:Invalid);
|
||||||
} else if (state == State:S || state == State:O ) {
|
} else if (state == State:S || state == State:O ) {
|
||||||
changePermission(addr, AccessPermission:Read_Only);
|
cache_entry.changePermission(AccessPermission:Read_Only);
|
||||||
} else if (state == State:M ) {
|
} else if (state == State:M ) {
|
||||||
changePermission(addr, AccessPermission:Read_Write);
|
cache_entry.changePermission(AccessPermission:Read_Write);
|
||||||
} else {
|
} else {
|
||||||
changePermission(addr, AccessPermission:Invalid);
|
cache_entry.changePermission(AccessPermission:Invalid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -341,22 +324,24 @@ machine(L2Cache, "Token protocol")
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
// React to the message based on the current state of the table
|
// React to the message based on the current state of the table
|
||||||
if (persistentTable.isLocked(in_msg.Address)) {
|
if (persistentTable.isLocked(in_msg.Address)) {
|
||||||
|
|
||||||
if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
|
if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
|
||||||
if (getTokens(in_msg.Address) == 1 ||
|
if (getTokens(cache_entry) == 1 ||
|
||||||
getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
|
getTokens(cache_entry) == (max_tokens() / 2) + 1) {
|
||||||
trigger(Event:Persistent_GETS_Last_Token, in_msg.Address);
|
trigger(Event:Persistent_GETS_Last_Token, in_msg.Address,
|
||||||
|
cache_entry);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Persistent_GETS, in_msg.Address);
|
trigger(Event:Persistent_GETS, in_msg.Address, cache_entry);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Persistent_GETX, in_msg.Address);
|
trigger(Event:Persistent_GETX, in_msg.Address, cache_entry);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
|
trigger(Event:Own_Lock_or_Unlock, in_msg.Address, cache_entry);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -369,14 +354,16 @@ machine(L2Cache, "Token protocol")
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
|
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
if (in_msg.Type == CoherenceRequestType:GETX) {
|
if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
trigger(Event:Transient_GETX, in_msg.Address);
|
trigger(Event:Transient_GETX, in_msg.Address, cache_entry);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
|
if (getTokens(cache_entry) == 1) {
|
||||||
trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
|
trigger(Event:Transient_GETS_Last_Token, in_msg.Address,
|
||||||
|
cache_entry);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
trigger(Event:Transient_GETS, in_msg.Address);
|
trigger(Event:Transient_GETS, in_msg.Address, cache_entry);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
|
@ -389,15 +376,16 @@ machine(L2Cache, "Token protocol")
|
||||||
if (L1requestNetwork_in.isReady()) {
|
if (L1requestNetwork_in.isReady()) {
|
||||||
peek(L1requestNetwork_in, RequestMsg) {
|
peek(L1requestNetwork_in, RequestMsg) {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
if (in_msg.Type == CoherenceRequestType:GETX) {
|
if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
trigger(Event:L1_GETX, in_msg.Address);
|
trigger(Event:L1_GETX, in_msg.Address, cache_entry);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
if (getTokens(in_msg.Address) == 1 ||
|
if (getTokens(cache_entry) == 1 ||
|
||||||
getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
|
getTokens(cache_entry) == (max_tokens() / 2) + 1) {
|
||||||
trigger(Event:L1_GETS_Last_Token, in_msg.Address);
|
trigger(Event:L1_GETS_Last_Token, in_msg.Address, cache_entry);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
trigger(Event:L1_GETS, in_msg.Address);
|
trigger(Event:L1_GETS, in_msg.Address, cache_entry);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
|
@ -412,68 +400,80 @@ machine(L2Cache, "Token protocol")
|
||||||
if (responseNetwork_in.isReady()) {
|
if (responseNetwork_in.isReady()) {
|
||||||
peek(responseNetwork_in, ResponseMsg) {
|
peek(responseNetwork_in, ResponseMsg) {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
|
|
||||||
|
if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
|
||||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
assert(in_msg.Tokens < (max_tokens() / 2));
|
assert(in_msg.Tokens < (max_tokens() / 2));
|
||||||
trigger(Event:Ack, in_msg.Address);
|
trigger(Event:Ack, in_msg.Address, cache_entry);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
|
||||||
trigger(Event:Data_Owner, in_msg.Address);
|
trigger(Event:Data_Owner, in_msg.Address, cache_entry);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
|
||||||
trigger(Event:Data_Shared, in_msg.Address);
|
trigger(Event:Data_Shared, in_msg.Address, cache_entry);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
} else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
|
||||||
|
in_msg.Type == CoherenceResponseType:WB_OWNED ||
|
||||||
|
in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
||||||
|
|
||||||
if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
|
if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {
|
||||||
|
|
||||||
// either room is available or the block is already present
|
// either room is available or the block is already present
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
|
if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
|
||||||
assert(in_msg.Dirty == false);
|
assert(in_msg.Dirty == false);
|
||||||
trigger(Event:Writeback_Tokens, in_msg.Address);
|
trigger(Event:Writeback_Tokens, in_msg.Address, cache_entry);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
} else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
||||||
assert(in_msg.Dirty == false);
|
assert(in_msg.Dirty == false);
|
||||||
trigger(Event:Writeback_Shared_Data, in_msg.Address);
|
trigger(Event:Writeback_Shared_Data, in_msg.Address, cache_entry);
|
||||||
}
|
}
|
||||||
else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
|
else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
|
||||||
//assert(in_msg.Dirty == false);
|
//assert(in_msg.Dirty == false);
|
||||||
trigger(Event:Writeback_Owned, in_msg.Address);
|
trigger(Event:Writeback_Owned, in_msg.Address, cache_entry);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
|
trigger(Event:L2_Replacement,
|
||||||
|
L2cacheMemory.cacheProbe(in_msg.Address),
|
||||||
|
getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceResponseType:INV) {
|
} else if (in_msg.Type == CoherenceResponseType:INV) {
|
||||||
trigger(Event:L1_INV, in_msg.Address);
|
trigger(Event:L1_INV, in_msg.Address, cache_entry);
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
assert(in_msg.Tokens < (max_tokens() / 2));
|
assert(in_msg.Tokens < (max_tokens() / 2));
|
||||||
trigger(Event:Ack_All_Tokens, in_msg.Address);
|
trigger(Event:Ack_All_Tokens, in_msg.Address, cache_entry);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
|
||||||
trigger(Event:Data_All_Tokens, in_msg.Address);
|
in_msg.Type == CoherenceResponseType:DATA_SHARED) {
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
trigger(Event:Data_All_Tokens, in_msg.Address, cache_entry);
|
||||||
if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
|
} else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
|
||||||
|
in_msg.Type == CoherenceResponseType:WB_OWNED ||
|
||||||
|
in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
||||||
|
if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {
|
||||||
|
|
||||||
// either room is available or the block is already present
|
// either room is available or the block is already present
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
|
if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
|
||||||
assert(in_msg.Dirty == false);
|
assert(in_msg.Dirty == false);
|
||||||
assert( (getState(in_msg.Address) != State:NP) && (getState(in_msg.Address) != State:I) );
|
assert( (getState(cache_entry, in_msg.Address) != State:NP)
|
||||||
trigger(Event:Writeback_All_Tokens, in_msg.Address);
|
&& (getState(cache_entry, in_msg.Address) != State:I) );
|
||||||
|
trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
} else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
||||||
assert(in_msg.Dirty == false);
|
assert(in_msg.Dirty == false);
|
||||||
trigger(Event:Writeback_All_Tokens, in_msg.Address);
|
trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
|
||||||
}
|
}
|
||||||
else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
|
else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
|
||||||
trigger(Event:Writeback_All_Tokens, in_msg.Address);
|
trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
|
trigger(Event:L2_Replacement,
|
||||||
|
L2cacheMemory.cacheProbe(in_msg.Address),
|
||||||
|
getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceResponseType:INV) {
|
} else if (in_msg.Type == CoherenceResponseType:INV) {
|
||||||
trigger(Event:L1_INV, in_msg.Address);
|
trigger(Event:L1_INV, in_msg.Address, cache_entry);
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
|
@ -536,29 +536,31 @@ machine(L2Cache, "Token protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(c_cleanReplacement, "c", desc="Issue clean writeback") {
|
action(c_cleanReplacement, "c", desc="Issue clean writeback") {
|
||||||
if (getL2CacheEntry(address).Tokens > 0) {
|
assert(is_valid(cache_entry));
|
||||||
|
if (cache_entry.Tokens > 0) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:ACK;
|
out_msg.Type := CoherenceResponseType:ACK;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||||
out_msg.Tokens := getL2CacheEntry(address).Tokens;
|
out_msg.Tokens := cache_entry.Tokens;
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := 0;
|
cache_entry.Tokens := 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
|
action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||||
out_msg.Tokens := getL2CacheEntry(address).Tokens;
|
out_msg.Tokens := cache_entry.Tokens;
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
|
|
||||||
if (getL2CacheEntry(address).Dirty) {
|
if (cache_entry.Dirty) {
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Data;
|
out_msg.MessageSize := MessageSizeType:Writeback_Data;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||||
} else {
|
} else {
|
||||||
|
@ -566,23 +568,24 @@ machine(L2Cache, "Token protocol")
|
||||||
out_msg.Type := CoherenceResponseType:ACK_OWNER;
|
out_msg.Type := CoherenceResponseType:ACK_OWNER;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := 0;
|
cache_entry.Tokens := 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
|
action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
if (getL2CacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
|
assert(is_valid(cache_entry));
|
||||||
|
if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.Tokens := N_tokens;
|
out_msg.Tokens := N_tokens;
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := false;
|
out_msg.Dirty := false;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - N_tokens;
|
cache_entry.Tokens := cache_entry.Tokens - N_tokens;
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
|
@ -591,109 +594,115 @@ machine(L2Cache, "Token protocol")
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.Tokens := 1;
|
out_msg.Tokens := 1;
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := false;
|
out_msg.Dirty := false;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
|
cache_entry.Tokens := cache_entry.Tokens - 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
|
action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
assert(getL2CacheEntry(address).Tokens >= 1);
|
assert(cache_entry.Tokens >= 1);
|
||||||
out_msg.Tokens := getL2CacheEntry(address).Tokens;
|
out_msg.Tokens := cache_entry.Tokens;
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := 0;
|
cache_entry.Tokens := 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
|
action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
|
||||||
if (getL2CacheEntry(address).Tokens > 0) {
|
assert(is_valid(cache_entry));
|
||||||
|
if (cache_entry.Tokens > 0) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:ACK;
|
out_msg.Type := CoherenceResponseType:ACK;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||||
assert(getL2CacheEntry(address).Tokens >= 1);
|
assert(cache_entry.Tokens >= 1);
|
||||||
out_msg.Tokens := getL2CacheEntry(address).Tokens;
|
out_msg.Tokens := cache_entry.Tokens;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Control;
|
out_msg.MessageSize := MessageSizeType:Response_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := 0;
|
cache_entry.Tokens := 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
|
action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||||
assert(getL2CacheEntry(address).Tokens >= 1);
|
assert(cache_entry.Tokens >= 1);
|
||||||
out_msg.Tokens := getL2CacheEntry(address).Tokens;
|
out_msg.Tokens := cache_entry.Tokens;
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := 0;
|
cache_entry.Tokens := 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
|
action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
|
||||||
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
|
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
|
||||||
assert(getL2CacheEntry(address).Tokens > 0);
|
assert(is_valid(cache_entry));
|
||||||
if (getL2CacheEntry(address).Tokens > 1) {
|
assert(cache_entry.Tokens > 0);
|
||||||
|
if (cache_entry.Tokens > 1) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:ACK;
|
out_msg.Type := CoherenceResponseType:ACK;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||||
assert(getL2CacheEntry(address).Tokens >= 1);
|
assert(cache_entry.Tokens >= 1);
|
||||||
out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
|
out_msg.Tokens := cache_entry.Tokens - 1;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Control;
|
out_msg.MessageSize := MessageSizeType:Response_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := 1;
|
cache_entry.Tokens := 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and out tokens but one to starver") {
|
action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and out tokens but one to starver") {
|
||||||
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
|
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
|
||||||
assert(getL2CacheEntry(address).Tokens > (max_tokens() / 2) + 1);
|
assert(is_valid(cache_entry));
|
||||||
|
assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||||
out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
|
out_msg.Tokens := cache_entry.Tokens - 1;
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := 1;
|
cache_entry.Tokens := 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(fa_sendDataWithAllTokens, "fa", desc="Send data and out tokens but one to starver") {
|
action(fa_sendDataWithAllTokens, "fa", desc="Send data and out tokens but one to starver") {
|
||||||
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
|
//assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
|
||||||
assert(getL2CacheEntry(address).Tokens == (max_tokens() / 2) + 1);
|
assert(is_valid(cache_entry));
|
||||||
|
assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||||
out_msg.Tokens := getL2CacheEntry(address).Tokens;
|
out_msg.Tokens := cache_entry.Tokens;
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := 0;
|
cache_entry.Tokens := 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -791,58 +800,60 @@ machine(L2Cache, "Token protocol")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
|
action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
|
||||||
peek(L1requestNetwork_in, RequestMsg) {
|
peek(L1requestNetwork_in, RequestMsg) {
|
||||||
assert(getL2CacheEntry(address).Tokens > 0);
|
assert(is_valid(cache_entry));
|
||||||
|
assert(cache_entry.Tokens > 0);
|
||||||
//enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
|
//enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := false;
|
out_msg.Dirty := false;
|
||||||
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
|
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
|
||||||
out_msg.Tokens := 1;
|
out_msg.Tokens := 1;
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
|
cache_entry.Tokens := cache_entry.Tokens - 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
|
action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
|
||||||
peek(L1requestNetwork_in, RequestMsg) {
|
peek(L1requestNetwork_in, RequestMsg) {
|
||||||
assert(getL2CacheEntry(address).Tokens == (max_tokens() / 2) + 1);
|
assert(is_valid(cache_entry));
|
||||||
|
assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
|
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
|
||||||
out_msg.Tokens := getL2CacheEntry(address).Tokens;
|
out_msg.Tokens := cache_entry.Tokens;
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := 0;
|
cache_entry.Tokens := 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
|
action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
|
||||||
peek(L1requestNetwork_in, RequestMsg) {
|
peek(L1requestNetwork_in, RequestMsg) {
|
||||||
// assert(getL2CacheEntry(address).Tokens == max_tokens());
|
assert(is_valid(cache_entry));
|
||||||
|
// assert(cache_entry.Tokens == max_tokens());
|
||||||
//enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
|
//enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getL2CacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
|
out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
|
||||||
//out_msg.Tokens := max_tokens();
|
//out_msg.Tokens := max_tokens();
|
||||||
out_msg.Tokens := getL2CacheEntry(address).Tokens;
|
out_msg.Tokens := cache_entry.Tokens;
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := 0;
|
cache_entry.Tokens := 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -865,13 +876,14 @@ machine(L2Cache, "Token protocol")
|
||||||
|
|
||||||
action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
|
action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
|
||||||
peek(responseNetwork_in, ResponseMsg) {
|
peek(responseNetwork_in, ResponseMsg) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
assert(in_msg.Tokens != 0);
|
assert(in_msg.Tokens != 0);
|
||||||
getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens + in_msg.Tokens;
|
cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
|
||||||
|
|
||||||
// this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
|
// this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
|
||||||
// may not trigger this action.
|
// may not trigger this action.
|
||||||
if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
|
if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
|
||||||
getL2CacheEntry(address).Dirty := true;
|
cache_entry.Dirty := true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -895,61 +907,65 @@ machine(L2Cache, "Token protocol")
|
||||||
action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
|
action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
|
||||||
peek(L1requestNetwork_in, RequestMsg) {
|
peek(L1requestNetwork_in, RequestMsg) {
|
||||||
if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
|
if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
|
||||||
(isCacheTagPresent(address))) {
|
(is_valid(cache_entry))) {
|
||||||
L2cacheMemory.setMRU(address);
|
L2cacheMemory.setMRU(address);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
|
action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
|
||||||
if (getL2CacheEntry(address).Tokens > 0) {
|
assert(is_valid(cache_entry));
|
||||||
|
if (cache_entry.Tokens > 0) {
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:ACK;
|
out_msg.Type := CoherenceResponseType:ACK;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
assert(getL2CacheEntry(address).Tokens >= 1);
|
assert(cache_entry.Tokens >= 1);
|
||||||
out_msg.Tokens := getL2CacheEntry(address).Tokens;
|
out_msg.Tokens := cache_entry.Tokens;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Control;
|
out_msg.MessageSize := MessageSizeType:Response_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := 0;
|
cache_entry.Tokens := 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
|
action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
|
||||||
if (getL2CacheEntry(address).Tokens > 0) {
|
assert(is_valid(cache_entry));
|
||||||
|
if (cache_entry.Tokens > 0) {
|
||||||
peek(L1requestNetwork_in, RequestMsg) {
|
peek(L1requestNetwork_in, RequestMsg) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:ACK;
|
out_msg.Type := CoherenceResponseType:ACK;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
assert(getL2CacheEntry(address).Tokens >= 1);
|
assert(cache_entry.Tokens >= 1);
|
||||||
out_msg.Tokens := getL2CacheEntry(address).Tokens;
|
out_msg.Tokens := cache_entry.Tokens;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Control;
|
out_msg.MessageSize := MessageSizeType:Response_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
getL2CacheEntry(address).Tokens := 0;
|
cache_entry.Tokens := 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(u_writeDataToCache, "u", desc="Write data to cache") {
|
action(u_writeDataToCache, "u", desc="Write data to cache") {
|
||||||
peek(responseNetwork_in, ResponseMsg) {
|
peek(responseNetwork_in, ResponseMsg) {
|
||||||
getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
|
assert(is_valid(cache_entry));
|
||||||
if ((getL2CacheEntry(address).Dirty == false) && in_msg.Dirty) {
|
cache_entry.DataBlk := in_msg.DataBlk;
|
||||||
getL2CacheEntry(address).Dirty := in_msg.Dirty;
|
if ((cache_entry.Dirty == false) && in_msg.Dirty) {
|
||||||
|
cache_entry.Dirty := in_msg.Dirty;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
|
action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
|
||||||
L2cacheMemory.allocate(address, new Entry);
|
set_cache_entry(L2cacheMemory.allocate(address, new Entry));
|
||||||
}
|
}
|
||||||
|
|
||||||
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
|
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
|
||||||
L2cacheMemory.deallocate(address);
|
L2cacheMemory.deallocate(address);
|
||||||
|
unset_cache_entry();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
|
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
|
||||||
|
@ -965,7 +981,8 @@ machine(L2Cache, "Token protocol")
|
||||||
peek(responseNetwork_in, ResponseMsg) {
|
peek(responseNetwork_in, ResponseMsg) {
|
||||||
if (in_msg.Type != CoherenceResponseType:ACK &&
|
if (in_msg.Type != CoherenceResponseType:ACK &&
|
||||||
in_msg.Type != CoherenceResponseType:WB_TOKENS) {
|
in_msg.Type != CoherenceResponseType:WB_TOKENS) {
|
||||||
assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
|
assert(cache_entry.DataBlk == in_msg.DataBlk);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -162,21 +162,24 @@ machine(Directory, "Token protocol")
|
||||||
bool starving, default="false";
|
bool starving, default="false";
|
||||||
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
|
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
|
||||||
|
|
||||||
|
void set_tbe(TBE b);
|
||||||
|
void unset_tbe();
|
||||||
|
|
||||||
Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
|
Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
|
||||||
return static_cast(Entry, directory[addr]);
|
return static_cast(Entry, directory[addr]);
|
||||||
}
|
}
|
||||||
|
|
||||||
State getState(Address addr) {
|
State getState(TBE tbe, Address addr) {
|
||||||
if (TBEs.isPresent(addr)) {
|
if (is_valid(tbe)) {
|
||||||
return TBEs[addr].TBEState;
|
return tbe.TBEState;
|
||||||
} else {
|
} else {
|
||||||
return getDirectoryEntry(addr).DirectoryState;
|
return getDirectoryEntry(addr).DirectoryState;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void setState(Address addr, State state) {
|
void setState(TBE tbe, Address addr, State state) {
|
||||||
if (TBEs.isPresent(addr)) {
|
if (is_valid(tbe)) {
|
||||||
TBEs[addr].TBEState := state;
|
tbe.TBEState := state;
|
||||||
}
|
}
|
||||||
getDirectoryEntry(addr).DirectoryState := state;
|
getDirectoryEntry(addr).DirectoryState := state;
|
||||||
|
|
||||||
|
@ -223,9 +226,9 @@ machine(Directory, "Token protocol")
|
||||||
if (memQueue_in.isReady()) {
|
if (memQueue_in.isReady()) {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
||||||
trigger(Event:Memory_Data, in_msg.Address);
|
trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
||||||
trigger(Event:Memory_Ack, in_msg.Address);
|
trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -237,7 +240,8 @@ machine(Directory, "Token protocol")
|
||||||
// Reissue Timer
|
// Reissue Timer
|
||||||
in_port(reissueTimerTable_in, Address, reissueTimerTable) {
|
in_port(reissueTimerTable_in, Address, reissueTimerTable) {
|
||||||
if (reissueTimerTable_in.isReady()) {
|
if (reissueTimerTable_in.isReady()) {
|
||||||
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress());
|
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
|
||||||
|
TBEs[reissueTimerTable.readyAddress()]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -248,23 +252,29 @@ machine(Directory, "Token protocol")
|
||||||
if (getDirectoryEntry(in_msg.Address).Tokens + in_msg.Tokens == max_tokens()) {
|
if (getDirectoryEntry(in_msg.Address).Tokens + in_msg.Tokens == max_tokens()) {
|
||||||
if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
|
if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
|
||||||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
|
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
|
||||||
trigger(Event:Data_All_Tokens, in_msg.Address);
|
trigger(Event:Data_All_Tokens, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
|
||||||
trigger(Event:Ack_Owner_All_Tokens, in_msg.Address);
|
trigger(Event:Ack_Owner_All_Tokens, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
trigger(Event:Ack_All_Tokens, in_msg.Address);
|
trigger(Event:Ack_All_Tokens, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
|
if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
|
||||||
trigger(Event:Data_Owner, in_msg.Address);
|
trigger(Event:Data_Owner, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else if ((in_msg.Type == CoherenceResponseType:ACK) ||
|
} else if ((in_msg.Type == CoherenceResponseType:ACK) ||
|
||||||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
|
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
|
||||||
trigger(Event:Tokens, in_msg.Address);
|
trigger(Event:Tokens, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
|
||||||
trigger(Event:Ack_Owner, in_msg.Address);
|
trigger(Event:Ack_Owner, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -295,30 +305,39 @@ machine(Directory, "Token protocol")
|
||||||
if (persistentTable.isLocked(in_msg.Address)) {
|
if (persistentTable.isLocked(in_msg.Address)) {
|
||||||
if (persistentTable.findSmallest(in_msg.Address) == machineID) {
|
if (persistentTable.findSmallest(in_msg.Address) == machineID) {
|
||||||
if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
|
if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
|
||||||
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address);
|
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
|
trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Lockdown, in_msg.Address); // locked
|
// locked
|
||||||
|
trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Unlockdown, in_msg.Address); // unlocked
|
// unlocked
|
||||||
|
trigger(Event:Unlockdown, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
if (persistentTable.findSmallest(in_msg.Address) == machineID) {
|
if (persistentTable.findSmallest(in_msg.Address) == machineID) {
|
||||||
if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
|
if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
|
||||||
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address);
|
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
|
trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
|
||||||
|
TBEs[in_msg.Address]);
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
|
} else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
|
||||||
trigger(Event:Lockdown, in_msg.Address); // locked
|
// locked
|
||||||
|
trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
|
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
|
||||||
trigger(Event:Lockdown, in_msg.Address); // locked
|
// locked
|
||||||
|
trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
|
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
|
||||||
trigger(Event:Unlockdown, in_msg.Address); // unlocked
|
// unlocked
|
||||||
|
trigger(Event:Unlockdown, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
}
|
}
|
||||||
|
@ -332,9 +351,9 @@ machine(Directory, "Token protocol")
|
||||||
peek(requestNetwork_in, RequestMsg) {
|
peek(requestNetwork_in, RequestMsg) {
|
||||||
assert(in_msg.Destination.isElement(machineID));
|
assert(in_msg.Destination.isElement(machineID));
|
||||||
if (in_msg.Type == CoherenceRequestType:GETS) {
|
if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
trigger(Event:GETS, in_msg.Address);
|
trigger(Event:GETS, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETX) {
|
} else if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
trigger(Event:GETX, in_msg.Address);
|
trigger(Event:GETX, in_msg.Address, TBEs[in_msg.Address]);
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
}
|
}
|
||||||
|
@ -346,12 +365,14 @@ machine(Directory, "Token protocol")
|
||||||
if (dmaRequestQueue_in.isReady()) {
|
if (dmaRequestQueue_in.isReady()) {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
if (in_msg.Type == DMARequestType:READ) {
|
if (in_msg.Type == DMARequestType:READ) {
|
||||||
trigger(Event:DMA_READ, in_msg.LineAddress);
|
trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
|
||||||
} else if (in_msg.Type == DMARequestType:WRITE) {
|
} else if (in_msg.Type == DMARequestType:WRITE) {
|
||||||
if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
|
if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
|
||||||
trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress);
|
trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
|
||||||
|
TBEs[in_msg.LineAddress]);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:DMA_WRITE, in_msg.LineAddress);
|
trigger(Event:DMA_WRITE, in_msg.LineAddress,
|
||||||
|
TBEs[in_msg.LineAddress]);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -408,7 +429,7 @@ machine(Directory, "Token protocol")
|
||||||
markPersistentEntries(address);
|
markPersistentEntries(address);
|
||||||
starving := true;
|
starving := true;
|
||||||
|
|
||||||
TBEs[address].WentPersistent := true;
|
tbe.WentPersistent := true;
|
||||||
|
|
||||||
// Do not schedule a wakeup, a persistent requests will always complete
|
// Do not schedule a wakeup, a persistent requests will always complete
|
||||||
} else {
|
} else {
|
||||||
|
@ -478,7 +499,7 @@ machine(Directory, "Token protocol")
|
||||||
markPersistentEntries(address);
|
markPersistentEntries(address);
|
||||||
starving := true;
|
starving := true;
|
||||||
|
|
||||||
TBEs[address].WentPersistent := true;
|
tbe.WentPersistent := true;
|
||||||
|
|
||||||
// Do not schedule a wakeup, a persistent requests will always complete
|
// Do not schedule a wakeup, a persistent requests will always complete
|
||||||
} else {
|
} else {
|
||||||
|
@ -574,7 +595,7 @@ machine(Directory, "Token protocol")
|
||||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||||
assert(getDirectoryEntry(address).Tokens > 0);
|
assert(getDirectoryEntry(address).Tokens > 0);
|
||||||
out_msg.Tokens := getDirectoryEntry(address).Tokens;
|
out_msg.Tokens := getDirectoryEntry(address).Tokens;
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.Dirty := false;
|
out_msg.Dirty := false;
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
|
@ -634,9 +655,9 @@ machine(Directory, "Token protocol")
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := MemoryRequestType:MEMORY_WB;
|
out_msg.Type := MemoryRequestType:MEMORY_WB;
|
||||||
// first, initialize the data blk to the current version of system memory
|
// first, initialize the data blk to the current version of system memory
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
// then add the dma write data
|
// then add the dma write data
|
||||||
out_msg.DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
|
out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
|
||||||
DPRINTF(RubySlicc, "%s\n", out_msg);
|
DPRINTF(RubySlicc, "%s\n", out_msg);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -646,7 +667,7 @@ machine(Directory, "Token protocol")
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := MemoryRequestType:MEMORY_WB;
|
out_msg.Type := MemoryRequestType:MEMORY_WB;
|
||||||
// first, initialize the data blk to the current version of system memory
|
// first, initialize the data blk to the current version of system memory
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
DPRINTF(RubySlicc, "%s\n", out_msg);
|
DPRINTF(RubySlicc, "%s\n", out_msg);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -654,17 +675,18 @@ machine(Directory, "Token protocol")
|
||||||
action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
|
action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
TBEs[address].DmaDataBlk := in_msg.DataBlk;
|
set_tbe(TBEs[address]);
|
||||||
TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
|
tbe.DmaDataBlk := in_msg.DataBlk;
|
||||||
TBEs[address].Len := in_msg.Len;
|
tbe.PhysicalAddress := in_msg.PhysicalAddress;
|
||||||
TBEs[address].DmaRequestor := in_msg.Requestor;
|
tbe.Len := in_msg.Len;
|
||||||
TBEs[address].WentPersistent := false;
|
tbe.DmaRequestor := in_msg.Requestor;
|
||||||
|
tbe.WentPersistent := false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
|
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
|
||||||
|
|
||||||
if (TBEs[address].WentPersistent) {
|
if (tbe.WentPersistent) {
|
||||||
assert(starving == true);
|
assert(starving == true);
|
||||||
|
|
||||||
enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
|
enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
|
||||||
|
@ -692,21 +714,22 @@ machine(Directory, "Token protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
TBEs.deallocate(address);
|
TBEs.deallocate(address);
|
||||||
|
unset_tbe();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
|
action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
|
||||||
peek(responseNetwork_in, ResponseMsg) {
|
peek(responseNetwork_in, ResponseMsg) {
|
||||||
TBEs[address].DataBlk := in_msg.DataBlk;
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(cd_writeCleanDataToTbe, "cd", desc="Write clean memory data to TBE") {
|
action(cd_writeCleanDataToTbe, "cd", desc="Write clean memory data to TBE") {
|
||||||
TBEs[address].DataBlk := getDirectoryEntry(address).DataBlk;
|
tbe.DataBlk := getDirectoryEntry(address).DataBlk;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
|
action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
|
||||||
getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
|
getDirectoryEntry(address).DataBlk := tbe.DataBlk;
|
||||||
getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
|
getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
|
action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
|
||||||
|
@ -837,7 +860,7 @@ machine(Directory, "Token protocol")
|
||||||
out_msg.PhysicalAddress := address;
|
out_msg.PhysicalAddress := address;
|
||||||
out_msg.LineAddress := address;
|
out_msg.LineAddress := address;
|
||||||
out_msg.Type := DMAResponseType:ACK;
|
out_msg.Type := DMAResponseType:ACK;
|
||||||
out_msg.Destination.add(TBEs[address].DmaRequestor);
|
out_msg.Destination.add(tbe.DmaRequestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -853,7 +876,7 @@ machine(Directory, "Token protocol")
|
||||||
// split it up if need be
|
// split it up if need be
|
||||||
//
|
//
|
||||||
out_msg.DataBlk := in_msg.DataBlk;
|
out_msg.DataBlk := in_msg.DataBlk;
|
||||||
out_msg.Destination.add(TBEs[address].DmaRequestor);
|
out_msg.Destination.add(tbe.DmaRequestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -870,7 +893,7 @@ machine(Directory, "Token protocol")
|
||||||
// split it up if need be
|
// split it up if need be
|
||||||
//
|
//
|
||||||
out_msg.DataBlk := in_msg.DataBlk;
|
out_msg.DataBlk := in_msg.DataBlk;
|
||||||
out_msg.Destination.add(TBEs[address].DmaRequestor);
|
out_msg.Destination.add(tbe.DmaRequestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -153,59 +153,66 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
|
|
||||||
TBETable TBEs, template_hack="<L1Cache_TBE>";
|
TBETable TBEs, template_hack="<L1Cache_TBE>";
|
||||||
|
|
||||||
Entry getCacheEntry(Address addr), return_by_ref="yes" {
|
void set_cache_entry(AbstractCacheEntry b);
|
||||||
if (L2cacheMemory.isTagPresent(addr)) {
|
void unset_cache_entry();
|
||||||
return static_cast(Entry, L2cacheMemory[addr]);
|
void set_tbe(TBE b);
|
||||||
} else if (L1DcacheMemory.isTagPresent(addr)) {
|
void unset_tbe();
|
||||||
return static_cast(Entry, L1DcacheMemory[addr]);
|
|
||||||
} else {
|
Entry getCacheEntry(Address address), return_by_pointer="yes" {
|
||||||
return static_cast(Entry, L1IcacheMemory[addr]);
|
Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
|
||||||
}
|
if(is_valid(L2cache_entry)) {
|
||||||
|
return L2cache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
void changePermission(Address addr, AccessPermission permission) {
|
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
|
||||||
if (L2cacheMemory.isTagPresent(addr)) {
|
if(is_valid(L1Dcache_entry)) {
|
||||||
return L2cacheMemory.changePermission(addr, permission);
|
return L1Dcache_entry;
|
||||||
} else if (L1DcacheMemory.isTagPresent(addr)) {
|
|
||||||
return L1DcacheMemory.changePermission(addr, permission);
|
|
||||||
} else {
|
|
||||||
return L1IcacheMemory.changePermission(addr, permission);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isCacheTagPresent(Address addr) {
|
Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
|
||||||
return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
|
return L1Icache_entry;
|
||||||
}
|
}
|
||||||
|
|
||||||
State getState(Address addr) {
|
Entry getL2CacheEntry(Address address), return_by_pointer="yes" {
|
||||||
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
|
Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
|
||||||
assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
|
return L2cache_entry;
|
||||||
assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
|
}
|
||||||
|
|
||||||
if(TBEs.isPresent(addr)) {
|
Entry getL1DCacheEntry(Address address), return_by_pointer="yes" {
|
||||||
return TBEs[addr].TBEState;
|
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
|
||||||
} else if (isCacheTagPresent(addr)) {
|
return L1Dcache_entry;
|
||||||
return getCacheEntry(addr).CacheState;
|
}
|
||||||
|
|
||||||
|
Entry getL1ICacheEntry(Address address), return_by_pointer="yes" {
|
||||||
|
Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
|
||||||
|
return L1Icache_entry;
|
||||||
|
}
|
||||||
|
|
||||||
|
State getState(TBE tbe, Entry cache_entry, Address addr) {
|
||||||
|
if(is_valid(tbe)) {
|
||||||
|
return tbe.TBEState;
|
||||||
|
} else if (is_valid(cache_entry)) {
|
||||||
|
return cache_entry.CacheState;
|
||||||
}
|
}
|
||||||
return State:I;
|
return State:I;
|
||||||
}
|
}
|
||||||
|
|
||||||
void setState(Address addr, State state) {
|
void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
|
||||||
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
|
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
|
||||||
assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
|
assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
|
||||||
assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
|
assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
|
||||||
|
|
||||||
if (TBEs.isPresent(addr)) {
|
if (is_valid(tbe)) {
|
||||||
TBEs[addr].TBEState := state;
|
tbe.TBEState := state;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (isCacheTagPresent(addr)) {
|
if (is_valid(cache_entry)) {
|
||||||
getCacheEntry(addr).CacheState := state;
|
cache_entry.CacheState := state;
|
||||||
|
|
||||||
// Set permission
|
// Set permission
|
||||||
if ((state == State:MM) ||
|
if ((state == State:MM) ||
|
||||||
(state == State:MM_W)) {
|
(state == State:MM_W)) {
|
||||||
changePermission(addr, AccessPermission:Read_Write);
|
cache_entry.changePermission(AccessPermission:Read_Write);
|
||||||
} else if (state == State:S ||
|
} else if (state == State:S ||
|
||||||
state == State:O ||
|
state == State:O ||
|
||||||
state == State:M ||
|
state == State:M ||
|
||||||
|
@ -214,9 +221,9 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
state == State:ISM ||
|
state == State:ISM ||
|
||||||
state == State:OM ||
|
state == State:OM ||
|
||||||
state == State:SS) {
|
state == State:SS) {
|
||||||
changePermission(addr, AccessPermission:Read_Only);
|
cache_entry.changePermission(AccessPermission:Read_Only);
|
||||||
} else {
|
} else {
|
||||||
changePermission(addr, AccessPermission:Invalid);
|
cache_entry.changePermission(AccessPermission:Invalid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -244,15 +251,20 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
GenericMachineType testAndClearLocalHit(Address addr) {
|
GenericMachineType testAndClearLocalHit(Entry cache_entry) {
|
||||||
if (getCacheEntry(addr).FromL2) {
|
if (is_valid(cache_entry) && cache_entry.FromL2) {
|
||||||
getCacheEntry(addr).FromL2 := false;
|
cache_entry.FromL2 := false;
|
||||||
return GenericMachineType:L2Cache;
|
return GenericMachineType:L2Cache;
|
||||||
} else {
|
} else {
|
||||||
return GenericMachineType:L1Cache;
|
return GenericMachineType:L1Cache;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool IsAtomicAccessed(Entry cache_entry) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
|
return cache_entry.AtomicAccessed;
|
||||||
|
}
|
||||||
|
|
||||||
MessageBuffer triggerQueue, ordered="true";
|
MessageBuffer triggerQueue, ordered="true";
|
||||||
|
|
||||||
// ** OUT_PORTS **
|
// ** OUT_PORTS **
|
||||||
|
@ -268,12 +280,16 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
|
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
|
||||||
if (triggerQueue_in.isReady()) {
|
if (triggerQueue_in.isReady()) {
|
||||||
peek(triggerQueue_in, TriggerMsg) {
|
peek(triggerQueue_in, TriggerMsg) {
|
||||||
|
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
|
TBE tbe := TBEs[in_msg.Address];
|
||||||
|
|
||||||
if (in_msg.Type == TriggerType:L2_to_L1) {
|
if (in_msg.Type == TriggerType:L2_to_L1) {
|
||||||
trigger(Event:Complete_L2_to_L1, in_msg.Address);
|
trigger(Event:Complete_L2_to_L1, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == TriggerType:ALL_ACKS) {
|
} else if (in_msg.Type == TriggerType:ALL_ACKS) {
|
||||||
trigger(Event:All_acks, in_msg.Address);
|
trigger(Event:All_acks, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
|
} else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
|
||||||
trigger(Event:All_acks_no_sharers, in_msg.Address);
|
trigger(Event:All_acks_no_sharers, in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -287,30 +303,34 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
in_port(forwardToCache_in, RequestMsg, forwardToCache) {
|
in_port(forwardToCache_in, RequestMsg, forwardToCache) {
|
||||||
if (forwardToCache_in.isReady()) {
|
if (forwardToCache_in.isReady()) {
|
||||||
peek(forwardToCache_in, RequestMsg, block_on="Address") {
|
peek(forwardToCache_in, RequestMsg, block_on="Address") {
|
||||||
|
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
|
TBE tbe := TBEs[in_msg.Address];
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceRequestType:GETX) {
|
if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||||
trigger(Event:Other_GETX, in_msg.Address);
|
trigger(Event:Other_GETX, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
|
} else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
|
||||||
trigger(Event:Merged_GETS, in_msg.Address);
|
trigger(Event:Merged_GETS, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||||
if (machineCount(MachineType:L1Cache) > 1) {
|
if (machineCount(MachineType:L1Cache) > 1) {
|
||||||
if (isCacheTagPresent(in_msg.Address)) {
|
if (is_valid(cache_entry)) {
|
||||||
if (getCacheEntry(in_msg.Address).AtomicAccessed && no_mig_atomic) {
|
if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
|
||||||
trigger(Event:Other_GETS_No_Mig, in_msg.Address);
|
trigger(Event:Other_GETS_No_Mig, in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Other_GETS, in_msg.Address);
|
trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Other_GETS, in_msg.Address);
|
trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:NC_DMA_GETS, in_msg.Address);
|
trigger(Event:NC_DMA_GETS, in_msg.Address, cache_entry, tbe);
|
||||||
}
|
}
|
||||||
} else if (in_msg.Type == CoherenceRequestType:INV) {
|
} else if (in_msg.Type == CoherenceRequestType:INV) {
|
||||||
trigger(Event:Invalidate, in_msg.Address);
|
trigger(Event:Invalidate, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
|
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
|
||||||
trigger(Event:Writeback_Ack, in_msg.Address);
|
trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
|
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
|
||||||
trigger(Event:Writeback_Nack, in_msg.Address);
|
trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -322,16 +342,20 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
in_port(responseToCache_in, ResponseMsg, responseToCache) {
|
in_port(responseToCache_in, ResponseMsg, responseToCache) {
|
||||||
if (responseToCache_in.isReady()) {
|
if (responseToCache_in.isReady()) {
|
||||||
peek(responseToCache_in, ResponseMsg, block_on="Address") {
|
peek(responseToCache_in, ResponseMsg, block_on="Address") {
|
||||||
|
|
||||||
|
Entry cache_entry := getCacheEntry(in_msg.Address);
|
||||||
|
TBE tbe := TBEs[in_msg.Address];
|
||||||
|
|
||||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
trigger(Event:Ack, in_msg.Address);
|
trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
|
||||||
trigger(Event:Shared_Ack, in_msg.Address);
|
trigger(Event:Shared_Ack, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
trigger(Event:Data, in_msg.Address);
|
trigger(Event:Data, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
|
||||||
trigger(Event:Shared_Data, in_msg.Address);
|
trigger(Event:Shared_Data, in_msg.Address, cache_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
||||||
trigger(Event:Exclusive_Data, in_msg.Address);
|
trigger(Event:Exclusive_Data, in_msg.Address, cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -347,41 +371,58 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
|
peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
|
||||||
|
|
||||||
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
|
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
|
||||||
|
TBE tbe := TBEs[in_msg.LineAddress];
|
||||||
|
|
||||||
if (in_msg.Type == CacheRequestType:IFETCH) {
|
if (in_msg.Type == CacheRequestType:IFETCH) {
|
||||||
// ** INSTRUCTION ACCESS ***
|
// ** INSTRUCTION ACCESS ***
|
||||||
|
|
||||||
// Check to see if it is in the OTHER L1
|
// Check to see if it is in the OTHER L1
|
||||||
if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
|
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
|
||||||
|
if (is_valid(L1Dcache_entry)) {
|
||||||
// The block is in the wrong L1, try to write it to the L2
|
// The block is in the wrong L1, try to write it to the L2
|
||||||
if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
|
if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
|
||||||
trigger(Event:L1_to_L2, in_msg.LineAddress);
|
trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.LineAddress));
|
trigger(Event:L2_Replacement,
|
||||||
|
L2cacheMemory.cacheProbe(in_msg.LineAddress),
|
||||||
|
getL2CacheEntry(L2cacheMemory.cacheProbe(in_msg.LineAddress)),
|
||||||
|
TBEs[L2cacheMemory.cacheProbe(in_msg.LineAddress)]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
|
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
|
||||||
|
if (is_valid(L1Icache_entry)) {
|
||||||
// The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
|
// The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type),
|
||||||
|
in_msg.LineAddress, L1Icache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
|
if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
|
||||||
// L1 does't have the line, but we have space for it in the L1
|
// L1 does't have the line, but we have space for it in the L1
|
||||||
if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
|
|
||||||
|
Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
|
||||||
|
if (is_valid(L2cache_entry)) {
|
||||||
// L2 has it (maybe not with the right permissions)
|
// L2 has it (maybe not with the right permissions)
|
||||||
trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress);
|
trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
|
||||||
|
L2cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
// We have room, the L2 doesn't have it, so the L1 fetches the line
|
// We have room, the L2 doesn't have it, so the L1 fetches the line
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type),
|
||||||
|
in_msg.LineAddress, L1Icache_entry, tbe);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room
|
// No room in the L1, so we need to make room
|
||||||
if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.LineAddress))) {
|
if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.LineAddress))) {
|
||||||
// The L2 has room, so we move the line from the L1 to the L2
|
// The L2 has room, so we move the line from the L1 to the L2
|
||||||
trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
|
trigger(Event:L1_to_L2,
|
||||||
|
L1IcacheMemory.cacheProbe(in_msg.LineAddress),
|
||||||
|
getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
|
||||||
|
TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
|
||||||
} else {
|
} else {
|
||||||
// The L2 does not have room, so we replace a line from the L2
|
// The L2 does not have room, so we replace a line from the L2
|
||||||
trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress)));
|
trigger(Event:L2_Replacement,
|
||||||
|
L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
|
||||||
|
getL2CacheEntry(L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress))),
|
||||||
|
TBEs[L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress))]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -389,36 +430,51 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
// *** DATA ACCESS ***
|
// *** DATA ACCESS ***
|
||||||
|
|
||||||
// Check to see if it is in the OTHER L1
|
// Check to see if it is in the OTHER L1
|
||||||
if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
|
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
|
||||||
|
if (is_valid(L1Icache_entry)) {
|
||||||
// The block is in the wrong L1, try to write it to the L2
|
// The block is in the wrong L1, try to write it to the L2
|
||||||
if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
|
if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
|
||||||
trigger(Event:L1_to_L2, in_msg.LineAddress);
|
trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.LineAddress));
|
trigger(Event:L2_Replacement,
|
||||||
|
L2cacheMemory.cacheProbe(in_msg.LineAddress),
|
||||||
|
getL2CacheEntry(L2cacheMemory.cacheProbe(in_msg.LineAddress)),
|
||||||
|
TBEs[L2cacheMemory.cacheProbe(in_msg.LineAddress)]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
|
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
|
||||||
|
if (is_valid(L1Dcache_entry)) {
|
||||||
// The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
|
// The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type),
|
||||||
|
in_msg.LineAddress, L1Dcache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
|
if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
|
||||||
// L1 does't have the line, but we have space for it in the L1
|
// L1 does't have the line, but we have space for it in the L1
|
||||||
if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
|
Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
|
||||||
|
if (is_valid(L2cache_entry)) {
|
||||||
// L2 has it (maybe not with the right permissions)
|
// L2 has it (maybe not with the right permissions)
|
||||||
trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress);
|
trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
|
||||||
|
L2cache_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
// We have room, the L2 doesn't have it, so the L1 fetches the line
|
// We have room, the L2 doesn't have it, so the L1 fetches the line
|
||||||
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
|
trigger(mandatory_request_type_to_event(in_msg.Type),
|
||||||
|
in_msg.LineAddress, L1Dcache_entry, tbe);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// No room in the L1, so we need to make room
|
// No room in the L1, so we need to make room
|
||||||
if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.LineAddress))) {
|
if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.LineAddress))) {
|
||||||
// The L2 has room, so we move the line from the L1 to the L2
|
// The L2 has room, so we move the line from the L1 to the L2
|
||||||
trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
|
trigger(Event:L1_to_L2,
|
||||||
|
L1DcacheMemory.cacheProbe(in_msg.LineAddress),
|
||||||
|
getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
|
||||||
|
TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
|
||||||
} else {
|
} else {
|
||||||
// The L2 does not have room, so we replace a line from the L2
|
// The L2 does not have room, so we replace a line from the L2
|
||||||
trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress)));
|
trigger(Event:L2_Replacement,
|
||||||
|
L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
|
||||||
|
getL2CacheEntry(L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress))),
|
||||||
|
TBEs[L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress))]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -431,37 +487,40 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
|
|
||||||
action(a_issueGETS, "a", desc="Issue GETS") {
|
action(a_issueGETS, "a", desc="Issue GETS") {
|
||||||
enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
|
enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceRequestType:GETS;
|
out_msg.Type := CoherenceRequestType:GETS;
|
||||||
out_msg.Requestor := machineID;
|
out_msg.Requestor := machineID;
|
||||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||||
out_msg.MessageSize := MessageSizeType:Request_Control;
|
out_msg.MessageSize := MessageSizeType:Request_Control;
|
||||||
out_msg.InitialRequestTime := get_time();
|
out_msg.InitialRequestTime := get_time();
|
||||||
TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
|
tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(b_issueGETX, "b", desc="Issue GETX") {
|
action(b_issueGETX, "b", desc="Issue GETX") {
|
||||||
enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
|
enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceRequestType:GETX;
|
out_msg.Type := CoherenceRequestType:GETX;
|
||||||
out_msg.Requestor := machineID;
|
out_msg.Requestor := machineID;
|
||||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||||
out_msg.MessageSize := MessageSizeType:Request_Control;
|
out_msg.MessageSize := MessageSizeType:Request_Control;
|
||||||
out_msg.InitialRequestTime := get_time();
|
out_msg.InitialRequestTime := get_time();
|
||||||
TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
|
tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
|
action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
|
||||||
peek(forwardToCache_in, RequestMsg) {
|
peek(forwardToCache_in, RequestMsg) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
|
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := getCacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getCacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
if (in_msg.DirectedProbe) {
|
if (in_msg.DirectedProbe) {
|
||||||
out_msg.Acks := machineCount(MachineType:L1Cache);
|
out_msg.Acks := machineCount(MachineType:L1Cache);
|
||||||
} else {
|
} else {
|
||||||
|
@ -487,12 +546,13 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
action(e_sendData, "e", desc="Send data from cache to requestor") {
|
action(e_sendData, "e", desc="Send data from cache to requestor") {
|
||||||
peek(forwardToCache_in, RequestMsg) {
|
peek(forwardToCache_in, RequestMsg) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := getCacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
out_msg.Dirty := getCacheEntry(address).Dirty;
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
if (in_msg.DirectedProbe) {
|
if (in_msg.DirectedProbe) {
|
||||||
out_msg.Acks := machineCount(MachineType:L1Cache);
|
out_msg.Acks := machineCount(MachineType:L1Cache);
|
||||||
} else {
|
} else {
|
||||||
|
@ -508,13 +568,14 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, keep a shared copy") {
|
action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, keep a shared copy") {
|
||||||
peek(forwardToCache_in, RequestMsg) {
|
peek(forwardToCache_in, RequestMsg) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
out_msg.DataBlk := getCacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
|
DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
|
||||||
out_msg.Dirty := getCacheEntry(address).Dirty;
|
|
||||||
if (in_msg.DirectedProbe) {
|
if (in_msg.DirectedProbe) {
|
||||||
out_msg.Acks := machineCount(MachineType:L1Cache);
|
out_msg.Acks := machineCount(MachineType:L1Cache);
|
||||||
} else {
|
} else {
|
||||||
|
@ -530,13 +591,14 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors") {
|
action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors") {
|
||||||
peek(forwardToCache_in, RequestMsg) {
|
peek(forwardToCache_in, RequestMsg) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination := in_msg.MergedRequestors;
|
out_msg.Destination := in_msg.MergedRequestors;
|
||||||
out_msg.DataBlk := getCacheEntry(address).DataBlk;
|
out_msg.DataBlk := cache_entry.DataBlk;
|
||||||
|
out_msg.Dirty := cache_entry.Dirty;
|
||||||
DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
|
DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
|
||||||
out_msg.Dirty := getCacheEntry(address).Dirty;
|
|
||||||
out_msg.Acks := machineCount(MachineType:L1Cache);
|
out_msg.Acks := machineCount(MachineType:L1Cache);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
out_msg.InitialRequestTime := in_msg.InitialRequestTime;
|
out_msg.InitialRequestTime := in_msg.InitialRequestTime;
|
||||||
|
@ -599,87 +661,91 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
|
|
||||||
action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
|
action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
|
||||||
enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:UNBLOCKS;
|
out_msg.Type := CoherenceResponseType:UNBLOCKS;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.CurOwner := TBEs[address].CurOwner;
|
out_msg.CurOwner := tbe.CurOwner;
|
||||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||||
out_msg.MessageSize := MessageSizeType:Unblock_Control;
|
out_msg.MessageSize := MessageSizeType:Unblock_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
|
action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
|
||||||
DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
|
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
|
||||||
sequencer.readCallback(address,
|
sequencer.readCallback(address, testAndClearLocalHit(cache_entry),
|
||||||
testAndClearLocalHit(address),
|
cache_entry.DataBlk);
|
||||||
getCacheEntry(address).DataBlk);
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
action(hx_external_load_hit, "hx", desc="load required external msgs") {
|
action(hx_external_load_hit, "hx", desc="load required external msgs") {
|
||||||
DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
|
assert(is_valid(tbe));
|
||||||
|
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
|
||||||
peek(responseToCache_in, ResponseMsg) {
|
peek(responseToCache_in, ResponseMsg) {
|
||||||
|
|
||||||
sequencer.readCallback(address,
|
sequencer.readCallback(address,
|
||||||
getNondirectHitMachType(in_msg.Address, in_msg.Sender),
|
getNondirectHitMachType(in_msg.Address, in_msg.Sender),
|
||||||
getCacheEntry(address).DataBlk,
|
cache_entry.DataBlk,
|
||||||
TBEs[address].InitialRequestTime,
|
tbe.InitialRequestTime,
|
||||||
TBEs[address].ForwardRequestTime,
|
tbe.ForwardRequestTime,
|
||||||
TBEs[address].FirstResponseTime);
|
tbe.FirstResponseTime);
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
|
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
|
||||||
DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
|
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
|
||||||
peek(mandatoryQueue_in, CacheMsg) {
|
peek(mandatoryQueue_in, CacheMsg) {
|
||||||
sequencer.writeCallback(address,
|
sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
|
||||||
testAndClearLocalHit(address),
|
cache_entry.DataBlk);
|
||||||
getCacheEntry(address).DataBlk);
|
|
||||||
|
|
||||||
getCacheEntry(address).Dirty := true;
|
cache_entry.Dirty := true;
|
||||||
if (in_msg.Type == CacheRequestType:ATOMIC) {
|
if (in_msg.Type == CacheRequestType:ATOMIC) {
|
||||||
getCacheEntry(address).AtomicAccessed := true;
|
cache_entry.AtomicAccessed := true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(sx_external_store_hit, "sx", desc="store required external msgs.") {
|
action(sx_external_store_hit, "sx", desc="store required external msgs.") {
|
||||||
DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
|
assert(is_valid(tbe));
|
||||||
|
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
|
||||||
peek(responseToCache_in, ResponseMsg) {
|
peek(responseToCache_in, ResponseMsg) {
|
||||||
|
|
||||||
sequencer.writeCallback(address,
|
sequencer.writeCallback(address,
|
||||||
getNondirectHitMachType(address, in_msg.Sender),
|
getNondirectHitMachType(address, in_msg.Sender),
|
||||||
getCacheEntry(address).DataBlk,
|
cache_entry.DataBlk,
|
||||||
TBEs[address].InitialRequestTime,
|
tbe.InitialRequestTime,
|
||||||
TBEs[address].ForwardRequestTime,
|
tbe.ForwardRequestTime,
|
||||||
TBEs[address].FirstResponseTime);
|
tbe.FirstResponseTime);
|
||||||
|
|
||||||
}
|
}
|
||||||
getCacheEntry(address).Dirty := true;
|
cache_entry.Dirty := true;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
|
action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
|
||||||
DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
|
assert(is_valid(cache_entry));
|
||||||
|
assert(is_valid(tbe));
|
||||||
|
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
|
||||||
|
|
||||||
sequencer.writeCallback(address,
|
sequencer.writeCallback(address,
|
||||||
getNondirectHitMachType(address,
|
getNondirectHitMachType(address, tbe.LastResponder),
|
||||||
TBEs[address].LastResponder),
|
cache_entry.DataBlk,
|
||||||
getCacheEntry(address).DataBlk,
|
tbe.InitialRequestTime,
|
||||||
TBEs[address].InitialRequestTime,
|
tbe.ForwardRequestTime,
|
||||||
TBEs[address].ForwardRequestTime,
|
tbe.FirstResponseTime);
|
||||||
TBEs[address].FirstResponseTime);
|
|
||||||
|
|
||||||
getCacheEntry(address).Dirty := true;
|
cache_entry.Dirty := true;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(i_allocateTBE, "i", desc="Allocate TBE") {
|
action(i_allocateTBE, "i", desc="Allocate TBE") {
|
||||||
check_allocate(TBEs);
|
check_allocate(TBEs);
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
|
set_tbe(TBEs[address]);
|
||||||
TBEs[address].Dirty := getCacheEntry(address).Dirty;
|
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
|
||||||
TBEs[address].Sharers := false;
|
tbe.Dirty := cache_entry.Dirty;
|
||||||
|
tbe.Sharers := false;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
|
action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
|
||||||
|
@ -695,43 +761,49 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
|
action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
|
||||||
getCacheEntry(address).Dirty := TBEs[address].Dirty;
|
assert(is_valid(cache_entry));
|
||||||
getCacheEntry(address).DataBlk := TBEs[address].DataBlk;
|
assert(is_valid(tbe));
|
||||||
|
cache_entry.Dirty := tbe.Dirty;
|
||||||
|
cache_entry.DataBlk := tbe.DataBlk;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
|
action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
|
||||||
getCacheEntry(address).Dirty := TBEs[address].Dirty;
|
assert(is_valid(cache_entry));
|
||||||
getCacheEntry(address).DataBlk := TBEs[address].DataBlk;
|
assert(is_valid(tbe));
|
||||||
getCacheEntry(address).FromL2 := true;
|
cache_entry.Dirty := tbe.Dirty;
|
||||||
|
cache_entry.DataBlk := tbe.DataBlk;
|
||||||
|
cache_entry.FromL2 := true;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
|
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
|
||||||
peek(responseToCache_in, ResponseMsg) {
|
peek(responseToCache_in, ResponseMsg) {
|
||||||
assert(in_msg.Acks > 0);
|
assert(in_msg.Acks > 0);
|
||||||
DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
|
assert(is_valid(tbe));
|
||||||
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
|
DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
|
||||||
DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
|
tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
|
||||||
TBEs[address].LastResponder := in_msg.Sender;
|
DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
|
||||||
if (TBEs[address].InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
|
tbe.LastResponder := in_msg.Sender;
|
||||||
assert(TBEs[address].InitialRequestTime == in_msg.InitialRequestTime);
|
if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
|
||||||
|
assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
|
||||||
}
|
}
|
||||||
if (in_msg.InitialRequestTime != zero_time()) {
|
if (in_msg.InitialRequestTime != zero_time()) {
|
||||||
TBEs[address].InitialRequestTime := in_msg.InitialRequestTime;
|
tbe.InitialRequestTime := in_msg.InitialRequestTime;
|
||||||
}
|
}
|
||||||
if (TBEs[address].ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
|
if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
|
||||||
assert(TBEs[address].ForwardRequestTime == in_msg.ForwardRequestTime);
|
assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
|
||||||
}
|
}
|
||||||
if (in_msg.ForwardRequestTime != zero_time()) {
|
if (in_msg.ForwardRequestTime != zero_time()) {
|
||||||
TBEs[address].ForwardRequestTime := in_msg.ForwardRequestTime;
|
tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
|
||||||
}
|
}
|
||||||
if (TBEs[address].FirstResponseTime == zero_time()) {
|
if (tbe.FirstResponseTime == zero_time()) {
|
||||||
TBEs[address].FirstResponseTime := get_time();
|
tbe.FirstResponseTime := get_time();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
action(uo_updateCurrentOwner, "uo", desc="When moving SS state, update current owner.") {
|
action(uo_updateCurrentOwner, "uo", desc="When moving SS state, update current owner.") {
|
||||||
peek(responseToCache_in, ResponseMsg) {
|
peek(responseToCache_in, ResponseMsg) {
|
||||||
TBEs[address].CurOwner := in_msg.Sender;
|
assert(is_valid(tbe));
|
||||||
|
tbe.CurOwner := in_msg.Sender;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -747,10 +819,11 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
|
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
|
||||||
if (TBEs[address].NumPendingMsgs == 0) {
|
assert(is_valid(tbe));
|
||||||
|
if (tbe.NumPendingMsgs == 0) {
|
||||||
enqueue(triggerQueue_out, TriggerMsg) {
|
enqueue(triggerQueue_out, TriggerMsg) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
if (TBEs[address].Sharers) {
|
if (tbe.Sharers) {
|
||||||
out_msg.Type := TriggerType:ALL_ACKS;
|
out_msg.Type := TriggerType:ALL_ACKS;
|
||||||
} else {
|
} else {
|
||||||
out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
|
out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
|
||||||
|
@ -760,23 +833,26 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
|
action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
|
||||||
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1;
|
assert(is_valid(tbe));
|
||||||
|
tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
|
action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
|
||||||
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs + 1;
|
assert(is_valid(tbe));
|
||||||
|
tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
|
action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
|
||||||
peek(forwardToCache_in, RequestMsg) {
|
peek(forwardToCache_in, RequestMsg) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.Requestor);
|
out_msg.Destination.add(in_msg.Requestor);
|
||||||
DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
|
DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.Dirty := TBEs[address].Dirty;
|
out_msg.Dirty := tbe.Dirty;
|
||||||
if (in_msg.DirectedProbe) {
|
if (in_msg.DirectedProbe) {
|
||||||
out_msg.Acks := machineCount(MachineType:L1Cache);
|
out_msg.Acks := machineCount(MachineType:L1Cache);
|
||||||
} else {
|
} else {
|
||||||
|
@ -792,13 +868,14 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers") {
|
action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers") {
|
||||||
peek(forwardToCache_in, RequestMsg) {
|
peek(forwardToCache_in, RequestMsg) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceResponseType:DATA;
|
out_msg.Type := CoherenceResponseType:DATA;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination := in_msg.MergedRequestors;
|
out_msg.Destination := in_msg.MergedRequestors;
|
||||||
DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
|
DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.Dirty := TBEs[address].Dirty;
|
out_msg.Dirty := tbe.Dirty;
|
||||||
out_msg.Acks := machineCount(MachineType:L1Cache);
|
out_msg.Acks := machineCount(MachineType:L1Cache);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
out_msg.InitialRequestTime := in_msg.InitialRequestTime;
|
out_msg.InitialRequestTime := in_msg.InitialRequestTime;
|
||||||
|
@ -809,48 +886,52 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
|
|
||||||
action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
|
action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
|
||||||
enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||||
out_msg.Dirty := TBEs[address].Dirty;
|
out_msg.Dirty := tbe.Dirty;
|
||||||
if (TBEs[address].Dirty) {
|
if (tbe.Dirty) {
|
||||||
out_msg.Type := CoherenceResponseType:WB_DIRTY;
|
out_msg.Type := CoherenceResponseType:WB_DIRTY;
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Data;
|
out_msg.MessageSize := MessageSizeType:Writeback_Data;
|
||||||
} else {
|
} else {
|
||||||
out_msg.Type := CoherenceResponseType:WB_CLEAN;
|
out_msg.Type := CoherenceResponseType:WB_CLEAN;
|
||||||
// NOTE: in a real system this would not send data. We send
|
// NOTE: in a real system this would not send data. We send
|
||||||
// data here only so we can check it at the memory
|
// data here only so we can check it at the memory
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(r_setSharerBit, "r", desc="We saw other sharers") {
|
action(r_setSharerBit, "r", desc="We saw other sharers") {
|
||||||
TBEs[address].Sharers := true;
|
assert(is_valid(tbe));
|
||||||
|
tbe.Sharers := true;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
|
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
|
||||||
TBEs.deallocate(address);
|
TBEs.deallocate(address);
|
||||||
|
unset_tbe();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
|
action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
|
||||||
enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.Dirty := TBEs[address].Dirty;
|
out_msg.Dirty := tbe.Dirty;
|
||||||
if (TBEs[address].Dirty) {
|
if (tbe.Dirty) {
|
||||||
out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
|
out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Data;
|
out_msg.MessageSize := MessageSizeType:Writeback_Data;
|
||||||
} else {
|
} else {
|
||||||
out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
|
out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
|
||||||
// NOTE: in a real system this would not send data. We send
|
// NOTE: in a real system this would not send data. We send
|
||||||
// data here only so we can check it at the memory
|
// data here only so we can check it at the memory
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -858,18 +939,20 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
|
|
||||||
action(u_writeDataToCache, "u", desc="Write data to cache") {
|
action(u_writeDataToCache, "u", desc="Write data to cache") {
|
||||||
peek(responseToCache_in, ResponseMsg) {
|
peek(responseToCache_in, ResponseMsg) {
|
||||||
getCacheEntry(address).DataBlk := in_msg.DataBlk;
|
assert(is_valid(cache_entry));
|
||||||
getCacheEntry(address).Dirty := in_msg.Dirty;
|
cache_entry.DataBlk := in_msg.DataBlk;
|
||||||
|
cache_entry.Dirty := in_msg.Dirty;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
|
action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
|
||||||
peek(responseToCache_in, ResponseMsg) {
|
peek(responseToCache_in, ResponseMsg) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
|
DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
|
||||||
getCacheEntry(address).DataBlk, in_msg.DataBlk);
|
cache_entry.DataBlk, in_msg.DataBlk);
|
||||||
assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
|
assert(cache_entry.DataBlk == in_msg.DataBlk);
|
||||||
getCacheEntry(address).DataBlk := in_msg.DataBlk;
|
cache_entry.DataBlk := in_msg.DataBlk;
|
||||||
getCacheEntry(address).Dirty := in_msg.Dirty || getCacheEntry(address).Dirty;
|
cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -879,26 +962,28 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
} else {
|
} else {
|
||||||
L1IcacheMemory.deallocate(address);
|
L1IcacheMemory.deallocate(address);
|
||||||
}
|
}
|
||||||
|
unset_cache_entry();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
|
action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
|
||||||
if (L1DcacheMemory.isTagPresent(address) == false) {
|
if (is_invalid(cache_entry)) {
|
||||||
L1DcacheMemory.allocate(address, new Entry);
|
set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
|
action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
|
||||||
if (L1IcacheMemory.isTagPresent(address) == false) {
|
if (is_invalid(cache_entry)) {
|
||||||
L1IcacheMemory.allocate(address, new Entry);
|
set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
|
action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
|
||||||
L2cacheMemory.allocate(address, new Entry);
|
set_cache_entry(L2cacheMemory.allocate(address, new Entry));
|
||||||
}
|
}
|
||||||
|
|
||||||
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
|
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
|
||||||
L2cacheMemory.deallocate(address);
|
L2cacheMemory.deallocate(address);
|
||||||
|
unset_cache_entry();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
|
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
|
||||||
|
@ -1513,4 +1598,3 @@ machine(L1Cache, "AMD Hammer-like protocol")
|
||||||
l_popForwardQueue;
|
l_popForwardQueue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -166,6 +166,11 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
bool isPresent(Address);
|
bool isPresent(Address);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void set_cache_entry(AbstractCacheEntry b);
|
||||||
|
void unset_cache_entry();
|
||||||
|
void set_tbe(TBE a);
|
||||||
|
void unset_tbe();
|
||||||
|
|
||||||
// ** OBJECTS **
|
// ** OBJECTS **
|
||||||
|
|
||||||
TBETable TBEs, template_hack="<Directory_TBE>";
|
TBETable TBEs, template_hack="<Directory_TBE>";
|
||||||
|
@ -174,17 +179,21 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
return static_cast(Entry, directory[addr]);
|
return static_cast(Entry, directory[addr]);
|
||||||
}
|
}
|
||||||
|
|
||||||
PfEntry getPfEntry(Address addr), return_by_ref="yes" {
|
PfEntry getProbeFilterEntry(Address addr), return_by_pointer="yes" {
|
||||||
return static_cast(PfEntry, probeFilter[addr]);
|
if(probe_filter_enabled) {
|
||||||
|
PfEntry pfEntry := static_cast(PfEntry, "pointer", probeFilter.lookup(addr));
|
||||||
|
return pfEntry;
|
||||||
|
}
|
||||||
|
return OOD;
|
||||||
}
|
}
|
||||||
|
|
||||||
State getState(Address addr) {
|
State getState(TBE tbe, PfEntry pf_entry, Address addr) {
|
||||||
if (TBEs.isPresent(addr)) {
|
if (is_valid(tbe)) {
|
||||||
return TBEs[addr].TBEState;
|
return tbe.TBEState;
|
||||||
} else {
|
} else {
|
||||||
if (probe_filter_enabled) {
|
if (probe_filter_enabled) {
|
||||||
if (probeFilter.isTagPresent(addr)) {
|
if (is_valid(pf_entry)) {
|
||||||
assert(getPfEntry(addr).PfState == getDirectoryEntry(addr).DirectoryState);
|
assert(pf_entry.PfState == getDirectoryEntry(addr).DirectoryState);
|
||||||
} else {
|
} else {
|
||||||
assert(getDirectoryEntry(addr).DirectoryState == State:E);
|
assert(getDirectoryEntry(addr).DirectoryState == State:E);
|
||||||
}
|
}
|
||||||
|
@ -193,21 +202,21 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void setState(Address addr, State state) {
|
void setState(TBE tbe, PfEntry pf_entry, Address addr, State state) {
|
||||||
if (TBEs.isPresent(addr)) {
|
if (is_valid(tbe)) {
|
||||||
TBEs[addr].TBEState := state;
|
tbe.TBEState := state;
|
||||||
}
|
}
|
||||||
if (probe_filter_enabled) {
|
if (probe_filter_enabled) {
|
||||||
if (probeFilter.isTagPresent(addr)) {
|
if (is_valid(pf_entry)) {
|
||||||
getPfEntry(addr).PfState := state;
|
pf_entry.PfState := state;
|
||||||
}
|
}
|
||||||
if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
|
if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
|
||||||
assert(probeFilter.isTagPresent(addr));
|
assert(is_valid(pf_entry));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
|
if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
|
||||||
state == State:O) {
|
state == State:O) {
|
||||||
assert(TBEs.isPresent(addr) == false);
|
assert(is_valid(tbe) == false);
|
||||||
}
|
}
|
||||||
getDirectoryEntry(addr).DirectoryState := state;
|
getDirectoryEntry(addr).DirectoryState := state;
|
||||||
}
|
}
|
||||||
|
@ -242,14 +251,20 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
|
in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
|
||||||
if (triggerQueue_in.isReady()) {
|
if (triggerQueue_in.isReady()) {
|
||||||
peek(triggerQueue_in, TriggerMsg) {
|
peek(triggerQueue_in, TriggerMsg) {
|
||||||
|
PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
|
||||||
|
TBE tbe := TBEs[in_msg.Address];
|
||||||
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
||||||
trigger(Event:All_acks_and_owner_data, in_msg.Address);
|
trigger(Event:All_acks_and_owner_data, in_msg.Address,
|
||||||
|
pf_entry, tbe);
|
||||||
} else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
|
} else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
|
||||||
trigger(Event:All_acks_and_shared_data, in_msg.Address);
|
trigger(Event:All_acks_and_shared_data, in_msg.Address,
|
||||||
|
pf_entry, tbe);
|
||||||
} else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
|
} else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
|
||||||
trigger(Event:All_acks_and_data_no_sharers, in_msg.Address);
|
trigger(Event:All_acks_and_data_no_sharers, in_msg.Address,
|
||||||
|
pf_entry, tbe);
|
||||||
} else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
|
} else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
|
||||||
trigger(Event:All_Unblocks, in_msg.Address);
|
trigger(Event:All_Unblocks, in_msg.Address,
|
||||||
|
pf_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -260,20 +275,24 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
|
in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
|
||||||
if (unblockNetwork_in.isReady()) {
|
if (unblockNetwork_in.isReady()) {
|
||||||
peek(unblockNetwork_in, ResponseMsg) {
|
peek(unblockNetwork_in, ResponseMsg) {
|
||||||
|
PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
|
||||||
|
TBE tbe := TBEs[in_msg.Address];
|
||||||
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
|
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
|
||||||
trigger(Event:Unblock, in_msg.Address);
|
trigger(Event:Unblock, in_msg.Address, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
|
} else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
|
||||||
trigger(Event:UnblockS, in_msg.Address);
|
trigger(Event:UnblockS, in_msg.Address, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
|
} else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
|
||||||
trigger(Event:UnblockM, in_msg.Address);
|
trigger(Event:UnblockM, in_msg.Address, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
|
} else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
|
||||||
trigger(Event:Writeback_Clean, in_msg.Address);
|
trigger(Event:Writeback_Clean, in_msg.Address, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
|
} else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
|
||||||
trigger(Event:Writeback_Dirty, in_msg.Address);
|
trigger(Event:Writeback_Dirty, in_msg.Address, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
|
} else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
|
||||||
trigger(Event:Writeback_Exclusive_Clean, in_msg.Address);
|
trigger(Event:Writeback_Exclusive_Clean, in_msg.Address,
|
||||||
|
pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
|
} else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
|
||||||
trigger(Event:Writeback_Exclusive_Dirty, in_msg.Address);
|
trigger(Event:Writeback_Exclusive_Dirty, in_msg.Address,
|
||||||
|
pf_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
}
|
}
|
||||||
|
@ -285,16 +304,18 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
|
in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
|
||||||
if (responseToDir_in.isReady()) {
|
if (responseToDir_in.isReady()) {
|
||||||
peek(responseToDir_in, ResponseMsg) {
|
peek(responseToDir_in, ResponseMsg) {
|
||||||
|
PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
|
||||||
|
TBE tbe := TBEs[in_msg.Address];
|
||||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||||
trigger(Event:Ack, in_msg.Address);
|
trigger(Event:Ack, in_msg.Address, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
|
} else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
|
||||||
trigger(Event:Shared_Ack, in_msg.Address);
|
trigger(Event:Shared_Ack, in_msg.Address, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
|
||||||
trigger(Event:Shared_Data, in_msg.Address);
|
trigger(Event:Shared_Data, in_msg.Address, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA) {
|
||||||
trigger(Event:Data, in_msg.Address);
|
trigger(Event:Data, in_msg.Address, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
|
||||||
trigger(Event:Exclusive_Data, in_msg.Address);
|
trigger(Event:Exclusive_Data, in_msg.Address, pf_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
error("Unexpected message");
|
error("Unexpected message");
|
||||||
}
|
}
|
||||||
|
@ -306,10 +327,12 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
|
in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
|
||||||
if (memQueue_in.isReady()) {
|
if (memQueue_in.isReady()) {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
|
PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
|
||||||
|
TBE tbe := TBEs[in_msg.Address];
|
||||||
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
||||||
trigger(Event:Memory_Data, in_msg.Address);
|
trigger(Event:Memory_Data, in_msg.Address, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
||||||
trigger(Event:Memory_Ack, in_msg.Address);
|
trigger(Event:Memory_Ack, in_msg.Address, pf_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
DPRINTF(RubySlicc, "%d\n", in_msg.Type);
|
DPRINTF(RubySlicc, "%d\n", in_msg.Type);
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
|
@ -321,21 +344,29 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
|
in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
|
||||||
if (requestQueue_in.isReady()) {
|
if (requestQueue_in.isReady()) {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
|
PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
|
||||||
|
TBE tbe := TBEs[in_msg.Address];
|
||||||
if (in_msg.Type == CoherenceRequestType:PUT) {
|
if (in_msg.Type == CoherenceRequestType:PUT) {
|
||||||
trigger(Event:PUT, in_msg.Address);
|
trigger(Event:PUT, in_msg.Address, pf_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
if (probe_filter_enabled) {
|
if (probe_filter_enabled) {
|
||||||
if (probeFilter.isTagPresent(in_msg.Address)) {
|
if (is_valid(pf_entry)) {
|
||||||
trigger(cache_request_to_event(in_msg.Type), in_msg.Address);
|
trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
|
||||||
|
pf_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
if (probeFilter.cacheAvail(in_msg.Address)) {
|
if (probeFilter.cacheAvail(in_msg.Address)) {
|
||||||
trigger(cache_request_to_event(in_msg.Type), in_msg.Address);
|
trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
|
||||||
|
pf_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
trigger(Event:Pf_Replacement, probeFilter.cacheProbe(in_msg.Address));
|
trigger(Event:Pf_Replacement,
|
||||||
|
probeFilter.cacheProbe(in_msg.Address),
|
||||||
|
getProbeFilterEntry(probeFilter.cacheProbe(in_msg.Address)),
|
||||||
|
TBEs[probeFilter.cacheProbe(in_msg.Address)]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
trigger(cache_request_to_event(in_msg.Type), in_msg.Address);
|
trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
|
||||||
|
pf_entry, tbe);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -345,10 +376,12 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
|
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
|
||||||
if (dmaRequestQueue_in.isReady()) {
|
if (dmaRequestQueue_in.isReady()) {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
|
PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
|
||||||
|
TBE tbe := TBEs[in_msg.LineAddress];
|
||||||
if (in_msg.Type == DMARequestType:READ) {
|
if (in_msg.Type == DMARequestType:READ) {
|
||||||
trigger(Event:DMA_READ, in_msg.LineAddress);
|
trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
|
||||||
} else if (in_msg.Type == DMARequestType:WRITE) {
|
} else if (in_msg.Type == DMARequestType:WRITE) {
|
||||||
trigger(Event:DMA_WRITE, in_msg.LineAddress);
|
trigger(Event:DMA_WRITE, in_msg.LineAddress, pf_entry, tbe);
|
||||||
} else {
|
} else {
|
||||||
error("Invalid message");
|
error("Invalid message");
|
||||||
}
|
}
|
||||||
|
@ -360,25 +393,25 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
|
|
||||||
action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
|
action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
|
||||||
if (probe_filter_enabled) {
|
if (probe_filter_enabled) {
|
||||||
assert(probeFilter.isTagPresent(address));
|
assert(is_valid(cache_entry));
|
||||||
probeFilter.setMRU(address);
|
probeFilter.setMRU(address);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
|
action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
|
||||||
if (probe_filter_enabled) {
|
if (probe_filter_enabled) {
|
||||||
assert(probeFilter.isTagPresent(address));
|
assert(is_valid(cache_entry));
|
||||||
peek(unblockNetwork_in, ResponseMsg) {
|
peek(unblockNetwork_in, ResponseMsg) {
|
||||||
assert(getPfEntry(address).Owner != in_msg.Sender);
|
assert(cache_entry.Owner != in_msg.Sender);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(uo_updateOwnerIfPf, "uo", desc="update owner") {
|
action(uo_updateOwnerIfPf, "uo", desc="update owner") {
|
||||||
if (probe_filter_enabled) {
|
if (probe_filter_enabled) {
|
||||||
assert(probeFilter.isTagPresent(address));
|
assert(is_valid(cache_entry));
|
||||||
peek(unblockNetwork_in, ResponseMsg) {
|
peek(unblockNetwork_in, ResponseMsg) {
|
||||||
getPfEntry(address).Owner := in_msg.Sender;
|
cache_entry.Owner := in_msg.Sender;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -410,8 +443,8 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
|
action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
|
||||||
if (probe_filter_enabled) {
|
if (probe_filter_enabled) {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
probeFilter.allocate(address, new PfEntry);
|
set_cache_entry(probeFilter.allocate(address, new PfEntry));
|
||||||
getPfEntry(in_msg.Address).Owner := in_msg.Requestor;
|
cache_entry.Owner := in_msg.Requestor;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -419,92 +452,103 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
|
action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
|
||||||
if (probe_filter_enabled) {
|
if (probe_filter_enabled) {
|
||||||
probeFilter.deallocate(address);
|
probeFilter.deallocate(address);
|
||||||
|
unset_cache_entry();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry") {
|
action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry") {
|
||||||
if (probe_filter_enabled && probeFilter.isTagPresent(address)) {
|
if (probe_filter_enabled && is_valid(cache_entry)) {
|
||||||
probeFilter.deallocate(address);
|
probeFilter.deallocate(address);
|
||||||
|
unset_cache_entry();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
action(v_allocateTBE, "v", desc="Allocate TBE") {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
TBEs[address].PhysicalAddress := address;
|
set_tbe(TBEs[address]);
|
||||||
TBEs[address].ResponseType := CoherenceResponseType:NULL;
|
tbe.PhysicalAddress := address;
|
||||||
|
tbe.ResponseType := CoherenceResponseType:NULL;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
|
action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
TBEs.allocate(address);
|
TBEs.allocate(address);
|
||||||
TBEs[address].DmaDataBlk := in_msg.DataBlk;
|
set_tbe(TBEs[address]);
|
||||||
TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
|
tbe.DmaDataBlk := in_msg.DataBlk;
|
||||||
TBEs[address].Len := in_msg.Len;
|
tbe.PhysicalAddress := in_msg.PhysicalAddress;
|
||||||
TBEs[address].DmaRequestor := in_msg.Requestor;
|
tbe.Len := in_msg.Len;
|
||||||
TBEs[address].ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
|
tbe.DmaRequestor := in_msg.Requestor;
|
||||||
|
tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
|
||||||
//
|
//
|
||||||
// One ack for each last-level cache
|
// One ack for each last-level cache
|
||||||
//
|
//
|
||||||
TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache);
|
tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
|
||||||
//
|
//
|
||||||
// Assume initially that the caches store a clean copy and that memory
|
// Assume initially that the caches store a clean copy and that memory
|
||||||
// will provide the data
|
// will provide the data
|
||||||
//
|
//
|
||||||
TBEs[address].CacheDirty := false;
|
tbe.CacheDirty := false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
|
action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
|
||||||
TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache);
|
assert(is_valid(tbe));
|
||||||
|
tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
|
action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
|
||||||
TBEs[address].NumPendingMsgs := 1;
|
assert(is_valid(tbe));
|
||||||
|
tbe.NumPendingMsgs := 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
|
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
|
||||||
TBEs.deallocate(address);
|
TBEs.deallocate(address);
|
||||||
|
unset_tbe();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
|
action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
|
||||||
TBEs[address].Acks := 1;
|
assert(is_valid(tbe));
|
||||||
|
tbe.Acks := 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
|
action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
|
||||||
|
assert(is_valid(tbe));
|
||||||
if (probe_filter_enabled) {
|
if (probe_filter_enabled) {
|
||||||
TBEs[address].Acks := machineCount(MachineType:L1Cache);
|
tbe.Acks := machineCount(MachineType:L1Cache);
|
||||||
} else {
|
} else {
|
||||||
TBEs[address].Acks := 1;
|
tbe.Acks := 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
|
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
|
||||||
peek(responseToDir_in, ResponseMsg) {
|
peek(responseToDir_in, ResponseMsg) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
assert(in_msg.Acks > 0);
|
assert(in_msg.Acks > 0);
|
||||||
DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
|
DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
|
||||||
//
|
//
|
||||||
// Note that cache data responses will have an ack count of 2. However,
|
// Note that cache data responses will have an ack count of 2. However,
|
||||||
// directory DMA requests must wait for acks from all LLC caches, so
|
// directory DMA requests must wait for acks from all LLC caches, so
|
||||||
// only decrement by 1.
|
// only decrement by 1.
|
||||||
//
|
//
|
||||||
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1;
|
tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
|
||||||
DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
|
DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of messages for which we're waiting") {
|
action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of messages for which we're waiting") {
|
||||||
peek(unblockNetwork_in, ResponseMsg) {
|
peek(unblockNetwork_in, ResponseMsg) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
|
assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
|
||||||
DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
|
DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
|
||||||
//
|
//
|
||||||
// Note that cache data responses will have an ack count of 2. However,
|
// Note that cache data responses will have an ack count of 2. However,
|
||||||
// directory DMA requests must wait for acks from all LLC caches, so
|
// directory DMA requests must wait for acks from all LLC caches, so
|
||||||
// only decrement by 1.
|
// only decrement by 1.
|
||||||
//
|
//
|
||||||
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1;
|
tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
|
||||||
DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
|
DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -513,11 +557,12 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
|
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
|
||||||
if (TBEs[address].NumPendingMsgs == 0) {
|
assert(is_valid(tbe));
|
||||||
|
if (tbe.NumPendingMsgs == 0) {
|
||||||
enqueue(triggerQueue_out, TriggerMsg) {
|
enqueue(triggerQueue_out, TriggerMsg) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
if (TBEs[address].Sharers) {
|
if (tbe.Sharers) {
|
||||||
if (TBEs[address].Owned) {
|
if (tbe.Owned) {
|
||||||
out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
|
out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
|
||||||
} else {
|
} else {
|
||||||
out_msg.Type := TriggerType:ALL_ACKS;
|
out_msg.Type := TriggerType:ALL_ACKS;
|
||||||
|
@ -530,7 +575,8 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(os_checkForMergedGetSCompletion, "os", desc="Check for merged GETS completion") {
|
action(os_checkForMergedGetSCompletion, "os", desc="Check for merged GETS completion") {
|
||||||
if (TBEs[address].NumPendingMsgs == 0) {
|
assert(is_valid(tbe));
|
||||||
|
if (tbe.NumPendingMsgs == 0) {
|
||||||
enqueue(triggerQueue_out, TriggerMsg) {
|
enqueue(triggerQueue_out, TriggerMsg) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := TriggerType:ALL_UNBLOCKS;
|
out_msg.Type := TriggerType:ALL_UNBLOCKS;
|
||||||
|
@ -539,17 +585,20 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(sp_setPendingMsgsToMergedSharers, "sp", desc="Set pending messages to waiting sharers") {
|
action(sp_setPendingMsgsToMergedSharers, "sp", desc="Set pending messages to waiting sharers") {
|
||||||
TBEs[address].NumPendingMsgs := TBEs[address].GetSRequestors.count();
|
assert(is_valid(tbe));
|
||||||
|
tbe.NumPendingMsgs := tbe.GetSRequestors.count();
|
||||||
}
|
}
|
||||||
|
|
||||||
action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
|
action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
|
||||||
if (probe_filter_enabled) {
|
if (probe_filter_enabled) {
|
||||||
TBEs[address].NumPendingMsgs := 0;
|
assert(is_valid(tbe));
|
||||||
|
tbe.NumPendingMsgs := 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
|
action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
|
||||||
if (TBEs[address].NumPendingMsgs == 0) {
|
assert(is_valid(tbe));
|
||||||
|
if (tbe.NumPendingMsgs == 0) {
|
||||||
assert(probe_filter_enabled);
|
assert(probe_filter_enabled);
|
||||||
enqueue(triggerQueue_out, TriggerMsg) {
|
enqueue(triggerQueue_out, TriggerMsg) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
|
@ -561,14 +610,15 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
action(d_sendData, "d", desc="Send data to requestor") {
|
action(d_sendData, "d", desc="Send data to requestor") {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
enqueue(responseNetwork_out, ResponseMsg, latency="1") {
|
enqueue(responseNetwork_out, ResponseMsg, latency="1") {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := TBEs[address].ResponseType;
|
out_msg.Type := tbe.ResponseType;
|
||||||
out_msg.Sender := machineID;
|
out_msg.Sender := machineID;
|
||||||
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
|
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
|
||||||
out_msg.DataBlk := in_msg.DataBlk;
|
out_msg.DataBlk := in_msg.DataBlk;
|
||||||
DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
|
DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
|
||||||
out_msg.Dirty := false; // By definition, the block is now clean
|
out_msg.Dirty := false; // By definition, the block is now clean
|
||||||
out_msg.Acks := TBEs[address].Acks;
|
out_msg.Acks := tbe.Acks;
|
||||||
DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
|
DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
|
||||||
assert(out_msg.Acks > 0);
|
assert(out_msg.Acks > 0);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
|
@ -579,6 +629,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
|
action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
|
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.PhysicalAddress := address;
|
out_msg.PhysicalAddress := address;
|
||||||
out_msg.LineAddress := address;
|
out_msg.LineAddress := address;
|
||||||
out_msg.Type := DMAResponseType:DATA;
|
out_msg.Type := DMAResponseType:DATA;
|
||||||
|
@ -587,7 +638,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
// split it up if need be
|
// split it up if need be
|
||||||
//
|
//
|
||||||
out_msg.DataBlk := in_msg.DataBlk;
|
out_msg.DataBlk := in_msg.DataBlk;
|
||||||
out_msg.Destination.add(TBEs[address].DmaRequestor);
|
out_msg.Destination.add(tbe.DmaRequestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -596,6 +647,7 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
|
action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
|
||||||
peek(triggerQueue_in, TriggerMsg) {
|
peek(triggerQueue_in, TriggerMsg) {
|
||||||
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
|
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.PhysicalAddress := address;
|
out_msg.PhysicalAddress := address;
|
||||||
out_msg.LineAddress := address;
|
out_msg.LineAddress := address;
|
||||||
out_msg.Type := DMAResponseType:DATA;
|
out_msg.Type := DMAResponseType:DATA;
|
||||||
|
@ -603,8 +655,8 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
// we send the entire data block and rely on the dma controller to
|
// we send the entire data block and rely on the dma controller to
|
||||||
// split it up if need be
|
// split it up if need be
|
||||||
//
|
//
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
out_msg.Destination.add(TBEs[address].DmaRequestor);
|
out_msg.Destination.add(tbe.DmaRequestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Response_Data;
|
out_msg.MessageSize := MessageSizeType:Response_Data;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -612,39 +664,45 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
|
|
||||||
action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
|
action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
|
||||||
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
|
enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.PhysicalAddress := address;
|
out_msg.PhysicalAddress := address;
|
||||||
out_msg.LineAddress := address;
|
out_msg.LineAddress := address;
|
||||||
out_msg.Type := DMAResponseType:ACK;
|
out_msg.Type := DMAResponseType:ACK;
|
||||||
out_msg.Destination.add(TBEs[address].DmaRequestor);
|
out_msg.Destination.add(tbe.DmaRequestor);
|
||||||
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
out_msg.MessageSize := MessageSizeType:Writeback_Control;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
|
action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
TBEs[address].ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
|
assert(is_valid(tbe));
|
||||||
|
tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
|
action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
TBEs[address].ResponseType := CoherenceResponseType:DATA;
|
assert(is_valid(tbe));
|
||||||
|
tbe.ResponseType := CoherenceResponseType:DATA;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(rs_recordGetSRequestor, "rs", desc="Record GETS requestor in TBE") {
|
action(rs_recordGetSRequestor, "rs", desc="Record GETS requestor in TBE") {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
TBEs[address].GetSRequestors.add(in_msg.Requestor);
|
assert(is_valid(tbe));
|
||||||
|
tbe.GetSRequestors.add(in_msg.Requestor);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(r_setSharerBit, "r", desc="We saw other sharers") {
|
action(r_setSharerBit, "r", desc="We saw other sharers") {
|
||||||
TBEs[address].Sharers := true;
|
assert(is_valid(tbe));
|
||||||
|
tbe.Sharers := true;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(so_setOwnerBit, "so", desc="We saw other sharers") {
|
action(so_setOwnerBit, "so", desc="We saw other sharers") {
|
||||||
TBEs[address].Sharers := true;
|
assert(is_valid(tbe));
|
||||||
TBEs[address].Owned := true;
|
tbe.Sharers := true;
|
||||||
|
tbe.Owned := true;
|
||||||
}
|
}
|
||||||
|
|
||||||
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
|
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
|
||||||
|
@ -676,7 +734,8 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
|
action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
|
||||||
if ((machineCount(MachineType:L1Cache) > 1) && (TBEs[address].Acks <= 1)) {
|
assert(is_valid(tbe));
|
||||||
|
if ((machineCount(MachineType:L1Cache) > 1) && (tbe.Acks <= 1)) {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
|
enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
|
@ -707,10 +766,11 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
action(io_invalidateOwnerRequest, "io", desc="invalidate all copies") {
|
action(io_invalidateOwnerRequest, "io", desc="invalidate all copies") {
|
||||||
if (machineCount(MachineType:L1Cache) > 1) {
|
if (machineCount(MachineType:L1Cache) > 1) {
|
||||||
enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
|
enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceRequestType:INV;
|
out_msg.Type := CoherenceRequestType:INV;
|
||||||
out_msg.Requestor := machineID;
|
out_msg.Requestor := machineID;
|
||||||
out_msg.Destination.add(getPfEntry(address).Owner);
|
out_msg.Destination.add(cache_entry.Owner);
|
||||||
out_msg.MessageSize := MessageSizeType:Request_Control;
|
out_msg.MessageSize := MessageSizeType:Request_Control;
|
||||||
out_msg.DirectedProbe := true;
|
out_msg.DirectedProbe := true;
|
||||||
}
|
}
|
||||||
|
@ -742,9 +802,10 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
//
|
//
|
||||||
peek(unblockNetwork_in, ResponseMsg) {
|
peek(unblockNetwork_in, ResponseMsg) {
|
||||||
enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
|
enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := CoherenceRequestType:MERGED_GETS;
|
out_msg.Type := CoherenceRequestType:MERGED_GETS;
|
||||||
out_msg.MergedRequestors := TBEs[address].GetSRequestors;
|
out_msg.MergedRequestors := tbe.GetSRequestors;
|
||||||
if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
|
if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
|
||||||
out_msg.Destination.add(in_msg.CurOwner);
|
out_msg.Destination.add(in_msg.CurOwner);
|
||||||
} else {
|
} else {
|
||||||
|
@ -762,10 +823,11 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
if (probe_filter_enabled) {
|
if (probe_filter_enabled) {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
|
enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
|
||||||
|
assert(is_valid(cache_entry));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := in_msg.Type;
|
out_msg.Type := in_msg.Type;
|
||||||
out_msg.Requestor := in_msg.Requestor;
|
out_msg.Requestor := in_msg.Requestor;
|
||||||
out_msg.Destination.add(getPfEntry(address).Owner);
|
out_msg.Destination.add(cache_entry.Owner);
|
||||||
out_msg.MessageSize := MessageSizeType:Request_Control;
|
out_msg.MessageSize := MessageSizeType:Request_Control;
|
||||||
out_msg.DirectedProbe := true;
|
out_msg.DirectedProbe := true;
|
||||||
out_msg.InitialRequestTime := in_msg.InitialRequestTime;
|
out_msg.InitialRequestTime := in_msg.InitialRequestTime;
|
||||||
|
@ -789,7 +851,8 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(f_forwardWriteFromDma, "fw", desc="Forward requests") {
|
action(f_forwardWriteFromDma, "fw", desc="Forward requests") {
|
||||||
if (TBEs[address].NumPendingMsgs > 0) {
|
assert(is_valid(tbe));
|
||||||
|
if (tbe.NumPendingMsgs > 0) {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
|
enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
|
@ -807,7 +870,8 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
}
|
}
|
||||||
|
|
||||||
action(f_forwardReadFromDma, "fr", desc="Forward requests") {
|
action(f_forwardReadFromDma, "fr", desc="Forward requests") {
|
||||||
if (TBEs[address].NumPendingMsgs > 0) {
|
assert(is_valid(tbe));
|
||||||
|
if (tbe.NumPendingMsgs > 0) {
|
||||||
peek(dmaRequestQueue_in, DMARequestMsg) {
|
peek(dmaRequestQueue_in, DMARequestMsg) {
|
||||||
enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
|
enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
|
@ -860,16 +924,18 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
|
|
||||||
action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
|
action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
|
||||||
peek(memQueue_in, MemoryMsg) {
|
peek(memQueue_in, MemoryMsg) {
|
||||||
if (TBEs[address].CacheDirty == false) {
|
assert(is_valid(tbe));
|
||||||
TBEs[address].DataBlk := in_msg.DataBlk;
|
if (tbe.CacheDirty == false) {
|
||||||
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
|
action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
|
||||||
peek(responseToDir_in, ResponseMsg) {
|
peek(responseToDir_in, ResponseMsg) {
|
||||||
TBEs[address].CacheDirty := true;
|
assert(is_valid(tbe));
|
||||||
TBEs[address].DataBlk := in_msg.DataBlk;
|
tbe.CacheDirty := true;
|
||||||
|
tbe.DataBlk := in_msg.DataBlk;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -893,26 +959,30 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
|
|
||||||
action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
|
action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
|
||||||
DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
|
DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
|
||||||
getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
|
assert(is_valid(tbe));
|
||||||
|
getDirectoryEntry(address).DataBlk := tbe.DataBlk;
|
||||||
DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
|
DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
|
||||||
getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
|
getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
|
||||||
DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
|
DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(wdt_writeDataFromTBE, "wdt", desc="DMA Write data to memory from TBE") {
|
action(wdt_writeDataFromTBE, "wdt", desc="DMA Write data to memory from TBE") {
|
||||||
|
assert(is_valid(tbe));
|
||||||
DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
|
DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
|
||||||
getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
|
getDirectoryEntry(address).DataBlk := tbe.DataBlk;
|
||||||
DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
|
DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
|
action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
|
||||||
assert(TBEs[address].CacheDirty);
|
assert(is_valid(tbe));
|
||||||
|
assert(tbe.CacheDirty);
|
||||||
}
|
}
|
||||||
|
|
||||||
action(ano_assertNotOwner, "ano", desc="Assert that request is not current owner") {
|
action(ano_assertNotOwner, "ano", desc="Assert that request is not current owner") {
|
||||||
if (probe_filter_enabled) {
|
if (probe_filter_enabled) {
|
||||||
peek(requestQueue_in, RequestMsg) {
|
peek(requestQueue_in, RequestMsg) {
|
||||||
assert(getPfEntry(address).Owner != in_msg.Requestor);
|
assert(is_valid(cache_entry));
|
||||||
|
assert(cache_entry.Owner != in_msg.Requestor);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -929,12 +999,13 @@ machine(Directory, "AMD Hammer-like protocol")
|
||||||
|
|
||||||
action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
|
action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
|
||||||
enqueue(memQueue_out, MemoryMsg, latency="1") {
|
enqueue(memQueue_out, MemoryMsg, latency="1") {
|
||||||
|
assert(is_valid(tbe));
|
||||||
out_msg.Address := address;
|
out_msg.Address := address;
|
||||||
out_msg.Type := MemoryRequestType:MEMORY_WB;
|
out_msg.Type := MemoryRequestType:MEMORY_WB;
|
||||||
// first, initialize the data blk to the current version of system memory
|
// first, initialize the data blk to the current version of system memory
|
||||||
out_msg.DataBlk := TBEs[address].DataBlk;
|
out_msg.DataBlk := tbe.DataBlk;
|
||||||
// then add the dma write data
|
// then add the dma write data
|
||||||
out_msg.DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
|
out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
|
||||||
DPRINTF(RubySlicc, "%s\n", out_msg);
|
DPRINTF(RubySlicc, "%s\n", out_msg);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -117,15 +117,16 @@ external_type(DirectoryMemory) {
|
||||||
void invalidateBlock(Address);
|
void invalidateBlock(Address);
|
||||||
}
|
}
|
||||||
|
|
||||||
external_type(AbstractCacheEntry, primitive="yes");
|
external_type(AbstractCacheEntry, primitive="yes") {
|
||||||
|
void changePermission(AccessPermission);
|
||||||
|
}
|
||||||
|
|
||||||
external_type(CacheMemory) {
|
external_type(CacheMemory) {
|
||||||
bool cacheAvail(Address);
|
bool cacheAvail(Address);
|
||||||
Address cacheProbe(Address);
|
Address cacheProbe(Address);
|
||||||
void allocate(Address, AbstractCacheEntry);
|
AbstractCacheEntry allocate(Address, AbstractCacheEntry);
|
||||||
void deallocate(Address);
|
void deallocate(Address);
|
||||||
AbstractCacheEntry lookup(Address);
|
AbstractCacheEntry lookup(Address);
|
||||||
void changePermission(Address, AccessPermission);
|
|
||||||
bool isTagPresent(Address);
|
bool isTagPresent(Address);
|
||||||
void profileMiss(CacheMsg);
|
void profileMiss(CacheMsg);
|
||||||
|
|
||||||
|
|
|
@ -32,9 +32,26 @@ AbstractCacheEntry::AbstractCacheEntry()
|
||||||
{
|
{
|
||||||
m_Address.setAddress(0);
|
m_Address.setAddress(0);
|
||||||
m_Permission = AccessPermission_NotPresent;
|
m_Permission = AccessPermission_NotPresent;
|
||||||
|
m_locked = -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
AbstractCacheEntry::~AbstractCacheEntry()
|
AbstractCacheEntry::~AbstractCacheEntry()
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
AccessPermission
|
||||||
|
AbstractCacheEntry::getPermission() const
|
||||||
|
{
|
||||||
|
return m_Permission;
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
AbstractCacheEntry::changePermission(AccessPermission new_perm)
|
||||||
|
{
|
||||||
|
m_Permission = new_perm;
|
||||||
|
if ((new_perm == AccessPermission_Invalid) ||
|
||||||
|
(new_perm == AccessPermission_NotPresent) ||
|
||||||
|
(new_perm == AccessPermission_Stale)) {
|
||||||
|
m_locked = -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -48,11 +48,17 @@ class AbstractCacheEntry : public AbstractEntry
|
||||||
AbstractCacheEntry();
|
AbstractCacheEntry();
|
||||||
virtual ~AbstractCacheEntry() = 0;
|
virtual ~AbstractCacheEntry() = 0;
|
||||||
|
|
||||||
|
// Get/Set permission of cache entry
|
||||||
|
AccessPermission getPermission() const;
|
||||||
|
void changePermission(AccessPermission new_perm);
|
||||||
|
|
||||||
Address m_Address; // Address of this block, required by CacheMemory
|
Address m_Address; // Address of this block, required by CacheMemory
|
||||||
Time m_LastRef; // Last time this block was referenced, required
|
Time m_LastRef; // Last time this block was referenced, required
|
||||||
// by CacheMemory
|
// by CacheMemory
|
||||||
AccessPermission m_Permission; // Access permission for this
|
AccessPermission m_Permission; // Access permission for this
|
||||||
// block, required by CacheMemory
|
// block, required by CacheMemory
|
||||||
|
int m_locked; // Holds info whether the address is locked,
|
||||||
|
// required for implementing LL/SC
|
||||||
};
|
};
|
||||||
|
|
||||||
inline std::ostream&
|
inline std::ostream&
|
||||||
|
@ -64,4 +70,3 @@ operator<<(std::ostream& out, const AbstractCacheEntry& obj)
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif // __MEM_RUBY_SLICC_INTERFACE_ABSTRACTCACHEENTRY_HH__
|
#endif // __MEM_RUBY_SLICC_INTERFACE_ABSTRACTCACHEENTRY_HH__
|
||||||
|
|
||||||
|
|
|
@ -75,13 +75,10 @@ CacheMemory::init()
|
||||||
assert(false);
|
assert(false);
|
||||||
|
|
||||||
m_cache.resize(m_cache_num_sets);
|
m_cache.resize(m_cache_num_sets);
|
||||||
m_locked.resize(m_cache_num_sets);
|
|
||||||
for (int i = 0; i < m_cache_num_sets; i++) {
|
for (int i = 0; i < m_cache_num_sets; i++) {
|
||||||
m_cache[i].resize(m_cache_assoc);
|
m_cache[i].resize(m_cache_assoc);
|
||||||
m_locked[i].resize(m_cache_assoc);
|
|
||||||
for (int j = 0; j < m_cache_assoc; j++) {
|
for (int j = 0; j < m_cache_assoc; j++) {
|
||||||
m_cache[i][j] = NULL;
|
m_cache[i][j] = NULL;
|
||||||
m_locked[i][j] = -1;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -255,7 +252,7 @@ CacheMemory::cacheAvail(const Address& address) const
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
AbstractCacheEntry*
|
||||||
CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
|
CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
|
||||||
{
|
{
|
||||||
assert(address == line_address(address));
|
assert(address == line_address(address));
|
||||||
|
@ -273,13 +270,13 @@ CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
|
||||||
set[i]->m_Permission = AccessPermission_Invalid;
|
set[i]->m_Permission = AccessPermission_Invalid;
|
||||||
DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
|
DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
|
||||||
address);
|
address);
|
||||||
m_locked[cacheSet][i] = -1;
|
set[i]->m_locked = -1;
|
||||||
m_tag_index[address] = i;
|
m_tag_index[address] = i;
|
||||||
|
|
||||||
m_replacementPolicy_ptr->
|
m_replacementPolicy_ptr->
|
||||||
touch(cacheSet, i, g_eventQueue_ptr->getTime());
|
touch(cacheSet, i, g_eventQueue_ptr->getTime());
|
||||||
|
|
||||||
return;
|
return entry;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
panic("Allocate didn't find an available entry");
|
panic("Allocate didn't find an available entry");
|
||||||
|
@ -296,9 +293,6 @@ CacheMemory::deallocate(const Address& address)
|
||||||
if (loc != -1) {
|
if (loc != -1) {
|
||||||
delete m_cache[cacheSet][loc];
|
delete m_cache[cacheSet][loc];
|
||||||
m_cache[cacheSet][loc] = NULL;
|
m_cache[cacheSet][loc] = NULL;
|
||||||
DPRINTF(RubyCache, "Deallocate clearing lock for addr: %x\n",
|
|
||||||
address);
|
|
||||||
m_locked[cacheSet][loc] = -1;
|
|
||||||
m_tag_index.erase(address);
|
m_tag_index.erase(address);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -316,49 +310,25 @@ CacheMemory::cacheProbe(const Address& address) const
|
||||||
}
|
}
|
||||||
|
|
||||||
// looks an address up in the cache
|
// looks an address up in the cache
|
||||||
AbstractCacheEntry&
|
AbstractCacheEntry*
|
||||||
CacheMemory::lookup(const Address& address)
|
CacheMemory::lookup(const Address& address)
|
||||||
{
|
{
|
||||||
assert(address == line_address(address));
|
assert(address == line_address(address));
|
||||||
Index cacheSet = addressToCacheSet(address);
|
Index cacheSet = addressToCacheSet(address);
|
||||||
int loc = findTagInSet(cacheSet, address);
|
int loc = findTagInSet(cacheSet, address);
|
||||||
assert(loc != -1);
|
if(loc == -1) return NULL;
|
||||||
return *m_cache[cacheSet][loc];
|
return m_cache[cacheSet][loc];
|
||||||
}
|
}
|
||||||
|
|
||||||
// looks an address up in the cache
|
// looks an address up in the cache
|
||||||
const AbstractCacheEntry&
|
const AbstractCacheEntry*
|
||||||
CacheMemory::lookup(const Address& address) const
|
CacheMemory::lookup(const Address& address) const
|
||||||
{
|
{
|
||||||
assert(address == line_address(address));
|
assert(address == line_address(address));
|
||||||
Index cacheSet = addressToCacheSet(address);
|
Index cacheSet = addressToCacheSet(address);
|
||||||
int loc = findTagInSet(cacheSet, address);
|
int loc = findTagInSet(cacheSet, address);
|
||||||
assert(loc != -1);
|
if(loc == -1) return NULL;
|
||||||
return *m_cache[cacheSet][loc];
|
return m_cache[cacheSet][loc];
|
||||||
}
|
|
||||||
|
|
||||||
AccessPermission
|
|
||||||
CacheMemory::getPermission(const Address& address) const
|
|
||||||
{
|
|
||||||
assert(address == line_address(address));
|
|
||||||
return lookup(address).m_Permission;
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
CacheMemory::changePermission(const Address& address,
|
|
||||||
AccessPermission new_perm)
|
|
||||||
{
|
|
||||||
assert(address == line_address(address));
|
|
||||||
lookup(address).m_Permission = new_perm;
|
|
||||||
Index cacheSet = addressToCacheSet(address);
|
|
||||||
int loc = findTagInSet(cacheSet, address);
|
|
||||||
if ((new_perm == AccessPermission_Invalid) ||
|
|
||||||
(new_perm == AccessPermission_NotPresent) ||
|
|
||||||
(new_perm == AccessPermission_Stale)) {
|
|
||||||
DPRINTF(RubyCache, "Permission clearing lock for addr: %x\n", address);
|
|
||||||
m_locked[cacheSet][loc] = -1;
|
|
||||||
}
|
|
||||||
assert(getPermission(address) == new_perm);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sets the most recently used bit for a cache block
|
// Sets the most recently used bit for a cache block
|
||||||
|
@ -460,10 +430,10 @@ void
|
||||||
CacheMemory::getMemoryValue(const Address& addr, char* value,
|
CacheMemory::getMemoryValue(const Address& addr, char* value,
|
||||||
unsigned size_in_bytes)
|
unsigned size_in_bytes)
|
||||||
{
|
{
|
||||||
AbstractCacheEntry& entry = lookup(line_address(addr));
|
AbstractCacheEntry* entry = lookup(line_address(addr));
|
||||||
unsigned startByte = addr.getAddress() - line_address(addr).getAddress();
|
unsigned startByte = addr.getAddress() - line_address(addr).getAddress();
|
||||||
for (unsigned i = 0; i < size_in_bytes; ++i) {
|
for (unsigned i = 0; i < size_in_bytes; ++i) {
|
||||||
value[i] = entry.getDataBlk().getByte(i + startByte);
|
value[i] = entry->getDataBlk().getByte(i + startByte);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -471,11 +441,11 @@ void
|
||||||
CacheMemory::setMemoryValue(const Address& addr, char* value,
|
CacheMemory::setMemoryValue(const Address& addr, char* value,
|
||||||
unsigned size_in_bytes)
|
unsigned size_in_bytes)
|
||||||
{
|
{
|
||||||
AbstractCacheEntry& entry = lookup(line_address(addr));
|
AbstractCacheEntry* entry = lookup(line_address(addr));
|
||||||
unsigned startByte = addr.getAddress() - line_address(addr).getAddress();
|
unsigned startByte = addr.getAddress() - line_address(addr).getAddress();
|
||||||
assert(size_in_bytes > 0);
|
assert(size_in_bytes > 0);
|
||||||
for (unsigned i = 0; i < size_in_bytes; ++i) {
|
for (unsigned i = 0; i < size_in_bytes; ++i) {
|
||||||
entry.getDataBlk().setByte(i + startByte, value[i]);
|
entry->getDataBlk().setByte(i + startByte, value[i]);
|
||||||
}
|
}
|
||||||
|
|
||||||
// entry = lookup(line_address(addr));
|
// entry = lookup(line_address(addr));
|
||||||
|
@ -489,7 +459,7 @@ CacheMemory::setLocked(const Address& address, int context)
|
||||||
Index cacheSet = addressToCacheSet(address);
|
Index cacheSet = addressToCacheSet(address);
|
||||||
int loc = findTagInSet(cacheSet, address);
|
int loc = findTagInSet(cacheSet, address);
|
||||||
assert(loc != -1);
|
assert(loc != -1);
|
||||||
m_locked[cacheSet][loc] = context;
|
m_cache[cacheSet][loc]->m_locked = context;
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
|
@ -500,7 +470,7 @@ CacheMemory::clearLocked(const Address& address)
|
||||||
Index cacheSet = addressToCacheSet(address);
|
Index cacheSet = addressToCacheSet(address);
|
||||||
int loc = findTagInSet(cacheSet, address);
|
int loc = findTagInSet(cacheSet, address);
|
||||||
assert(loc != -1);
|
assert(loc != -1);
|
||||||
m_locked[cacheSet][loc] = -1;
|
m_cache[cacheSet][loc]->m_locked = -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool
|
bool
|
||||||
|
@ -511,7 +481,7 @@ CacheMemory::isLocked(const Address& address, int context)
|
||||||
int loc = findTagInSet(cacheSet, address);
|
int loc = findTagInSet(cacheSet, address);
|
||||||
assert(loc != -1);
|
assert(loc != -1);
|
||||||
DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
|
DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
|
||||||
address, m_locked[cacheSet][loc], context);
|
address, m_cache[cacheSet][loc]->m_locked, context);
|
||||||
return m_locked[cacheSet][loc] == context;
|
return m_cache[cacheSet][loc]->m_locked == context;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -82,7 +82,7 @@ class CacheMemory : public SimObject
|
||||||
bool cacheAvail(const Address& address) const;
|
bool cacheAvail(const Address& address) const;
|
||||||
|
|
||||||
// find an unused entry and sets the tag appropriate for the address
|
// find an unused entry and sets the tag appropriate for the address
|
||||||
void allocate(const Address& address, AbstractCacheEntry* new_entry);
|
AbstractCacheEntry* allocate(const Address& address, AbstractCacheEntry* new_entry);
|
||||||
|
|
||||||
// Explicitly free up this address
|
// Explicitly free up this address
|
||||||
void deallocate(const Address& address);
|
void deallocate(const Address& address);
|
||||||
|
@ -91,12 +91,8 @@ class CacheMemory : public SimObject
|
||||||
Address cacheProbe(const Address& address) const;
|
Address cacheProbe(const Address& address) const;
|
||||||
|
|
||||||
// looks an address up in the cache
|
// looks an address up in the cache
|
||||||
AbstractCacheEntry& lookup(const Address& address);
|
AbstractCacheEntry* lookup(const Address& address);
|
||||||
const AbstractCacheEntry& lookup(const Address& address) const;
|
const AbstractCacheEntry* lookup(const Address& address) const;
|
||||||
|
|
||||||
// Get/Set permission of cache block
|
|
||||||
AccessPermission getPermission(const Address& address) const;
|
|
||||||
void changePermission(const Address& address, AccessPermission new_perm);
|
|
||||||
|
|
||||||
int getLatency() const { return m_latency; }
|
int getLatency() const { return m_latency; }
|
||||||
|
|
||||||
|
@ -158,7 +154,6 @@ class CacheMemory : public SimObject
|
||||||
// The second index is the the amount associativity.
|
// The second index is the the amount associativity.
|
||||||
m5::hash_map<Address, int> m_tag_index;
|
m5::hash_map<Address, int> m_tag_index;
|
||||||
std::vector<std::vector<AbstractCacheEntry*> > m_cache;
|
std::vector<std::vector<AbstractCacheEntry*> > m_cache;
|
||||||
std::vector<std::vector<int> > m_locked;
|
|
||||||
|
|
||||||
AbstractReplacementPolicy *m_replacementPolicy_ptr;
|
AbstractReplacementPolicy *m_replacementPolicy_ptr;
|
||||||
|
|
||||||
|
|
|
@ -61,8 +61,7 @@ class TBETable
|
||||||
return (m_number_of_TBEs - m_map.size()) >= n;
|
return (m_number_of_TBEs - m_map.size()) >= n;
|
||||||
}
|
}
|
||||||
|
|
||||||
ENTRY& lookup(const Address& address);
|
ENTRY* lookup(const Address& address);
|
||||||
const ENTRY& lookup(const Address& address) const;
|
|
||||||
|
|
||||||
// Print cache contents
|
// Print cache contents
|
||||||
void print(std::ostream& out) const;
|
void print(std::ostream& out) const;
|
||||||
|
@ -117,21 +116,13 @@ TBETable<ENTRY>::deallocate(const Address& address)
|
||||||
|
|
||||||
// looks an address up in the cache
|
// looks an address up in the cache
|
||||||
template<class ENTRY>
|
template<class ENTRY>
|
||||||
inline ENTRY&
|
inline ENTRY*
|
||||||
TBETable<ENTRY>::lookup(const Address& address)
|
TBETable<ENTRY>::lookup(const Address& address)
|
||||||
{
|
{
|
||||||
assert(isPresent(address));
|
if(m_map.find(address) != m_map.end()) return &(m_map.find(address)->second);
|
||||||
return m_map.find(address)->second;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
// looks an address up in the cache
|
|
||||||
template<class ENTRY>
|
|
||||||
inline const ENTRY&
|
|
||||||
TBETable<ENTRY>::lookup(const Address& address) const
|
|
||||||
{
|
|
||||||
assert(isPresent(address));
|
|
||||||
return m_map.find(address)->second;
|
|
||||||
}
|
|
||||||
|
|
||||||
template<class ENTRY>
|
template<class ENTRY>
|
||||||
inline void
|
inline void
|
||||||
|
|
|
@ -39,6 +39,11 @@ class ActionDeclAST(DeclAST):
|
||||||
|
|
||||||
def generate(self):
|
def generate(self):
|
||||||
resources = {}
|
resources = {}
|
||||||
|
|
||||||
|
machine = self.symtab.state_machine
|
||||||
|
if machine is None:
|
||||||
|
self.error("Action declaration not part of a machine.")
|
||||||
|
|
||||||
if self.statement_list:
|
if self.statement_list:
|
||||||
# Add new local vars
|
# Add new local vars
|
||||||
self.symtab.pushFrame()
|
self.symtab.pushFrame()
|
||||||
|
@ -52,6 +57,16 @@ class ActionDeclAST(DeclAST):
|
||||||
"addr", self.pairs)
|
"addr", self.pairs)
|
||||||
self.symtab.newSymbol(var)
|
self.symtab.newSymbol(var)
|
||||||
|
|
||||||
|
if machine.TBEType != None:
|
||||||
|
var = Var(self.symtab, "tbe", self.location, machine.TBEType,
|
||||||
|
"(*m_tbe_ptr)", self.pairs)
|
||||||
|
self.symtab.newSymbol(var)
|
||||||
|
|
||||||
|
if machine.EntryType != None:
|
||||||
|
var = Var(self.symtab, "cache_entry", self.location,
|
||||||
|
machine.EntryType, "(*m_cache_entry_ptr)", self.pairs)
|
||||||
|
self.symtab.newSymbol(var)
|
||||||
|
|
||||||
# Do not allows returns in actions
|
# Do not allows returns in actions
|
||||||
code = self.slicc.codeFormatter()
|
code = self.slicc.codeFormatter()
|
||||||
self.statement_list.generate(code, None)
|
self.statement_list.generate(code, None)
|
||||||
|
@ -61,10 +76,6 @@ class ActionDeclAST(DeclAST):
|
||||||
|
|
||||||
self.symtab.popFrame()
|
self.symtab.popFrame()
|
||||||
|
|
||||||
machine = self.symtab.state_machine
|
|
||||||
if machine is None:
|
|
||||||
self.error("Action declaration not part of a machine.")
|
|
||||||
|
|
||||||
action = Action(self.symtab, self.ident, resources, self.location,
|
action = Action(self.symtab, self.ident, resources, self.location,
|
||||||
self.pairs)
|
self.pairs)
|
||||||
machine.addAction(action)
|
machine.addAction(action)
|
||||||
|
|
|
@ -48,7 +48,17 @@ class FormalParamAST(AST):
|
||||||
param = "param_%s" % self.ident
|
param = "param_%s" % self.ident
|
||||||
|
|
||||||
# Add to symbol table
|
# Add to symbol table
|
||||||
|
if self.pointer or str(type) == "TBE" or (
|
||||||
|
"interface" in type and type["interface"] == "AbstractCacheEntry"):
|
||||||
|
|
||||||
|
v = Var(self.symtab, self.ident, self.location, type,
|
||||||
|
"(*%s)" % param, self.pairs)
|
||||||
|
self.symtab.newSymbol(v)
|
||||||
|
return type, "%s* %s" % (type.c_ident, param)
|
||||||
|
|
||||||
|
else:
|
||||||
v = Var(self.symtab, self.ident, self.location, type, param,
|
v = Var(self.symtab, self.ident, self.location, type, param,
|
||||||
self.pairs)
|
self.pairs)
|
||||||
self.symtab.newSymbol(v)
|
self.symtab.newSymbol(v)
|
||||||
|
|
||||||
return type, "%s %s" % (type.c_ident, param)
|
return type, "%s %s" % (type.c_ident, param)
|
||||||
|
|
|
@ -89,6 +89,7 @@ class FuncCallExprAST(ExprAST):
|
||||||
len(func.param_types), len(self.exprs))
|
len(func.param_types), len(self.exprs))
|
||||||
|
|
||||||
cvec = []
|
cvec = []
|
||||||
|
type_vec = []
|
||||||
for expr,expected_type in zip(self.exprs, func.param_types):
|
for expr,expected_type in zip(self.exprs, func.param_types):
|
||||||
# Check the types of the parameter
|
# Check the types of the parameter
|
||||||
actual_type,param_code = expr.inline(True)
|
actual_type,param_code = expr.inline(True)
|
||||||
|
@ -96,6 +97,7 @@ class FuncCallExprAST(ExprAST):
|
||||||
expr.error("Type mismatch: expected: %s actual: %s" % \
|
expr.error("Type mismatch: expected: %s actual: %s" % \
|
||||||
(expected_type, actual_type))
|
(expected_type, actual_type))
|
||||||
cvec.append(param_code)
|
cvec.append(param_code)
|
||||||
|
type_vec.append(expected_type)
|
||||||
|
|
||||||
# OK, the semantics of "trigger" here is that, ports in the
|
# OK, the semantics of "trigger" here is that, ports in the
|
||||||
# machine have different priorities. We always check the first
|
# machine have different priorities. We always check the first
|
||||||
|
@ -115,8 +117,25 @@ class FuncCallExprAST(ExprAST):
|
||||||
code('''
|
code('''
|
||||||
{
|
{
|
||||||
Address addr = ${{cvec[1]}};
|
Address addr = ${{cvec[1]}};
|
||||||
TransitionResult result = doTransition(${{cvec[0]}}, ${machine}_getState(addr), addr);
|
''')
|
||||||
|
if machine.TBEType != None and machine.EntryType != None:
|
||||||
|
code('''
|
||||||
|
TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[2]}}, ${{cvec[3]}}, addr);
|
||||||
|
''')
|
||||||
|
elif machine.TBEType != None:
|
||||||
|
code('''
|
||||||
|
TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[2]}}, addr);
|
||||||
|
''')
|
||||||
|
elif machine.EntryType != None:
|
||||||
|
code('''
|
||||||
|
TransitionResult result = doTransition(${{cvec[0]}}, ${{cvec[2]}}, addr);
|
||||||
|
''')
|
||||||
|
else:
|
||||||
|
code('''
|
||||||
|
TransitionResult result = doTransition(${{cvec[0]}}, addr);
|
||||||
|
''')
|
||||||
|
|
||||||
|
code('''
|
||||||
if (result == TransitionResult_Valid) {
|
if (result == TransitionResult_Valid) {
|
||||||
counter++;
|
counter++;
|
||||||
continue; // Check the first port again
|
continue; // Check the first port again
|
||||||
|
@ -175,6 +194,16 @@ if (!(${{cvec[0]}})) {
|
||||||
elif self.proc_name == "continueProcessing":
|
elif self.proc_name == "continueProcessing":
|
||||||
code("counter++;")
|
code("counter++;")
|
||||||
code("continue; // Check the first port again")
|
code("continue; // Check the first port again")
|
||||||
|
|
||||||
|
elif self.proc_name == "set_cache_entry":
|
||||||
|
code("set_cache_entry(m_cache_entry_ptr, %s);" %(cvec[0]));
|
||||||
|
elif self.proc_name == "unset_cache_entry":
|
||||||
|
code("unset_cache_entry(m_cache_entry_ptr);");
|
||||||
|
elif self.proc_name == "set_tbe":
|
||||||
|
code("set_tbe(m_tbe_ptr, %s);" %(cvec[0]));
|
||||||
|
elif self.proc_name == "unset_tbe":
|
||||||
|
code("unset_tbe(m_tbe_ptr);");
|
||||||
|
|
||||||
else:
|
else:
|
||||||
# Normal function
|
# Normal function
|
||||||
|
|
||||||
|
@ -184,7 +213,27 @@ if (!(${{cvec[0]}})) {
|
||||||
if "external" not in func and not func.isInternalMachineFunc:
|
if "external" not in func and not func.isInternalMachineFunc:
|
||||||
internal = "m_chip_ptr->"
|
internal = "m_chip_ptr->"
|
||||||
|
|
||||||
params = ', '.join(str(c) for c in cvec)
|
params = ""
|
||||||
|
first_param = True
|
||||||
|
|
||||||
|
for (param_code, type) in zip(cvec, type_vec):
|
||||||
|
if str(type) == "TBE" or ("interface" in type and
|
||||||
|
type["interface"] == "AbstractCacheEntry"):
|
||||||
|
|
||||||
|
if first_param:
|
||||||
|
params = str(param_code).replace('*','')
|
||||||
|
first_param = False
|
||||||
|
else:
|
||||||
|
params += ', '
|
||||||
|
params += str(param_code).replace('*','');
|
||||||
|
else:
|
||||||
|
if first_param:
|
||||||
|
params = str(param_code)
|
||||||
|
first_param = False
|
||||||
|
else:
|
||||||
|
params += ', '
|
||||||
|
params += str(param_code);
|
||||||
|
|
||||||
fix = code.nofix()
|
fix = code.nofix()
|
||||||
code('(${internal}${{func.c_ident}}($params))')
|
code('(${internal}${{func.c_ident}}($params))')
|
||||||
code.fix(fix)
|
code.fix(fix)
|
||||||
|
|
|
@ -55,13 +55,17 @@ class IfStatementAST(StatementAST):
|
||||||
code('if ($cond_code) {')
|
code('if ($cond_code) {')
|
||||||
# Then part
|
# Then part
|
||||||
code.indent()
|
code.indent()
|
||||||
|
self.symtab.pushFrame()
|
||||||
self.then.generate(code, return_type)
|
self.then.generate(code, return_type)
|
||||||
|
self.symtab.popFrame()
|
||||||
code.dedent()
|
code.dedent()
|
||||||
# Else part
|
# Else part
|
||||||
if self.else_:
|
if self.else_:
|
||||||
code('} else {')
|
code('} else {')
|
||||||
code.indent()
|
code.indent()
|
||||||
|
self.symtab.pushFrame()
|
||||||
self.else_.generate(code, return_type)
|
self.else_.generate(code, return_type)
|
||||||
|
self.symtab.popFrame()
|
||||||
code.dedent()
|
code.dedent()
|
||||||
code('}') # End scope
|
code('}') # End scope
|
||||||
|
|
||||||
|
|
|
@ -51,6 +51,10 @@ class InPortDeclAST(DeclAST):
|
||||||
symtab = self.symtab
|
symtab = self.symtab
|
||||||
void_type = symtab.find("void", Type)
|
void_type = symtab.find("void", Type)
|
||||||
|
|
||||||
|
machine = symtab.state_machine
|
||||||
|
if machine is None:
|
||||||
|
self.error("InPort declaration not part of a machine.")
|
||||||
|
|
||||||
code = self.slicc.codeFormatter()
|
code = self.slicc.codeFormatter()
|
||||||
queue_type = self.var_expr.generate(code)
|
queue_type = self.var_expr.generate(code)
|
||||||
if not queue_type.isInPort:
|
if not queue_type.isInPort:
|
||||||
|
@ -79,6 +83,11 @@ class InPortDeclAST(DeclAST):
|
||||||
|
|
||||||
param_types.append(type)
|
param_types.append(type)
|
||||||
|
|
||||||
|
if machine.EntryType != None:
|
||||||
|
param_types.append(machine.EntryType)
|
||||||
|
if machine.TBEType != None:
|
||||||
|
param_types.append(machine.TBEType)
|
||||||
|
|
||||||
# Add the trigger method - FIXME, this is a bit dirty
|
# Add the trigger method - FIXME, this is a bit dirty
|
||||||
pairs = { "external" : "yes" }
|
pairs = { "external" : "yes" }
|
||||||
func = Func(self.symtab, "trigger", self.location, void_type,
|
func = Func(self.symtab, "trigger", self.location, void_type,
|
||||||
|
@ -123,13 +132,10 @@ class InPortDeclAST(DeclAST):
|
||||||
rcode.indent()
|
rcode.indent()
|
||||||
self.statements.generate(rcode, None)
|
self.statements.generate(rcode, None)
|
||||||
in_port["c_code_in_port"] = str(rcode)
|
in_port["c_code_in_port"] = str(rcode)
|
||||||
|
|
||||||
symtab.popFrame()
|
symtab.popFrame()
|
||||||
|
|
||||||
# Add port to state machine
|
# Add port to state machine
|
||||||
machine = symtab.state_machine
|
|
||||||
if machine is None:
|
|
||||||
self.error("InPort declaration not part of a machine.")
|
|
||||||
|
|
||||||
machine.addInPort(in_port)
|
machine.addInPort(in_port)
|
||||||
|
|
||||||
# Include max_rank to be used by StateMachine.py
|
# Include max_rank to be used by StateMachine.py
|
||||||
|
|
53
src/mem/slicc/ast/IsValidPtrExprAST.py
Normal file
53
src/mem/slicc/ast/IsValidPtrExprAST.py
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
#
|
||||||
|
# Copyright (c) 2011 Mark D. Hill and David A. Wood
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met: redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer;
|
||||||
|
# redistributions in binary form must reproduce the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer in the
|
||||||
|
# documentation and/or other materials provided with the distribution;
|
||||||
|
# neither the name of the copyright holders nor the names of its
|
||||||
|
# contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
#
|
||||||
|
|
||||||
|
from slicc.ast.ExprAST import ExprAST
|
||||||
|
from slicc.symbols import Type
|
||||||
|
|
||||||
|
class IsValidPtrExprAST(ExprAST):
|
||||||
|
def __init__(self, slicc, variable, flag):
|
||||||
|
super(IsValidPtrExprAST, self).__init__(slicc)
|
||||||
|
self.variable = variable
|
||||||
|
self.flag = flag
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "[IsValidPtrExprAST: %r]" % self.variable
|
||||||
|
|
||||||
|
def generate(self, code):
|
||||||
|
# Make sure the variable is valid
|
||||||
|
fix = code.nofix()
|
||||||
|
code("(")
|
||||||
|
var_type, var_code = self.variable.inline(True);
|
||||||
|
var_code_str = str(var_code).replace('*','')
|
||||||
|
if self.flag:
|
||||||
|
code("${var_code_str} != NULL)")
|
||||||
|
else:
|
||||||
|
code("${var_code_str} == NULL)")
|
||||||
|
code.fix(fix)
|
||||||
|
type = self.symtab.find("bool", Type)
|
||||||
|
return type
|
54
src/mem/slicc/ast/LocalVariableAST.py
Normal file
54
src/mem/slicc/ast/LocalVariableAST.py
Normal file
|
@ -0,0 +1,54 @@
|
||||||
|
#
|
||||||
|
# Copyright (c) 2011 Mark D. Hill and David A. Wood
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met: redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer;
|
||||||
|
# redistributions in binary form must reproduce the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer in the
|
||||||
|
# documentation and/or other materials provided with the distribution;
|
||||||
|
# neither the name of the copyright holders nor the names of its
|
||||||
|
# contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
#
|
||||||
|
|
||||||
|
from slicc.ast.StatementAST import StatementAST
|
||||||
|
from slicc.symbols import Var
|
||||||
|
|
||||||
|
class LocalVariableAST(StatementAST):
|
||||||
|
def __init__(self, slicc, type_ast, ident):
|
||||||
|
super(LocalVariableAST, self).__init__(slicc)
|
||||||
|
self.type_ast = type_ast
|
||||||
|
self.ident = ident
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "[LocalVariableAST: %r %r]" % (self.type_ast, self.ident)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def name(self):
|
||||||
|
return self.var_name
|
||||||
|
|
||||||
|
def generate(self, code):
|
||||||
|
type = self.type_ast.type;
|
||||||
|
ident = "%s" % self.ident;
|
||||||
|
|
||||||
|
# Add to symbol table
|
||||||
|
v = Var(self.symtab, self.ident, self.location, type, ident,
|
||||||
|
self.pairs)
|
||||||
|
self.symtab.newSymbol(v)
|
||||||
|
code += "%s* %s" % (type.c_ident, ident)
|
||||||
|
return type
|
|
@ -97,7 +97,26 @@ class MemberMethodCallExprAST(MethodCallExprAST):
|
||||||
|
|
||||||
prefix = ""
|
prefix = ""
|
||||||
implements_interface = False
|
implements_interface = False
|
||||||
if methodId not in obj_type.methods:
|
|
||||||
|
if methodId in obj_type.methods:
|
||||||
|
return_type = obj_type.methods[methodId].return_type
|
||||||
|
|
||||||
|
else:
|
||||||
|
#
|
||||||
|
# Check whether the method is implemented by the super class
|
||||||
|
if "interface" in obj_type:
|
||||||
|
interface_type = self.symtab.find(obj_type["interface"]);
|
||||||
|
|
||||||
|
if methodId in interface_type.methods:
|
||||||
|
return_type = interface_type.methods[methodId].return_type
|
||||||
|
obj_type = interface_type
|
||||||
|
|
||||||
|
else:
|
||||||
|
self.error("Invalid method call: " \
|
||||||
|
"Type '%s' does not have a method %s, '%s'",
|
||||||
|
obj_type, self.proc_name, methodId)
|
||||||
|
|
||||||
|
else:
|
||||||
#
|
#
|
||||||
# The initial method check has failed, but before generating an
|
# The initial method check has failed, but before generating an
|
||||||
# error we must check whether any of the paramTypes implement
|
# error we must check whether any of the paramTypes implement
|
||||||
|
@ -129,16 +148,16 @@ class MemberMethodCallExprAST(MethodCallExprAST):
|
||||||
|
|
||||||
if implementedMethodId not in obj_type.methods:
|
if implementedMethodId not in obj_type.methods:
|
||||||
self.error("Invalid method call: " \
|
self.error("Invalid method call: " \
|
||||||
"Type '%s' does not have a method '%s' nor '%s'",
|
"Type '%s' does not have a method %s, '%s' nor '%s'",
|
||||||
obj_type, methodId, implementedMethodId)
|
obj_type, self.proc_name, methodId, implementedMethodId)
|
||||||
else:
|
else:
|
||||||
#
|
#
|
||||||
# Replace the methodId with the implementedMethodId found in
|
# Replace the methodId with the implementedMethodId found in
|
||||||
# the method list.
|
# the method list.
|
||||||
#
|
#
|
||||||
methodId = implementedMethodId
|
methodId = implementedMethodId
|
||||||
|
|
||||||
return_type = obj_type.methods[methodId].return_type
|
return_type = obj_type.methods[methodId].return_type
|
||||||
|
|
||||||
if return_type.isInterface:
|
if return_type.isInterface:
|
||||||
prefix = "static_cast<%s &>" % return_type.c_ident
|
prefix = "static_cast<%s &>" % return_type.c_ident
|
||||||
prefix = "%s((%s)." % (prefix, code)
|
prefix = "%s((%s)." % (prefix, code)
|
||||||
|
|
40
src/mem/slicc/ast/OodAST.py
Normal file
40
src/mem/slicc/ast/OodAST.py
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
#
|
||||||
|
# Copyright (c) 2011 Mark D. Hill and David A. Wood
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met: redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer;
|
||||||
|
# redistributions in binary form must reproduce the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer in the
|
||||||
|
# documentation and/or other materials provided with the distribution;
|
||||||
|
# neither the name of the copyright holders nor the names of its
|
||||||
|
# contributors may be used to endorse or promote products derived from
|
||||||
|
# this software without specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
#
|
||||||
|
|
||||||
|
from slicc.ast.ExprAST import ExprAST
|
||||||
|
|
||||||
|
class OodAST(ExprAST):
|
||||||
|
def __init__(self, slicc):
|
||||||
|
super(OodAST, self).__init__(slicc)
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "[Ood:]"
|
||||||
|
|
||||||
|
def generate(self, code):
|
||||||
|
code += "NULL"
|
||||||
|
return "OOD"
|
|
@ -45,7 +45,7 @@ class ReturnStatementAST(StatementAST):
|
||||||
self.error("Invalid 'return' statement")
|
self.error("Invalid 'return' statement")
|
||||||
|
|
||||||
# The return type must match
|
# The return type must match
|
||||||
if return_type != actual_type:
|
if actual_type != "OOD" and return_type != actual_type:
|
||||||
self.expr_ast.error("Return type miss-match, expected return " +
|
self.expr_ast.error("Return type miss-match, expected return " +
|
||||||
"type is '%s', actual is '%s'",
|
"type is '%s', actual is '%s'",
|
||||||
return_type, actual_type)
|
return_type, actual_type)
|
||||||
|
|
|
@ -27,17 +27,21 @@
|
||||||
from slicc.ast.ExprAST import ExprAST
|
from slicc.ast.ExprAST import ExprAST
|
||||||
|
|
||||||
class StaticCastAST(ExprAST):
|
class StaticCastAST(ExprAST):
|
||||||
def __init__(self, slicc, type_ast, expr_ast):
|
def __init__(self, slicc, type_ast, type_modifier, expr_ast):
|
||||||
super(StaticCastAST, self).__init__(slicc)
|
super(StaticCastAST, self).__init__(slicc)
|
||||||
|
|
||||||
self.type_ast = type_ast
|
self.type_ast = type_ast
|
||||||
self.expr_ast = expr_ast
|
self.expr_ast = expr_ast
|
||||||
|
self.type_modifier = type_modifier
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return "[StaticCastAST: %r]" % self.expr_ast
|
return "[StaticCastAST: %r]" % self.expr_ast
|
||||||
|
|
||||||
def generate(self, code):
|
def generate(self, code):
|
||||||
actual_type, ecode = self.expr_ast.inline(True)
|
actual_type, ecode = self.expr_ast.inline(True)
|
||||||
|
if self.type_modifier == "pointer":
|
||||||
|
code('static_cast<${{self.type_ast.type.c_ident}} *>($ecode)')
|
||||||
|
else:
|
||||||
code('static_cast<${{self.type_ast.type.c_ident}} &>($ecode)')
|
code('static_cast<${{self.type_ast.type.c_ident}} &>($ecode)')
|
||||||
|
|
||||||
if not "interface" in self.type_ast.type:
|
if not "interface" in self.type_ast.type:
|
||||||
|
|
|
@ -50,10 +50,15 @@ class TypeDeclAST(DeclAST):
|
||||||
|
|
||||||
def generate(self):
|
def generate(self):
|
||||||
ident = str(self.type_ast)
|
ident = str(self.type_ast)
|
||||||
|
machine = self.symtab.state_machine
|
||||||
|
|
||||||
# Make the new type
|
# Make the new type
|
||||||
new_type = Type(self.symtab, ident, self.location, self.pairs,
|
new_type = Type(self.symtab, ident, self.location, self.pairs,
|
||||||
self.state_machine)
|
self.state_machine)
|
||||||
|
|
||||||
|
if machine:
|
||||||
|
machine.addType(new_type)
|
||||||
|
|
||||||
self.symtab.newSymbol(new_type)
|
self.symtab.newSymbol(new_type)
|
||||||
|
|
||||||
# Add all of the fields of the type to it
|
# Add all of the fields of the type to it
|
||||||
|
|
|
@ -46,11 +46,14 @@ from slicc.ast.FuncDeclAST import *
|
||||||
from slicc.ast.IfStatementAST import *
|
from slicc.ast.IfStatementAST import *
|
||||||
from slicc.ast.InPortDeclAST import *
|
from slicc.ast.InPortDeclAST import *
|
||||||
from slicc.ast.InfixOperatorExprAST import *
|
from slicc.ast.InfixOperatorExprAST import *
|
||||||
|
from slicc.ast.IsValidPtrExprAST import *
|
||||||
from slicc.ast.LiteralExprAST import *
|
from slicc.ast.LiteralExprAST import *
|
||||||
|
from slicc.ast.LocalVariableAST import *
|
||||||
from slicc.ast.MachineAST import *
|
from slicc.ast.MachineAST import *
|
||||||
from slicc.ast.MemberExprAST import *
|
from slicc.ast.MemberExprAST import *
|
||||||
from slicc.ast.MethodCallExprAST import *
|
from slicc.ast.MethodCallExprAST import *
|
||||||
from slicc.ast.NewExprAST import *
|
from slicc.ast.NewExprAST import *
|
||||||
|
from slicc.ast.OodAST import *
|
||||||
from slicc.ast.ObjDeclAST import *
|
from slicc.ast.ObjDeclAST import *
|
||||||
from slicc.ast.OutPortDeclAST import *
|
from slicc.ast.OutPortDeclAST import *
|
||||||
from slicc.ast.PairAST import *
|
from slicc.ast.PairAST import *
|
||||||
|
|
|
@ -165,12 +165,15 @@ class SLICC(Grammar):
|
||||||
'check_stop_slots' : 'CHECK_STOP_SLOTS',
|
'check_stop_slots' : 'CHECK_STOP_SLOTS',
|
||||||
'static_cast' : 'STATIC_CAST',
|
'static_cast' : 'STATIC_CAST',
|
||||||
'if' : 'IF',
|
'if' : 'IF',
|
||||||
|
'is_valid' : 'IS_VALID',
|
||||||
|
'is_invalid' : 'IS_INVALID',
|
||||||
'else' : 'ELSE',
|
'else' : 'ELSE',
|
||||||
'return' : 'RETURN',
|
'return' : 'RETURN',
|
||||||
'THIS' : 'THIS',
|
'THIS' : 'THIS',
|
||||||
'CHIP' : 'CHIP',
|
'CHIP' : 'CHIP',
|
||||||
'void' : 'VOID',
|
'void' : 'VOID',
|
||||||
'new' : 'NEW',
|
'new' : 'NEW',
|
||||||
|
'OOD' : 'OOD',
|
||||||
}
|
}
|
||||||
|
|
||||||
literals = ':[]{}(),='
|
literals = ':[]{}(),='
|
||||||
|
@ -576,7 +579,11 @@ class SLICC(Grammar):
|
||||||
|
|
||||||
def p_statement__static_cast(self, p):
|
def p_statement__static_cast(self, p):
|
||||||
"aexpr : STATIC_CAST '(' type ',' expr ')'"
|
"aexpr : STATIC_CAST '(' type ',' expr ')'"
|
||||||
p[0] = ast.StaticCastAST(self, p[3], p[5])
|
p[0] = ast.StaticCastAST(self, p[3], "ref", p[5])
|
||||||
|
|
||||||
|
def p_statement__static_cast_ptr(self, p):
|
||||||
|
"aexpr : STATIC_CAST '(' type ',' STRING ',' expr ')'"
|
||||||
|
p[0] = ast.StaticCastAST(self, p[3], p[5], p[7])
|
||||||
|
|
||||||
def p_statement__return(self, p):
|
def p_statement__return(self, p):
|
||||||
"statement : RETURN expr SEMI"
|
"statement : RETURN expr SEMI"
|
||||||
|
@ -603,6 +610,10 @@ class SLICC(Grammar):
|
||||||
"aexpr : var"
|
"aexpr : var"
|
||||||
p[0] = p[1]
|
p[0] = p[1]
|
||||||
|
|
||||||
|
def p_expr__localvar(self, p):
|
||||||
|
"aexpr : type ident"
|
||||||
|
p[0] = ast.LocalVariableAST(self, p[1], p[2])
|
||||||
|
|
||||||
def p_expr__literal(self, p):
|
def p_expr__literal(self, p):
|
||||||
"aexpr : literal"
|
"aexpr : literal"
|
||||||
p[0] = p[1]
|
p[0] = p[1]
|
||||||
|
@ -619,6 +630,10 @@ class SLICC(Grammar):
|
||||||
"aexpr : NEW type"
|
"aexpr : NEW type"
|
||||||
p[0] = ast.NewExprAST(self, p[2])
|
p[0] = ast.NewExprAST(self, p[2])
|
||||||
|
|
||||||
|
def p_expr__null(self, p):
|
||||||
|
"aexpr : OOD"
|
||||||
|
p[0] = ast.OodAST(self)
|
||||||
|
|
||||||
# globally access a local chip component and call a method
|
# globally access a local chip component and call a method
|
||||||
def p_expr__local_chip_method(self, p):
|
def p_expr__local_chip_method(self, p):
|
||||||
"aexpr : THIS DOT var '[' expr ']' DOT var DOT ident '(' exprs ')'"
|
"aexpr : THIS DOT var '[' expr ']' DOT var DOT ident '(' exprs ')'"
|
||||||
|
@ -687,6 +702,14 @@ class SLICC(Grammar):
|
||||||
"aexpr : '(' expr ')'"
|
"aexpr : '(' expr ')'"
|
||||||
p[0] = p[2]
|
p[0] = p[2]
|
||||||
|
|
||||||
|
def p_expr__is_valid_ptr(self, p):
|
||||||
|
"aexpr : IS_VALID '(' var ')'"
|
||||||
|
p[0] = ast.IsValidPtrExprAST(self, p[3], True)
|
||||||
|
|
||||||
|
def p_expr__is_invalid_ptr(self, p):
|
||||||
|
"aexpr : IS_INVALID '(' var ')'"
|
||||||
|
p[0] = ast.IsValidPtrExprAST(self, p[3], False)
|
||||||
|
|
||||||
def p_literal__string(self, p):
|
def p_literal__string(self, p):
|
||||||
"literal : STRING"
|
"literal : STRING"
|
||||||
p[0] = ast.LiteralExprAST(self, p[1], "std::string")
|
p[0] = ast.LiteralExprAST(self, p[1], "std::string")
|
||||||
|
|
|
@ -60,6 +60,8 @@ class Func(Symbol):
|
||||||
void_type = self.symtab.find("void", Type)
|
void_type = self.symtab.find("void", Type)
|
||||||
if "return_by_ref" in self and self.return_type != void_type:
|
if "return_by_ref" in self and self.return_type != void_type:
|
||||||
return_type += "&"
|
return_type += "&"
|
||||||
|
elif "return_by_pointer" in self and self.return_type != void_type:
|
||||||
|
return_type += "*"
|
||||||
|
|
||||||
return "%s %s(%s);" % (return_type, self.c_ident,
|
return "%s %s(%s);" % (return_type, self.c_ident,
|
||||||
", ".join(self.param_strings))
|
", ".join(self.param_strings))
|
||||||
|
@ -87,6 +89,8 @@ class Func(Symbol):
|
||||||
return_type = self.return_type.c_ident
|
return_type = self.return_type.c_ident
|
||||||
if "return_by_ref" in self and self.return_type != void_type:
|
if "return_by_ref" in self and self.return_type != void_type:
|
||||||
return_type += "&"
|
return_type += "&"
|
||||||
|
if "return_by_pointer" in self and self.return_type != void_type:
|
||||||
|
return_type += "*"
|
||||||
|
|
||||||
if self.isInternalMachineFunc:
|
if self.isInternalMachineFunc:
|
||||||
klass = "%s_Controller" % self.machineStr
|
klass = "%s_Controller" % self.machineStr
|
||||||
|
|
|
@ -46,6 +46,7 @@ class StateMachine(Symbol):
|
||||||
super(StateMachine, self).__init__(symtab, ident, location, pairs)
|
super(StateMachine, self).__init__(symtab, ident, location, pairs)
|
||||||
self.table = None
|
self.table = None
|
||||||
self.config_parameters = config_parameters
|
self.config_parameters = config_parameters
|
||||||
|
|
||||||
for param in config_parameters:
|
for param in config_parameters:
|
||||||
if param.pointer:
|
if param.pointer:
|
||||||
var = Var(symtab, param.name, location, param.type_ast.type,
|
var = Var(symtab, param.name, location, param.type_ast.type,
|
||||||
|
@ -62,6 +63,8 @@ class StateMachine(Symbol):
|
||||||
self.in_ports = []
|
self.in_ports = []
|
||||||
self.functions = []
|
self.functions = []
|
||||||
self.objects = []
|
self.objects = []
|
||||||
|
self.TBEType = None
|
||||||
|
self.EntryType = None
|
||||||
|
|
||||||
self.message_buffer_names = []
|
self.message_buffer_names = []
|
||||||
|
|
||||||
|
@ -107,6 +110,21 @@ class StateMachine(Symbol):
|
||||||
def addObject(self, obj):
|
def addObject(self, obj):
|
||||||
self.objects.append(obj)
|
self.objects.append(obj)
|
||||||
|
|
||||||
|
def addType(self, type):
|
||||||
|
type_ident = '%s' % type.c_ident
|
||||||
|
|
||||||
|
if type_ident == "%s_TBE" %self.ident:
|
||||||
|
if self.TBEType != None:
|
||||||
|
self.error("Multiple Transaction Buffer types in a " \
|
||||||
|
"single machine.");
|
||||||
|
self.TBEType = type
|
||||||
|
|
||||||
|
elif "interface" in type and "AbstractCacheEntry" == type["interface"]:
|
||||||
|
if self.EntryType != None:
|
||||||
|
self.error("Multiple AbstractCacheEntry types in a " \
|
||||||
|
"single machine.");
|
||||||
|
self.EntryType = type
|
||||||
|
|
||||||
# Needs to be called before accessing the table
|
# Needs to be called before accessing the table
|
||||||
def buildTable(self):
|
def buildTable(self):
|
||||||
assert self.table is None
|
assert self.table is None
|
||||||
|
@ -264,12 +282,35 @@ private:
|
||||||
int m_number_of_TBEs;
|
int m_number_of_TBEs;
|
||||||
|
|
||||||
TransitionResult doTransition(${ident}_Event event,
|
TransitionResult doTransition(${ident}_Event event,
|
||||||
${ident}_State state,
|
''')
|
||||||
|
|
||||||
|
if self.EntryType != None:
|
||||||
|
code('''
|
||||||
|
${{self.EntryType.c_ident}}* m_cache_entry_ptr,
|
||||||
|
''')
|
||||||
|
if self.TBEType != None:
|
||||||
|
code('''
|
||||||
|
${{self.TBEType.c_ident}}* m_tbe_ptr,
|
||||||
|
''')
|
||||||
|
|
||||||
|
code('''
|
||||||
const Address& addr);
|
const Address& addr);
|
||||||
|
|
||||||
TransitionResult doTransitionWorker(${ident}_Event event,
|
TransitionResult doTransitionWorker(${ident}_Event event,
|
||||||
${ident}_State state,
|
${ident}_State state,
|
||||||
${ident}_State& next_state,
|
${ident}_State& next_state,
|
||||||
|
''')
|
||||||
|
|
||||||
|
if self.TBEType != None:
|
||||||
|
code('''
|
||||||
|
${{self.TBEType.c_ident}}*& m_tbe_ptr,
|
||||||
|
''')
|
||||||
|
if self.EntryType != None:
|
||||||
|
code('''
|
||||||
|
${{self.EntryType.c_ident}}*& m_cache_entry_ptr,
|
||||||
|
''')
|
||||||
|
|
||||||
|
code('''
|
||||||
const Address& addr);
|
const Address& addr);
|
||||||
|
|
||||||
std::string m_name;
|
std::string m_name;
|
||||||
|
@ -299,10 +340,39 @@ static int m_num_controllers;
|
||||||
if proto:
|
if proto:
|
||||||
code('$proto')
|
code('$proto')
|
||||||
|
|
||||||
|
if self.EntryType != None:
|
||||||
|
code('''
|
||||||
|
|
||||||
|
// Set and Reset for cache_entry variable
|
||||||
|
void set_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, AbstractCacheEntry* m_new_cache_entry);
|
||||||
|
void unset_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr);
|
||||||
|
''')
|
||||||
|
|
||||||
|
if self.TBEType != None:
|
||||||
|
code('''
|
||||||
|
|
||||||
|
// Set and Reset for tbe variable
|
||||||
|
void set_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${ident}_TBE* m_new_tbe);
|
||||||
|
void unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr);
|
||||||
|
''')
|
||||||
|
|
||||||
code('''
|
code('''
|
||||||
|
|
||||||
// Actions
|
// Actions
|
||||||
''')
|
''')
|
||||||
|
if self.TBEType != None and self.EntryType != None:
|
||||||
|
for action in self.actions.itervalues():
|
||||||
|
code('/** \\brief ${{action.desc}} */')
|
||||||
|
code('void ${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${{self.EntryType.c_ident}}*& m_cache_entry_ptr, const Address& addr);')
|
||||||
|
elif self.TBEType != None:
|
||||||
|
for action in self.actions.itervalues():
|
||||||
|
code('/** \\brief ${{action.desc}} */')
|
||||||
|
code('void ${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, const Address& addr);')
|
||||||
|
elif self.EntryType != None:
|
||||||
|
for action in self.actions.itervalues():
|
||||||
|
code('/** \\brief ${{action.desc}} */')
|
||||||
|
code('void ${{action.ident}}(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, const Address& addr);')
|
||||||
|
else:
|
||||||
for action in self.actions.itervalues():
|
for action in self.actions.itervalues():
|
||||||
code('/** \\brief ${{action.desc}} */')
|
code('/** \\brief ${{action.desc}} */')
|
||||||
code('void ${{action.ident}}(const Address& addr);')
|
code('void ${{action.ident}}(const Address& addr);')
|
||||||
|
@ -731,10 +801,92 @@ void $c_ident::clearStats() {
|
||||||
code('''
|
code('''
|
||||||
m_profiler.clearStats();
|
m_profiler.clearStats();
|
||||||
}
|
}
|
||||||
|
''')
|
||||||
|
|
||||||
|
if self.EntryType != None:
|
||||||
|
code('''
|
||||||
|
|
||||||
|
// Set and Reset for cache_entry variable
|
||||||
|
void
|
||||||
|
$c_ident::set_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, AbstractCacheEntry* m_new_cache_entry)
|
||||||
|
{
|
||||||
|
m_cache_entry_ptr = (${{self.EntryType.c_ident}}*)m_new_cache_entry;
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
$c_ident::unset_cache_entry(${{self.EntryType.c_ident}}*& m_cache_entry_ptr)
|
||||||
|
{
|
||||||
|
m_cache_entry_ptr = 0;
|
||||||
|
}
|
||||||
|
''')
|
||||||
|
|
||||||
|
if self.TBEType != None:
|
||||||
|
code('''
|
||||||
|
|
||||||
|
// Set and Reset for tbe variable
|
||||||
|
void
|
||||||
|
$c_ident::set_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${{self.TBEType.c_ident}}* m_new_tbe)
|
||||||
|
{
|
||||||
|
m_tbe_ptr = m_new_tbe;
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
$c_ident::unset_tbe(${{self.TBEType.c_ident}}*& m_tbe_ptr)
|
||||||
|
{
|
||||||
|
m_tbe_ptr = NULL;
|
||||||
|
}
|
||||||
|
''')
|
||||||
|
|
||||||
|
code('''
|
||||||
|
|
||||||
// Actions
|
// Actions
|
||||||
''')
|
''')
|
||||||
|
if self.TBEType != None and self.EntryType != None:
|
||||||
|
for action in self.actions.itervalues():
|
||||||
|
if "c_code" not in action:
|
||||||
|
continue
|
||||||
|
|
||||||
|
code('''
|
||||||
|
/** \\brief ${{action.desc}} */
|
||||||
|
void
|
||||||
|
$c_ident::${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, ${{self.EntryType.c_ident}}*& m_cache_entry_ptr, const Address& addr)
|
||||||
|
{
|
||||||
|
DPRINTF(RubyGenerated, "executing\\n");
|
||||||
|
${{action["c_code"]}}
|
||||||
|
}
|
||||||
|
|
||||||
|
''')
|
||||||
|
elif self.TBEType != None:
|
||||||
|
for action in self.actions.itervalues():
|
||||||
|
if "c_code" not in action:
|
||||||
|
continue
|
||||||
|
|
||||||
|
code('''
|
||||||
|
/** \\brief ${{action.desc}} */
|
||||||
|
void
|
||||||
|
$c_ident::${{action.ident}}(${{self.TBEType.c_ident}}*& m_tbe_ptr, const Address& addr)
|
||||||
|
{
|
||||||
|
DPRINTF(RubyGenerated, "executing\\n");
|
||||||
|
${{action["c_code"]}}
|
||||||
|
}
|
||||||
|
|
||||||
|
''')
|
||||||
|
elif self.EntryType != None:
|
||||||
|
for action in self.actions.itervalues():
|
||||||
|
if "c_code" not in action:
|
||||||
|
continue
|
||||||
|
|
||||||
|
code('''
|
||||||
|
/** \\brief ${{action.desc}} */
|
||||||
|
void
|
||||||
|
$c_ident::${{action.ident}}(${{self.EntryType.c_ident}}*& m_cache_entry_ptr, const Address& addr)
|
||||||
|
{
|
||||||
|
DPRINTF(RubyGenerated, "executing\\n");
|
||||||
|
${{action["c_code"]}}
|
||||||
|
}
|
||||||
|
|
||||||
|
''')
|
||||||
|
else:
|
||||||
for action in self.actions.itervalues():
|
for action in self.actions.itervalues():
|
||||||
if "c_code" not in action:
|
if "c_code" not in action:
|
||||||
continue
|
continue
|
||||||
|
@ -777,9 +929,6 @@ using namespace std;
|
||||||
void
|
void
|
||||||
${ident}_Controller::wakeup()
|
${ident}_Controller::wakeup()
|
||||||
{
|
{
|
||||||
// DEBUG_EXPR(GENERATED_COMP, MedPrio, *this);
|
|
||||||
// DEBUG_EXPR(GENERATED_COMP, MedPrio, g_eventQueue_ptr->getTime());
|
|
||||||
|
|
||||||
int counter = 0;
|
int counter = 0;
|
||||||
while (true) {
|
while (true) {
|
||||||
// Some cases will put us into an infinite loop without this limit
|
// Some cases will put us into an infinite loop without this limit
|
||||||
|
@ -850,9 +999,29 @@ ${ident}_Controller::wakeup()
|
||||||
|
|
||||||
TransitionResult
|
TransitionResult
|
||||||
${ident}_Controller::doTransition(${ident}_Event event,
|
${ident}_Controller::doTransition(${ident}_Event event,
|
||||||
${ident}_State state,
|
''')
|
||||||
|
if self.EntryType != None:
|
||||||
|
code('''
|
||||||
|
${{self.EntryType.c_ident}}* m_cache_entry_ptr,
|
||||||
|
''')
|
||||||
|
if self.TBEType != None:
|
||||||
|
code('''
|
||||||
|
${{self.TBEType.c_ident}}* m_tbe_ptr,
|
||||||
|
''')
|
||||||
|
code('''
|
||||||
const Address &addr)
|
const Address &addr)
|
||||||
{
|
{
|
||||||
|
''')
|
||||||
|
if self.TBEType != None and self.EntryType != None:
|
||||||
|
code('${ident}_State state = ${ident}_getState(m_tbe_ptr, m_cache_entry_ptr, addr);')
|
||||||
|
elif self.TBEType != None:
|
||||||
|
code('${ident}_State state = ${ident}_getState(m_tbe_ptr, addr);')
|
||||||
|
elif self.EntryType != None:
|
||||||
|
code('${ident}_State state = ${ident}_getState(m_cache_entry_ptr, addr);')
|
||||||
|
else:
|
||||||
|
code('${ident}_State state = ${ident}_getState(addr);')
|
||||||
|
|
||||||
|
code('''
|
||||||
${ident}_State next_state = state;
|
${ident}_State next_state = state;
|
||||||
|
|
||||||
DPRINTF(RubyGenerated, "%s, Time: %lld, state: %s, event: %s, addr: %s\\n",
|
DPRINTF(RubyGenerated, "%s, Time: %lld, state: %s, event: %s, addr: %s\\n",
|
||||||
|
@ -863,8 +1032,17 @@ ${ident}_Controller::doTransition(${ident}_Event event,
|
||||||
addr);
|
addr);
|
||||||
|
|
||||||
TransitionResult result =
|
TransitionResult result =
|
||||||
doTransitionWorker(event, state, next_state, addr);
|
''')
|
||||||
|
if self.TBEType != None and self.EntryType != None:
|
||||||
|
code('doTransitionWorker(event, state, next_state, m_tbe_ptr, m_cache_entry_ptr, addr);')
|
||||||
|
elif self.TBEType != None:
|
||||||
|
code('doTransitionWorker(event, state, next_state, m_tbe_ptr, addr);')
|
||||||
|
elif self.EntryType != None:
|
||||||
|
code('doTransitionWorker(event, state, next_state, m_cache_entry_ptr, addr);')
|
||||||
|
else:
|
||||||
|
code('doTransitionWorker(event, state, next_state, addr);')
|
||||||
|
|
||||||
|
code('''
|
||||||
if (result == TransitionResult_Valid) {
|
if (result == TransitionResult_Valid) {
|
||||||
DPRINTF(RubyGenerated, "next_state: %s\\n",
|
DPRINTF(RubyGenerated, "next_state: %s\\n",
|
||||||
${ident}_State_to_string(next_state));
|
${ident}_State_to_string(next_state));
|
||||||
|
@ -877,7 +1055,17 @@ ${ident}_Controller::doTransition(${ident}_Event event,
|
||||||
addr, GET_TRANSITION_COMMENT());
|
addr, GET_TRANSITION_COMMENT());
|
||||||
|
|
||||||
CLEAR_TRANSITION_COMMENT();
|
CLEAR_TRANSITION_COMMENT();
|
||||||
${ident}_setState(addr, next_state);
|
''')
|
||||||
|
if self.TBEType != None and self.EntryType != None:
|
||||||
|
code('${ident}_setState(m_tbe_ptr, m_cache_entry_ptr, addr, next_state);')
|
||||||
|
elif self.TBEType != None:
|
||||||
|
code('${ident}_setState(m_tbe_ptr, addr, next_state);')
|
||||||
|
elif self.EntryType != None:
|
||||||
|
code('${ident}_setState(m_cache_entry_ptr, addr, next_state);')
|
||||||
|
else:
|
||||||
|
code('${ident}_setState(addr, next_state);')
|
||||||
|
|
||||||
|
code('''
|
||||||
} else if (result == TransitionResult_ResourceStall) {
|
} else if (result == TransitionResult_ResourceStall) {
|
||||||
DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %s\\n",
|
DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %s\\n",
|
||||||
g_eventQueue_ptr->getTime(), m_version, "${ident}",
|
g_eventQueue_ptr->getTime(), m_version, "${ident}",
|
||||||
|
@ -902,6 +1090,17 @@ TransitionResult
|
||||||
${ident}_Controller::doTransitionWorker(${ident}_Event event,
|
${ident}_Controller::doTransitionWorker(${ident}_Event event,
|
||||||
${ident}_State state,
|
${ident}_State state,
|
||||||
${ident}_State& next_state,
|
${ident}_State& next_state,
|
||||||
|
''')
|
||||||
|
|
||||||
|
if self.TBEType != None:
|
||||||
|
code('''
|
||||||
|
${{self.TBEType.c_ident}}*& m_tbe_ptr,
|
||||||
|
''')
|
||||||
|
if self.EntryType != None:
|
||||||
|
code('''
|
||||||
|
${{self.EntryType.c_ident}}*& m_cache_entry_ptr,
|
||||||
|
''')
|
||||||
|
code('''
|
||||||
const Address& addr)
|
const Address& addr)
|
||||||
{
|
{
|
||||||
switch(HASH_FUN(state, event)) {
|
switch(HASH_FUN(state, event)) {
|
||||||
|
@ -949,6 +1148,16 @@ if (!%s.areNSlotsAvailable(%s))
|
||||||
|
|
||||||
if stall:
|
if stall:
|
||||||
case('return TransitionResult_ProtocolStall;')
|
case('return TransitionResult_ProtocolStall;')
|
||||||
|
else:
|
||||||
|
if self.TBEType != None and self.EntryType != None:
|
||||||
|
for action in actions:
|
||||||
|
case('${{action.ident}}(m_tbe_ptr, m_cache_entry_ptr, addr);')
|
||||||
|
elif self.TBEType != None:
|
||||||
|
for action in actions:
|
||||||
|
case('${{action.ident}}(m_tbe_ptr, addr);')
|
||||||
|
elif self.EntryType != None:
|
||||||
|
for action in actions:
|
||||||
|
case('${{action.ident}}(m_cache_entry_ptr, addr);')
|
||||||
else:
|
else:
|
||||||
for action in actions:
|
for action in actions:
|
||||||
case('${{action.ident}}(addr);')
|
case('${{action.ident}}(addr);')
|
||||||
|
|
Loading…
Reference in a new issue