/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id: MSI_MOSI_CMP_directory-L2cache.sm 1.12 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
 *
 */
machine(L2Cache, "MESI Directory L2 Cache CMP")
  : CacheMemory * L2cacheMemory,
    int l2_request_latency = 2,
    int l2_response_latency = 2,
    int to_l1_latency = 1
{
  // L2 BANK QUEUES
  // From local bank of L2 cache TO the network
  MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="0", ordered="false";  // this L2 bank -> Memory
  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0", ordered="false";   // this L2 bank -> a local L1
  MessageBuffer responseFromL2Cache, network="To", virtual_network="1", ordered="false";    // this L2 bank -> a local L1 || Memory

  // FROM the network to this local bank of L2 cache
  MessageBuffer unblockToL2Cache, network="From", virtual_network="2", ordered="false";     // a local L1 || Memory -> this L2 bank
  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false";   // a local L1 -> this L2 bank
  MessageBuffer responseToL2Cache, network="From", virtual_network="1", ordered="false";    // a local L1 || Memory -> this L2 bank
  // MessageBuffer unblockToL2Cache, network="From", virtual_network="4", ordered="false"; // a local L1 || Memory -> this L2 bank
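
  // Note: requests travel on virtual network 0, responses on virtual network 1,
  // and unblocks on virtual network 2.  Keeping these message classes on
  // separate virtual networks keeps responses and unblocks from queueing behind
  // stalled requests, the usual deadlock-avoidance arrangement for directory
  // protocols.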

  // STATES
  enumeration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
    // Base states
    NP, desc="Not present in either cache";
    SS, desc="L2 cache entry Shared, also present in one or more L1s";
    M, desc="L2 cache entry Modified, not present in any L1s", format="!b";
    MT, desc="L2 cache entry Modified in a local L1, assume L2 copy stale", format="!b";

    // L2 replacement
    M_I, desc="L2 cache replacing, have all acks, sent dirty data to memory, waiting for ACK from memory";
    MT_I, desc="L2 cache replacing, getting data from exclusive";
    MCT_I, desc="L2 cache replacing, clean in L2, getting data or ack from exclusive";
    I_I, desc="L2 replacing clean data, need to inv sharers and then drop data";
    S_I, desc="L2 replacing dirty data, collecting acks from L1s";

    // Transient States for fetching data from memory
    ISS, desc="L2 idle, got single L1_GETS, issued memory fetch, have not seen response yet";
    IS, desc="L2 idle, got L1_GET_INSTR or multiple L1_GETS, issued memory fetch, have not seen response yet";
    IM, desc="L2 idle, got L1_GETX, issued memory fetch, have not seen response(s) yet";

    // Blocking states
    SS_MB, desc="Blocked for L1_GETX from SS";
    MT_MB, desc="Blocked for L1_GETX from MT";
    M_MB, desc="Blocked for L1_GETX from M";

    MT_IIB, desc="Blocked for L1_GETS from MT, waiting for unblock and data";
    MT_IB, desc="Blocked for L1_GETS from MT, got unblock, waiting for data";
    MT_SB, desc="Blocked for L1_GETS from MT, got data, waiting for unblock";
  }

  // EVENTS
  enumeration(Event, desc="L2 Cache events") {
    // L2 events

    // events initiated by the local L1s
    L1_GET_INSTR, desc="an L1I GET_INSTR request for a block mapped to us";
    L1_GETS, desc="an L1D GETS request for a block mapped to us";
    L1_GETX, desc="an L1D GETX request for a block mapped to us";
    L1_UPGRADE, desc="an L1D UPGRADE request for a block mapped to us";

    L1_PUTX, desc="L1 replacing data";
    L1_PUTX_old, desc="L1 replacing data, but no longer a sharer";

    Fwd_L1_GETX, desc="L1 did not have data, so we supply";
    Fwd_L1_GETS, desc="L1 did not have data, so we supply";
    Fwd_L1_GET_INSTR, desc="L1 did not have data, so we supply";

    // events initiated by this L2
    L2_Replacement, desc="L2 Replacement", format="!r";
    L2_Replacement_clean, desc="L2 Replacement, but data is clean", format="!r";

    // events from memory controller
    Mem_Data, desc="data from memory", format="!r";
    Mem_Ack, desc="ack from memory", format="!r";

    // M->S data writeback
    WB_Data, desc="data from L1";
    WB_Data_clean, desc="clean data from L1";
    Ack, desc="writeback ack";
    Ack_all, desc="all writeback acks received";

    Unblock, desc="Unblock from L1 requestor";
    Unblock_Cancel, desc="Unblock from L1 requestor (FOR XACT MEMORY)";
    Exclusive_Unblock, desc="Exclusive unblock from L1 requestor";

    MEM_Inv, desc="Invalidation from directory";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    NetDest Sharers, desc="tracks the L1 sharers on-chip";
    MachineID Exclusive, desc="Exclusive holder of block";
    DataBlock DataBlk, desc="data for the block";
    bool Dirty, default="false", desc="data is dirty";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Address Address, desc="Physical address for this TBE";
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="Buffer for the data block";
    bool Dirty, default="false", desc="Data is Dirty";

    NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
    MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
    bool isPrefetch, desc="Set if this was caused by a prefetch";

    int pendingAcks, desc="number of pending acks for invalidates during writeback";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable L2_TBEs, template_hack="<L2Cache_TBE>";

  // inclusive cache, returns L2 entries only
  Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, L2cacheMemory[addr]);
  }

  void changeL2Permission(Address addr, AccessPermission permission) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory.changePermission(addr, permission);
    }
  }

  string getCoherenceRequestTypeStr(CoherenceRequestType type) {
    return CoherenceRequestType_to_string(type);
  }

  bool isL2CacheTagPresent(Address addr) {
    return (L2cacheMemory.isTagPresent(addr));
  }

  bool isOneSharerLeft(Address addr, MachineID requestor) {
    assert(getL2CacheEntry(addr).Sharers.isElement(requestor));
    return (getL2CacheEntry(addr).Sharers.count() == 1);
  }

  bool isSharer(Address addr, MachineID requestor) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return getL2CacheEntry(addr).Sharers.isElement(requestor);
    } else {
      return false;
    }
  }

  void addSharer(Address addr, MachineID requestor) {
    //DEBUG_EXPR(machineID);
    //DEBUG_EXPR(requestor);
    //DEBUG_EXPR(addr);
    getL2CacheEntry(addr).Sharers.add(requestor);
  }

  State getState(Address addr) {
    if (L2_TBEs.isPresent(addr)) {
      return L2_TBEs[addr].TBEState;
    } else if (isL2CacheTagPresent(addr)) {
      return getL2CacheEntry(addr).CacheState;
    }
    return State:NP;
  }

  string getStateStr(Address addr) {
    return L2Cache_State_to_string(getState(addr));
  }

  // when is this called
  void setState(Address addr, State state) {

    // MUST CHANGE
    if (L2_TBEs.isPresent(addr)) {
      L2_TBEs[addr].TBEState := state;
    }

    if (isL2CacheTagPresent(addr)) {
      getL2CacheEntry(addr).CacheState := state;

      // Set permission
      if (state == State:SS) {
        changeL2Permission(addr, AccessPermission:Read_Only);
      } else if (state == State:M) {
        changeL2Permission(addr, AccessPermission:Read_Write);
      } else if (state == State:MT) {
        changeL2Permission(addr, AccessPermission:Stale);
      } else {
        changeL2Permission(addr, AccessPermission:Busy);
      }
    }
  }
  Event L1Cache_request_type_to_event(CoherenceRequestType type, Address addr, MachineID requestor) {
    if (type == CoherenceRequestType:GETS) {
      return Event:L1_GETS;
    } else if (type == CoherenceRequestType:GET_INSTR) {
      return Event:L1_GET_INSTR;
    } else if (type == CoherenceRequestType:GETX) {
      return Event:L1_GETX;
    } else if (type == CoherenceRequestType:UPGRADE) {
      if (isL2CacheTagPresent(addr) && getL2CacheEntry(addr).Sharers.isElement(requestor)) {
        return Event:L1_UPGRADE;
      } else {
        return Event:L1_GETX;
      }
    } else if (type == CoherenceRequestType:PUTX) {
      if (isSharer(addr, requestor)) {
        return Event:L1_PUTX;
      } else {
        return Event:L1_PUTX_old;
      }
    } else {
      DEBUG_EXPR(addr);
      DEBUG_EXPR(type);
      error("Invalid L1 forwarded request type");
    }
  }

  // ** OUT_PORTS **

  out_port(L1RequestIntraChipL2Network_out, RequestMsg, L1RequestFromL2Cache);
  out_port(DirRequestIntraChipL2Network_out, RequestMsg, DirRequestFromL2Cache);
  out_port(responseIntraChipL2Network_out, ResponseMsg, responseFromL2Cache);


  in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache) {
    if (L1unblockNetwork_in.isReady()) {
      peek(L1unblockNetwork_in, ResponseMsg) {
        DEBUG_EXPR(in_msg.Address);
        DEBUG_EXPR(getState(in_msg.Address));
        DEBUG_EXPR(in_msg.Sender);
        DEBUG_EXPR(in_msg.Type);
        DEBUG_EXPR(in_msg.Destination);

        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
          trigger(Event:Exclusive_Unblock, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
          trigger(Event:Unblock, in_msg.Address);
        } else {
          error("unknown unblock message");
        }
      }
    }
  }

  // Response IntraChip L2 Network - response msg to this particular L2 bank
  in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache) {
    if (responseIntraChipL2Network_in.isReady()) {
      peek(responseIntraChipL2Network_in, ResponseMsg) {
        // check whether it's from a local L1 or an off-chip source
        assert(in_msg.Destination.isElement(machineID));
        if (machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
          if (in_msg.Type == CoherenceResponseType:DATA) {
            if (in_msg.Dirty) {
              trigger(Event:WB_Data, in_msg.Address);
            } else {
              trigger(Event:WB_Data_clean, in_msg.Address);
            }
          } else if (in_msg.Type == CoherenceResponseType:ACK) {
            if ((L2_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0) {
              trigger(Event:Ack_all, in_msg.Address);
            } else {
              trigger(Event:Ack, in_msg.Address);
            }
          } else {
            error("unknown message type");
          }

        } else { // external message
          if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
            trigger(Event:Mem_Data, in_msg.Address);  // L2 now has data and all off-chip acks
          } else if (in_msg.Type == CoherenceResponseType:MEMORY_ACK) {
            trigger(Event:Mem_Ack, in_msg.Address);   // memory has acknowledged the writeback
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:MEM_Inv, in_msg.Address);   // invalidation request from the directory
          } else {
            error("unknown message type");
          }
        }
      }
    }  // if not ready, do nothing
  }

  // L1 Request
  in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
    if (L1RequestIntraChipL2Network_in.isReady()) {
      peek(L1RequestIntraChipL2Network_in, RequestMsg) {
        DEBUG_EXPR(in_msg.Address);
        //DEBUG_EXPR(id);
        DEBUG_EXPR(getState(in_msg.Address));
        //DEBUG_EXPR(in_msg.Requestor);
        DEBUG_EXPR(in_msg.Type);
        //DEBUG_EXPR(in_msg.Destination);
        assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
        assert(in_msg.Destination.isElement(machineID));
        if (L2cacheMemory.isTagPresent(in_msg.Address)) {
          // The L2 contains the block, so proceed with handling the request
          trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
        } else {
          if (L2cacheMemory.cacheAvail(in_msg.Address)) {
            // L2 doesn't have the line, but we have space for it in the L2
            trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.Requestor), in_msg.Address);
          } else {
            // No room in the L2, so we need to make room before handling the request
            if (getL2CacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)).Dirty) {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            } else {
              trigger(Event:L2_Replacement_clean, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          }
        }
      }
    }
  }
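
  // Note: when the set is full, the port above triggers L2_Replacement or
  // L2_Replacement_clean on the victim address returned by cacheProbe.  The
  // original L1 request is not popped by the replacement transitions, so it is
  // re-examined and handled once the victim has been evicted.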


  // ACTIONS

  action(a_issueFetchToMemory, "a", desc="fetch data from memory") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency=l2_request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Control;
      }
    }
  }

  action(b_forwardRequestToExclusive, "b", desc="Forward request to the exclusive L1") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(getL2CacheEntry(address).Exclusive);
        out_msg.MessageSize := MessageSizeType:Request_Control;
      }
    }
  }

  action(c_exclusiveReplacement, "c", desc="Send data to memory") {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:MEMORY_DATA;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.Dirty := getL2CacheEntry(address).Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(c_exclusiveCleanReplacement, "cc", desc="Send ack to memory for clean replacement") {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:ACK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }

  action(ct_exclusiveReplacementFromTBE, "ct", desc="Send data to memory") {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:MEMORY_DATA;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := L2_TBEs[address].DataBlk;
      out_msg.Dirty := L2_TBEs[address].Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }
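
  // Note on AckCount in the data responses below: the L2 hands the requestor a
  // negative count equal to the number of sharers that must be invalidated
  // (excluding the requestor itself).  Each invalidated L1 acks the requestor
  // with +1, so the requestor knows it has all acks once its running sum
  // reaches zero.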

  action(d_sendDataToRequestor, "d", desc="Send data from cache to requestor") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;

        out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
        if (getL2CacheEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.AckCount := out_msg.AckCount + 1;
        }
      }
    }
  }

  action(dd_sendExclusiveDataToRequestor, "dd", desc="Send exclusive data from cache to requestor") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;

        out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count();
        if (getL2CacheEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.AckCount := out_msg.AckCount + 1;
        }
      }
    }
  }

  action(ds_sendSharedDataToRequestor, "ds", desc="Send shared data from cache to requestor") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.AckCount := 0;
      }
    }
  }

  action(e_sendDataToGetSRequestors, "e", desc="Send data from cache to all GetS IDs") {
    assert(L2_TBEs[address].L1_GetS_IDs.count() > 0);
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.Sender := machineID;
      out_msg.Destination := L2_TBEs[address].L1_GetS_IDs;  // internal nodes
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.Dirty := getL2CacheEntry(address).Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(ex_sendExclusiveDataToGetSRequestors, "ex", desc="Send exclusive data from cache to the lone GetS ID") {
    assert(L2_TBEs[address].L1_GetS_IDs.count() == 1);
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
      out_msg.Sender := machineID;
      out_msg.Destination := L2_TBEs[address].L1_GetS_IDs;  // internal nodes
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.Dirty := getL2CacheEntry(address).Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(ee_sendDataToGetXRequestor, "ee", desc="Send data from cache to GetX ID") {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.Sender := machineID;
      out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID);
      //DEBUG_EXPR(out_msg.Destination);
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.Dirty := getL2CacheEntry(address).Dirty;
      DEBUG_EXPR(out_msg.Address);
      //DEBUG_EXPR(out_msg.Destination);
      //DEBUG_EXPR(out_msg.DataBlk);
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(f_sendInvToSharers, "f", desc="invalidate sharers for L2 replacement") {
    enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:INV;
      out_msg.Requestor := machineID;
      out_msg.Destination := getL2CacheEntry(address).Sharers;
      out_msg.MessageSize := MessageSizeType:Request_Control;
    }
  }

  action(fw_sendFwdInvToSharers, "fw", desc="invalidate sharers for request") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := getL2CacheEntry(address).Sharers;
        out_msg.MessageSize := MessageSizeType:Request_Control;
      }
    }
  }

  action(fwm_sendFwdInvToSharersMinusRequestor, "fwm", desc="invalidate sharers for request, requestor is sharer") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := getL2CacheEntry(address).Sharers;
        out_msg.Destination.remove(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Request_Control;
      }
    }
  }

  // OTHER ACTIONS
  action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
    check_allocate(L2_TBEs);
    L2_TBEs.allocate(address);
    L2_TBEs[address].L1_GetS_IDs.clear();
    L2_TBEs[address].DataBlk := getL2CacheEntry(address).DataBlk;
    L2_TBEs[address].Dirty := getL2CacheEntry(address).Dirty;
    L2_TBEs[address].pendingAcks := getL2CacheEntry(address).Sharers.count();
  }

  action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
    L2_TBEs.deallocate(address);
  }

  action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
    profileMsgDelay(0, L1RequestIntraChipL2Network_in.dequeue_getDelayCycles());
  }

  action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
    profileMsgDelay(0, L1unblockNetwork_in.dequeue_getDelayCycles());
  }

  action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
    profileMsgDelay(3, responseIntraChipL2Network_in.dequeue_getDelayCycles());
  }

  action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
    peek(responseIntraChipL2Network_in, ResponseMsg) {
      getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
      getL2CacheEntry(address).Dirty := in_msg.Dirty;
    }
  }

  action(mr_writeDataToCacheFromRequest, "mr", desc="Write data from L1 request queue to cache") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
      getL2CacheEntry(address).Dirty := in_msg.Dirty;
    }
  }
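
  // pendingAcks (initialized in i_allocateTBE to the current sharer count)
  // tracks how many invalidation acks the L2 still expects during a writeback;
  // q_updateAck below subtracts each incoming AckCount, and the
  // responseIntraChipL2Network_in port triggers Ack_all once the incoming ack
  // brings the count to zero.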

  action(q_updateAck, "q", desc="update pending ack count") {
    peek(responseIntraChipL2Network_in, ResponseMsg) {
      L2_TBEs[address].pendingAcks := L2_TBEs[address].pendingAcks - in_msg.AckCount;
      APPEND_TRANSITION_COMMENT(in_msg.AckCount);
      APPEND_TRANSITION_COMMENT(" p: ");
      APPEND_TRANSITION_COMMENT(L2_TBEs[address].pendingAcks);
    }
  }

  action(qq_writeDataToTBE, "\qq", desc="Write data from response queue to TBE") {
    peek(responseIntraChipL2Network_in, ResponseMsg) {
      L2_TBEs[address].DataBlk := in_msg.DataBlk;
      L2_TBEs[address].Dirty := in_msg.Dirty;
    }
  }

  action(z_stall, "z", desc="Stall") {
  }

  action(ss_recordGetSL1ID, "\s", desc="Record L1 GetS for load response") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      L2_TBEs[address].L1_GetS_IDs.add(in_msg.Requestor);
    }
  }

  action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      L2_TBEs[address].L1_GetX_ID := in_msg.Requestor;
    }
  }

  action(set_setMRU, "\set", desc="set the MRU entry") {
    L2cacheMemory.setMRU(address);
  }

  action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
    if (L2cacheMemory.isTagPresent(address) == false) {
      L2cacheMemory.allocate(address, new Entry);
    }
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
  }

  action(t_sendWBAck, "t", desc="Send writeback ACK") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:WB_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(ts_sendInvAckToUpgrader, "ts", desc="Send ACK to upgrader") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        // upgrader doesn't get ack from itself, hence the + 1
        out_msg.AckCount := 0 - getL2CacheEntry(address).Sharers.count() + 1;
      }
    }
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      //profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, L1CacheMachIDToProcessorNum(in_msg.Requestor));
    }
  }

  action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      // profile_request(in_msg.L1CacheStateStr, getStateStr(address), "NA", getCoherenceRequestTypeStr(in_msg.Type));
    }
  }


  action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      addSharer(address, in_msg.Requestor);
      APPEND_TRANSITION_COMMENT( getL2CacheEntry(address).Sharers );
    }
  }

  action(nnu_addSharerFromUnblock, "\nu", desc="Add L1 sharer to list") {
    peek(L1unblockNetwork_in, ResponseMsg) {
      addSharer(address, in_msg.Sender);
    }
  }


  action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      getL2CacheEntry(address).Sharers.remove(in_msg.Requestor);
    }
  }

  action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      getL2CacheEntry(address).Sharers.clear();
    }
  }

  action(mm_markExclusive, "\m", desc="set the exclusive owner") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      getL2CacheEntry(address).Sharers.clear();
      getL2CacheEntry(address).Exclusive := in_msg.Requestor;
      addSharer(address, in_msg.Requestor);
    }
  }

  action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
    peek(L1unblockNetwork_in, ResponseMsg) {
      getL2CacheEntry(address).Sharers.clear();
      getL2CacheEntry(address).Exclusive := in_msg.Sender;
      addSharer(address, in_msg.Sender);
    }
  }

  action(zz_recycleL1RequestQueue, "zz", desc="recycle L1 request queue") {
    L1RequestIntraChipL2Network_in.recycle();
  }

  action(zn_recycleResponseNetwork, "zn", desc="recycle memory request") {
    responseIntraChipL2Network_in.recycle();
  }


  //*****************************************************
  // TRANSITIONS
  //*****************************************************


  //===============================================
  // BASE STATE - I

  // Transitions from I (Idle)
  transition({NP, IS, ISS, IM, SS, M, M_I, MT_I, MCT_I, I_I, S_I, SS_MB, M_MB, MT_IIB, MT_IB, MT_SB}, L1_PUTX) {
    jj_popL1RequestQueue;
  }

  transition({NP, SS, M, MT, M_I, MT_I, MCT_I, I_I, S_I, IS, ISS, IM, SS_MB, MT_MB, M_MB, MT_IIB, MT_IB, MT_SB}, L1_PUTX_old) {
    jj_popL1RequestQueue;
  }

  transition({IM, IS, ISS, SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L2_Replacement, L2_Replacement_clean}) {
    zz_recycleL1RequestQueue;
  }

  transition({IM, IS, ISS, SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, MEM_Inv) {
    zn_recycleResponseNetwork;
  }

  transition({S_I, M_I, MT_I}, MEM_Inv) {
    o_popIncomingResponseQueue;
  }


  transition({SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE}) {
    zz_recycleL1RequestQueue;
  }


  transition(NP, L1_GETS, ISS) {
    qq_allocateL2CacheBlock;
    ll_clearSharers;
    nn_addSharer;
    i_allocateTBE;
    ss_recordGetSL1ID;
    a_issueFetchToMemory;
    uu_profileMiss;
    jj_popL1RequestQueue;
  }

  transition(NP, L1_GET_INSTR, IS) {
    qq_allocateL2CacheBlock;
    ll_clearSharers;
    nn_addSharer;
    i_allocateTBE;
    ss_recordGetSL1ID;
    a_issueFetchToMemory;
    uu_profileMiss;
    jj_popL1RequestQueue;
  }

  transition(NP, L1_GETX, IM) {
    qq_allocateL2CacheBlock;
    ll_clearSharers;
    // nn_addSharer;
    i_allocateTBE;
    xx_recordGetXL1ID;
    a_issueFetchToMemory;
    uu_profileMiss;
    jj_popL1RequestQueue;
  }

  // transitions from IS/IM

  transition(ISS, Mem_Data, MT_MB) {
    m_writeDataToCache;
    ex_sendExclusiveDataToGetSRequestors;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(IS, Mem_Data, SS) {
    m_writeDataToCache;
    e_sendDataToGetSRequestors;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(IM, Mem_Data, MT_MB) {
    m_writeDataToCache;
    ee_sendDataToGetXRequestor;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition({IS, ISS}, {L1_GETS, L1_GET_INSTR}, IS) {
    nn_addSharer;
    ss_recordGetSL1ID;
    uu_profileMiss;
    jj_popL1RequestQueue;
  }

  transition({IS, ISS}, L1_GETX) {
    zz_recycleL1RequestQueue;
  }

  transition(IM, {L1_GETX, L1_GETS, L1_GET_INSTR}) {
    zz_recycleL1RequestQueue;
  }

  // transitions from SS
  transition(SS, {L1_GETS, L1_GET_INSTR}) {
    ds_sendSharedDataToRequestor;
    nn_addSharer;
    uu_profileMiss;
    set_setMRU;
    jj_popL1RequestQueue;
  }


  transition(SS, L1_GETX, SS_MB) {
    d_sendDataToRequestor;
    // fw_sendFwdInvToSharers;
    fwm_sendFwdInvToSharersMinusRequestor;
    uu_profileMiss;
    set_setMRU;
    jj_popL1RequestQueue;
  }

  transition(SS, L1_UPGRADE, SS_MB) {
    fwm_sendFwdInvToSharersMinusRequestor;
    ts_sendInvAckToUpgrader;
    uu_profileMiss;
    set_setMRU;
    jj_popL1RequestQueue;
  }

  transition(SS, L2_Replacement_clean, I_I) {
    i_allocateTBE;
    f_sendInvToSharers;
    rr_deallocateL2CacheBlock;
  }

  transition(SS, {L2_Replacement, MEM_Inv}, S_I) {
    i_allocateTBE;
    f_sendInvToSharers;
    rr_deallocateL2CacheBlock;
  }

  transition(M, L1_GETX, MT_MB) {
    d_sendDataToRequestor;
    uu_profileMiss;
    set_setMRU;
    jj_popL1RequestQueue;
  }

  transition(M, L1_GET_INSTR, SS) {
    d_sendDataToRequestor;
    nn_addSharer;
    uu_profileMiss;
    set_setMRU;
    jj_popL1RequestQueue;
  }

  transition(M, L1_GETS, MT_MB) {
    dd_sendExclusiveDataToRequestor;
    uu_profileMiss;
    set_setMRU;
    jj_popL1RequestQueue;
  }

  transition(M, {L2_Replacement, MEM_Inv}, M_I) {
    i_allocateTBE;
    c_exclusiveReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(M, L2_Replacement_clean, M_I) {
    i_allocateTBE;
    c_exclusiveCleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  // transitions from MT

  transition(MT, L1_GETX, MT_MB) {
    b_forwardRequestToExclusive;
    uu_profileMiss;
    set_setMRU;
    jj_popL1RequestQueue;
  }


  transition(MT, {L1_GETS, L1_GET_INSTR}, MT_IIB) {
    b_forwardRequestToExclusive;
    uu_profileMiss;
    set_setMRU;
    jj_popL1RequestQueue;
  }

  transition(MT, {L2_Replacement, MEM_Inv}, MT_I) {
    i_allocateTBE;
    f_sendInvToSharers;
    rr_deallocateL2CacheBlock;
  }

  transition(MT, L2_Replacement_clean, MCT_I) {
    i_allocateTBE;
    f_sendInvToSharers;
    rr_deallocateL2CacheBlock;
  }

  transition(MT, L1_PUTX, M) {
    ll_clearSharers;
    mr_writeDataToCacheFromRequest;
    t_sendWBAck;
    jj_popL1RequestQueue;
  }


  // transitions from blocking states
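  // After the L2 supplies (or forwards) data it sits in a *_MB / MT_I*B state
  // and recycles any new L1 requests for the block until the requesting L1
  // sends an Unblock / Exclusive_Unblock.  For an L1_GETS forwarded from MT,
  // the writeback data and the unblock may arrive in either order: MT_IIB goes
  // to MT_IB if the unblock comes first, or to MT_SB if the data comes first,
  // and reaches SS once both have arrived.
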
  transition(SS_MB, Unblock_Cancel, SS) {
    k_popUnblockQueue;
  }

  transition(MT_MB, Unblock_Cancel, MT) {
    k_popUnblockQueue;
  }

  transition(MT_IB, Unblock_Cancel, MT) {
    k_popUnblockQueue;
  }

  transition(SS_MB, Exclusive_Unblock, MT) {
    // update actual directory
    mmu_markExclusiveFromUnblock;
    k_popUnblockQueue;
  }

  transition({M_MB, MT_MB}, Exclusive_Unblock, MT) {
    // update actual directory
    mmu_markExclusiveFromUnblock;
    k_popUnblockQueue;
  }

  transition(MT_IIB, Unblock, MT_IB) {
    nnu_addSharerFromUnblock;
    k_popUnblockQueue;
  }

  transition(MT_IIB, {WB_Data, WB_Data_clean}, MT_SB) {
    m_writeDataToCache;
    o_popIncomingResponseQueue;
  }

  transition(MT_IB, {WB_Data, WB_Data_clean}, SS) {
    m_writeDataToCache;
    o_popIncomingResponseQueue;
  }

  transition(MT_SB, Unblock, SS) {
    nnu_addSharerFromUnblock;
    k_popUnblockQueue;
  }

  // writeback states
  transition({I_I, S_I, MT_I, MCT_I, M_I}, {L1_GETX, L1_UPGRADE, L1_GETS, L1_GET_INSTR}) {
    zz_recycleL1RequestQueue;
  }

  transition(I_I, Ack) {
    q_updateAck;
    o_popIncomingResponseQueue;
  }

  transition(I_I, Ack_all, M_I) {
    c_exclusiveCleanReplacement;
    o_popIncomingResponseQueue;
  }

  transition({MT_I, MCT_I}, WB_Data, M_I) {
    qq_writeDataToTBE;
    ct_exclusiveReplacementFromTBE;
    o_popIncomingResponseQueue;
  }

  transition(MCT_I, {WB_Data_clean, Ack_all}, M_I) {
    c_exclusiveCleanReplacement;
    o_popIncomingResponseQueue;
  }

  // L1 never changed Dirty data
  transition(MT_I, Ack_all, M_I) {
    ct_exclusiveReplacementFromTBE;
    o_popIncomingResponseQueue;
  }


  // drop this because L1 will send data again
  // the reason we don't accept is that the request virtual network may be completely backed up
  // transition(MT_I, L1_PUTX) {
  //   jj_popL1RequestQueue;
  // }

  // possible race between unblock and immediate replacement
  transition(MT_MB, L1_PUTX) {
    zz_recycleL1RequestQueue;
  }

  transition(MT_I, WB_Data_clean, NP) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(S_I, Ack) {
    q_updateAck;
    o_popIncomingResponseQueue;
  }

  transition(S_I, Ack_all, M_I) {
    ct_exclusiveReplacementFromTBE;
    o_popIncomingResponseQueue;
  }

  transition(M_I, Mem_Ack, NP) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }
}