/*
|
|
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
|
|
* All rights reserved.
|
|
*
|
|
* Redistribution and use in source and binary forms, with or without
|
|
* modification, are permitted provided that the following conditions are
|
|
* met: redistributions of source code must retain the above copyright
|
|
* notice, this list of conditions and the following disclaimer;
|
|
* redistributions in binary form must reproduce the above copyright
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
* documentation and/or other materials provided with the distribution;
|
|
* neither the name of the copyright holders nor the names of its
|
|
* contributors may be used to endorse or promote products derived from
|
|
* this software without specific prior written permission.
|
|
*
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
*/
|
|
|
|
/*
|
|
* $Id$
|
|
*
|
|
*/
|
|
|
|
// L2 cache bank controller for a MOSI directory-based CMP protocol.
machine(L2Cache, "MOSI Directory L2 Cache CMP") {

// L2 BANK QUEUES
// From local bank of L2 cache TO the network
MessageBuffer dummyFrom0, network="To", virtual_network="0", ordered="false"; // dummy buffer that shouldn't be used
MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="1", ordered="false"; // this L2 bank -> mod-directory
MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="2", ordered="true"; // this L2 bank -> a local L1
MessageBuffer responseFromL2Cache, network="To", virtual_network="3", ordered="false"; // this L2 bank -> a local L1 || mod-directory
MessageBuffer finalAckFromL2Cache, network="To", virtual_network="4", ordered="false"; // this L2 bank -> mod-directory

// FROM the network to this local bank of L2 cache
//MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="true"; // a local L1 -> this L2 bank
MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="true"; // a local L1 -> this L2 bank
MessageBuffer dummyTo1, network="From", virtual_network="1", ordered="false"; // dummy buffer that shouldn't be used
MessageBuffer forwardedRequestToL2Cache, network="From", virtual_network="2", ordered="true"; // mod-directory -> this L2 bank
MessageBuffer responseToL2Cache, network="From", virtual_network="3", ordered="false"; // a local L1 || mod-directory -> this L2 bank
MessageBuffer dummyTo4, network="From", virtual_network="4", ordered="false"; // dummy buffer that shouldn't be used
|
|
|
|
// STATES
|
|
// Stable and transient cache-line states for this L2 bank.
// Base states follow MOSI; suffixes on transient states name the
// transition in progress (e.g. L2_IS = Idle, GETS issued, awaiting data).
enumeration(State, desc="L2 Cache states", default="L2Cache_State_L2_NP") {
  // Base states
  L2_NP, desc="Not present in either cache";
  L2_I, desc="L2 cache entry Idle";
  L2_S, desc="L2 cache entry Shared, not present in any local L1s";
  L2_O, desc="L2 cache entry Owned, not present in any local L1s";
  L2_M, desc="L2 cache entry Modified, not present in any L1s", format="!b";
  L2_SS, desc="L2 cache entry Shared, also present in one or more L1s";
  L2_SO, desc="L2 cache entry Owned, also present in one or more L1s or ext L2s";
  L2_MT, desc="L2 cache entry Modified in a local L1, assume L2 copy stale", format="!b";

  // Transient States

  // Transient States from I
  L2_IS, desc="L2 idle, issued GETS, have not seen response yet";
  L2_ISZ, desc="L2 idle, issued GETS, saw a L1_GETX, have not seen data for GETS yet", format="!b";
  L2_ISI, desc="L2 idle, issued GETS, saw INV, have not seen data for GETS yet", format="!b";
  L2_IMV, desc="L2 idle, issued GETX, valid int L1, have not seen response(s) yet";
  L2_MV, desc="L2 modified, a valid old L1 copy exist, external world gave write permission";
  L2_IM, desc="L2 idle, issued GETX, no valid int L1, have not seen response(s) yet";
  L2_IMO, desc="L2 idle, issued GETX, saw forwarded GETS";
  L2_IMI, desc="L2 idle, issued GETX, saw forwarded GETX";
  L2_IMZ, desc="L2 idle, issued GETX, saw another L1_GETX";
  L2_IMOI, desc="L2 idle, issued GETX, saw GETS, saw forwarded GETX";
  L2_IMOZ, desc="L2 idle, issued GETX, saw GETS, then a L1_GETX";

  // Invalidation steps for S -> I
  L2_SIC, desc="L2 shared, L2_INV, valid L1 copies exist, issued invalidates, have not seen responses yet";
  L2_SIV, desc="L2 shared, L2_Replacement, valid L1 copies exist, issued invalidates, have not seen responses yet";

  // Invalidation steps for M -> I for L2 Replacement
  L2_MIV, desc="L2 modified, a valid L1 copy exist, issued forced writeback, have not seen the response yet";
  L2_MIN, desc="L2 modified, no valid L1 copies, issued PUTX, have not seen response yet";

  // Invalidation steps for M -> I for a Forwarded GetX
  L2_MIC, desc="L2 modified, a valid L1 copy exist, issued forced writeback, have not seen the response yet";

  // In MT state and see another L1_GETX request
  L2_MIT, desc="L2 modified, a valid L1 copy exist, saw L1_GETX, issued INV, have not seen the response yet";

  // Downgrade steps for M -> SO
  L2_MO, desc="L2 modified, a valid L1 copy exist, issued downgrade request, have not seen response yet";
  L2_MOIC, desc="L2 modified, a valid L1 copy exist, issued downgrade request, saw INV, have not seen response yet";
  L2_MOICR, desc="L2 modified, a valid L1 copy exist, issued invalidate request, saw INV, have not seen response yet";
  L2_MOZ, desc="L2 modified, a valid L1 copy exist, issued downgrade request, saw L1_GETX, have not seen response yet";

  // Invalidation steps for O/SO -> I for L2 Replacement
  L2_OIV, desc="L2 owned, valid L1 copies exist, issued invalidates, have not seen responses yet from L1s";
  L2_OIN, desc="L2 owned, no valid L1 copies, issued PUTX, have not seen response yet from dir";

  // Invalidation steps for SO -> I for a Forwarded GetX
  L2_OIC, desc="L2 owned, valid L1 copies exist, issued invalidates, have not seen responses yet from L1s";

  // Strange OM states
  // Note: strange states, because it is waiting for the line
  // to be stolen away, or look like it has been stolen away.  The
  // common case is that we see a forward from the directory that is
  // really from us, we forwarded the data to our dataqueue, and
  // everything works fine.
  L2_OMV, desc="L2 owned and valid L1 copies, issued GETX and invalidates, have not seen responses yet";
  L2_OM, desc="L2 owned and no valid L1 copies, issued GETX, have not seen response yet";
}
|
|
|
|
// EVENTS
|
|
// Events handled by this L2 bank, grouped by originator:
// local L1s, local L1 responses, external L2s/directory, and this L2 itself.
enumeration(Event, desc="L2 Cache events") {
  // L2 events

  // events initiated by the local L1s
  L1_GET_INSTR, desc="a L1I GET INSTR request for a block maped to us";
  L1_GETS, desc="a L1D GETS request for a block maped to us";
  L1_GETX, desc="a L1D GETX request for a block maped to us";
  L1_UPGRADE, desc="a L1D UPGRADE request for a block maped to us";
  L1_UPGRADE_no_others, desc="a L1D UPGRADE request for a block maped to us, requestor is the only on-chip sharer";
  L1_PUTX, desc="a L1D PUTX request for a block maped to us (L1 replacement of a modified block)";
  L1_PUTX_last, desc="a L1D PUTX request for a block maped to us (L1 replacement of a modified block) last sharer";
  L1_PUTX_old, desc="an old L1D PUTX request for a block maped to us (L1 replacement of a modified block)";
  L1_PUTS, desc="a L1 replacement of a shared block", format="!r";
  L1_PUTS_last, desc="a L1 replacement of the last local L1 shared block", format="!r";
  L1_PUTS_old, desc="an old L1 replacement of a shared block", format="!r";

  // events of local L1 responses
  Proc_int_ack, "Proc on-chip L1 Cache ack", desc="Ack from on-chip L1 Cache";
  Proc_last_int_ack, "Proc last on-chip L1 Cache ack", desc="Last on-chip L1 Cache ack", format="!r";

  Data_int_ack, "Data int ack", desc="Received modified data from L1 now proceed in handling miss";

  // events initiated by the external L2s
  Forwarded_GETS, "Forwarded GETS", desc="Directory forwards Inter-chip GETS to us";
  Forwarded_GET_INSTR, "Forwarded GETINSTR", desc="Inter-chip Forwarded GETINSTR";
  Forwarded_GETX, "Forwarded GETX", desc="Directory forwards Inter-chip GETX to us";
  L2_INV, "L2_INV", desc="L2 Invalidation initiated from other L2", format="!r";

  // events initiated by this L2
  L2_Replacement, desc="L2 Replacement", format="!r";

  // events of external L2 responses
  Proc_ext_ack, "Proc off-chip ack", desc="Ack from off-chip";
  Proc_last_ext_ack, "Proc last off-chip ack", desc="Last off-chip ack", format="!r";

  Data_ext_ack_0, "Data ack 0", desc="Data with ack count = 0";
  Data_ext_ack_not_0, "Data ack not 0", desc="Data with ack count != 0 (but haven't seen all acks first";
  // Data_ext_ack_not_0_last: is when the requestor has seen all acks but the directory has not, therefore
  // the directory must be told that we now have the data
  Data_ext_ack_not_0_last, "Data ack not 0 last", desc="Data with ack count != 0 after having received all acks";

  Dir_WB_ack, "WB ack", desc="Writeback ack from dir";
  Dir_exe_ack, "Only copy", desc="Directory tells us we already have exclusive permission, go directly to MT state";
}
|
|
|
|
// TYPES
|
|
|
|
// CacheEntry
|
|
// CacheEntry: one L2 line, its coherence state, and the on-chip L1 sharer set.
structure(Entry, desc="...", interface="AbstractCacheEntry") {
  State CacheState, desc="cache state";
  NetDest Sharers, desc="tracks the L1 shares on-chip";
  DataBlock DataBlk, desc="data for the block";
}
|
|
|
|
// TBE fields
|
|
// TBE fields
// Transaction Buffer Entry: per-address bookkeeping for an in-flight
// (transient-state) transaction at this L2 bank.
structure(TBE, desc="...") {
  Address Address, desc="Physical address for this TBE";
  State TBEState, desc="Transient state";
  DataBlock DataBlk, desc="Buffer for the data block";
  int NumPendingExtAcks, desc="Number of ext acks that this L2 bank is waiting for";
  int NumPendingIntAcks, desc="Number of int acks that this L2 bank is waiting for";
  NetDest Forward_GetS_IDs, desc="Set of the external processors to forward the block";
  NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
  MachineID Forward_GetX_ID, desc="ID of the L2 cache to forward the block";
  MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
  MachineID InvalidatorID, desc="ID of the L2 cache (needed for L2_SS -> L2_I)";
  int ForwardGetX_AckCount, desc="Number of acks the GetX we are forwarded needs";
  bool isPrefetch, desc="Set if this was caused by a prefetch";
  bool isThreeHop, desc="is this request a three hop";
  bool validForwardedGetXId, desc="Indicate whether a forwarded GetX ID is valid";
  bool validInvalidator, desc="Indicate whether an invalidator is valid";
  bool isInternalRequestOnly, desc="Is internal request only, i.e. only L1s";
}
|
|
|
|
// Interface to the externally implemented (C++) cache container.
external_type(CacheMemory) {
  bool cacheAvail(Address);            // is there a free way in this address's set?
  Address cacheProbe(Address);         // address of the victim line for this set
  void allocate(Address);
  void deallocate(Address);
  Entry lookup(Address);
  void changePermission(Address, AccessPermission);
  bool isTagPresent(Address);
  void setMRU(Address);                // update replacement-policy recency
}
|
|
|
|
// Interface to the externally implemented (C++) TBE table.
external_type(TBETable) {
  TBE lookup(Address);
  void allocate(Address);
  void deallocate(Address);
  bool isPresent(Address);
}
|
|
|
|
// Per-bank transaction buffer table and the L2 cache array itself.
TBETable L2_TBEs, template_hack="<L2Cache_TBE>";

CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)';
|
|
|
|
// inclusive cache, returns L2 entries only
Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
  return L2cacheMemory[addr];
}
|
|
|
|
// Change the access permission of addr's L2 entry.
// Silently does nothing when the tag is not present (e.g. TBE-only states).
void changeL2Permission(Address addr, AccessPermission permission) {
  if (L2cacheMemory.isTagPresent(addr)) {
    return L2cacheMemory.changePermission(addr, permission);
  }
}
|
|
|
|
// Human-readable name of a coherence request type (for tracing/debug output).
string getCoherenceRequestTypeStr(CoherenceRequestType type) {
  return CoherenceRequestType_to_string(type);
}
|
|
|
|
// True when this L2 bank currently holds a tag for addr.
bool isL2CacheTagPresent(Address addr) {
  return (L2cacheMemory.isTagPresent(addr));
}
|
|
|
|
// True when requestor is the ONLY remaining on-chip L1 sharer of addr.
// Precondition (asserted): requestor is currently recorded as a sharer.
bool isOneSharerLeft(Address addr, MachineID requestor) {
  assert(L2cacheMemory[addr].Sharers.isElement(requestor));
  return (L2cacheMemory[addr].Sharers.count() == 1);
}
|
|
|
|
// True when requestor is recorded as an on-chip sharer of addr.
// Returns false when the tag is not present at all (line was evicted).
bool isSharer(Address addr, MachineID requestor) {
  if (L2cacheMemory.isTagPresent(addr)) {
    return L2cacheMemory[addr].Sharers.isElement(requestor);
  } else {
    return false;
  }
}
|
|
|
|
// Record requestor as an on-chip L1 sharer of addr.
// Asserts that the address/requestor pair actually maps to this L2 bank.
void addSharer(Address addr, MachineID requestor) {
  DEBUG_EXPR(machineID);
  DEBUG_EXPR(requestor);
  DEBUG_EXPR(addr);
  assert(map_L1CacheMachId_to_L2Cache(addr, requestor) == machineID);
  L2cacheMemory[addr].Sharers.add(requestor);
}
|
|
|
|
// Current protocol state for addr.  A TBE (transient) state takes precedence
// over the stable cache state; L2_NP when neither structure knows the address.
State getState(Address addr) {
  if(L2_TBEs.isPresent(addr)) {
    return L2_TBEs[addr].TBEState;
  } else if (isL2CacheTagPresent(addr)) {
    return getL2CacheEntry(addr).CacheState;
  }
  return State:L2_NP;
}
|
|
|
|
// Human-readable name of addr's current state (for tracing/debug output).
string getStateStr(Address addr) {
  return L2Cache_State_to_string(getState(addr));
}
|
|
|
|
// Called by the generated transition code after each event is handled.
// Updates the transient (TBE) and/or stable (cache) state for addr, and keeps
// the cache entry's access permission consistent with the new state.
void setState(Address addr, State state) {

  // MUST CHANGE
  if (L2_TBEs.isPresent(addr)) {
    L2_TBEs[addr].TBEState := state;
  }

  if (isL2CacheTagPresent(addr)) {
    getL2CacheEntry(addr).CacheState := state;

    // Set permission
    if (state == State:L2_I ||
        state == State:L2_SIC || state == State:L2_SIV ||
        state == State:L2_MIV || state == State:L2_MIN || state == State:L2_MIC || state == State:L2_MIT ||
        state == State:L2_OIV || state == State:L2_OIN || state == State:L2_OIC) {
      // Invalid or mid-invalidation: no access allowed
      changeL2Permission(addr, AccessPermission:Invalid);
    } else if (state == State:L2_S || state == State:L2_O || state == State:L2_SS || state == State:L2_SO) {
      changeL2Permission(addr, AccessPermission:Read_Only);
    } else if (state == State:L2_OM || state == State:L2_OMV) {
      changeL2Permission(addr, AccessPermission:ReadUpgradingToWrite);
    } else if (state == State:L2_M) {
      changeL2Permission(addr, AccessPermission:Read_Write);
    } else if (state == State:L2_MT) {
      // Modified in a local L1, so the L2's own copy is assumed stale
      changeL2Permission(addr, AccessPermission:Stale);
    } else {
      // All remaining transient states
      changeL2Permission(addr, AccessPermission:Busy);
    }
  }
}
|
|
|
|
// Map an incoming L1 request to the event to trigger.  For UPGRADE/PUTX/PUTS
// the current sharer set disambiguates last-sharer and stale-request cases.
Event L1Cache_request_type_to_event(CoherenceRequestType type, Address addr, MachineID requestor) {
  if(type == CoherenceRequestType:GETS) {
    return Event:L1_GETS;
  } else if(type == CoherenceRequestType:GET_INSTR) {
    return Event:L1_GET_INSTR;
  } else if (type == CoherenceRequestType:GETX) {
    return Event:L1_GETX;
  } else if (type == CoherenceRequestType:UPGRADE) {
    if (isSharer(addr, requestor)) {
      if (isOneSharerLeft(addr, requestor)) {
        return Event:L1_UPGRADE_no_others;
      } else {
        return Event:L1_UPGRADE;
      }
    } else { // possible that we removed the line from the L2 before we could process the UPGRADE request
      return Event:L1_GETX;
    }
  } else if (type == CoherenceRequestType:PUTX) {
    if (isSharer(addr, requestor)) {
      if (isOneSharerLeft(addr, requestor)) {
        return Event:L1_PUTX_last;
      } else {
        return Event:L1_PUTX;
      }
    } else {
      // requestor no longer recorded as a sharer: treat as a stale PUTX
      return Event:L1_PUTX_old;
    }
  } else if (type == CoherenceRequestType:PUTS) {
    if (isSharer(addr, requestor)) {
      if (isOneSharerLeft(addr, requestor)) {
        return Event:L1_PUTS_last;
      } else {
        return Event:L1_PUTS;
      }
    } else { // possible that we removed the line from the L2 before we could process the L1_PUTS request
      return Event:L1_PUTS_old;
    }
  } else {
    DEBUG_EXPR(addr);
    DEBUG_EXPR(type);
    error("Invalid L1 forwarded request type");
  }
}
|
|
|
|
// ** OUT_PORTS **
// All ports output to the same CMP network; the NI determines where to route each msg
out_port(L1RequestIntraChipL2Network_out, RequestMsg, L1RequestFromL2Cache);
out_port(DirRequestIntraChipL2Network_out, RequestMsg, DirRequestFromL2Cache);
out_port(responseIntraChipL2Network_out, ResponseMsg, responseFromL2Cache);
out_port(finalAckIntraChipL2Network_out, ResponseMsg, finalAckFromL2Cache);
|
|
|
|
// ** IN_PORTS **
|
|
|
|
// Guard port: virtual network 1 should never deliver requests to this bank.
// If a message ever arrives here, dump its fields and abort the simulation.
in_port(dummyTo1_in, RequestMsg, dummyTo1) {
  if (dummyTo1_in.isReady()) {
    peek(dummyTo1_in, RequestMsg) {
      DEBUG_EXPR(in_msg.Address);
      DEBUG_EXPR(id);
      DEBUG_EXPR(in_msg.Type);
      DEBUG_EXPR(getState(in_msg.Address));
      DEBUG_EXPR(in_msg.RequestorMachId);
    }
    error("dummyTo1 port should not be used");
  }
}
|
|
|
|
// Guard port: virtual network 4 should never deliver responses to this bank.
// If a message ever arrives here, dump its fields and abort the simulation.
in_port(dummyTo4_in, ResponseMsg, dummyTo4) {
  if (dummyTo4_in.isReady()) {
    peek(dummyTo4_in, ResponseMsg) {
      DEBUG_EXPR(in_msg.Address);
      DEBUG_EXPR(id);
      DEBUG_EXPR(in_msg.Type);
      DEBUG_EXPR(getState(in_msg.Address));
      DEBUG_EXPR(in_msg.SenderMachId);
    }
    error("dummyTo4 port should not be used");
  }
}
|
|
|
|
// Response IntraChip L2 Network - response msg to this particular L2 bank.
// Classifies each response by sender (local L1 vs. external) and by type
// (DATA vs. ack), then triggers the matching event.
in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache) {
  if (responseIntraChipL2Network_in.isReady()) {
    peek(responseIntraChipL2Network_in, ResponseMsg) {
      DEBUG_EXPR(in_msg.Address);
      DEBUG_EXPR(id);
      DEBUG_EXPR(getState(in_msg.Address));
      DEBUG_EXPR(in_msg.SenderMachId);
      DEBUG_EXPR(in_msg.Type);
      DEBUG_EXPR(in_msg.NumPendingExtAcks);
      // test whether it's from a local L1 or an off chip source
      assert(in_msg.Destination.isElement(machineID));
      if(machineIDToMachineType(in_msg.SenderMachId) == MachineType:L1Cache) {
        if(in_msg.Type == CoherenceResponseType:DATA) {
          // L1 data is only legal when exactly one internal ack is outstanding
          if(L2_TBEs[in_msg.Address].NumPendingIntAcks == 1) {
            trigger(Event:Data_int_ack, in_msg.Address); // L1 now has data and all on-chip acks
          } else {
            DEBUG_EXPR(in_msg.Address);
            DEBUG_EXPR(L2_TBEs[in_msg.Address].NumPendingIntAcks);
            error("Invalid L1 sent data when L2 wasn't expecting it");
          }
        } else if(in_msg.Type == CoherenceResponseType:INV_ACK) {
          if(L2_TBEs.isPresent(in_msg.Address)) { // FIXME - possible to get a L1 ack after the transaction is completed
            if(L2_TBEs[in_msg.Address].NumPendingIntAcks == 1) {
              trigger(Event:Proc_last_int_ack, in_msg.Address); // L1 now has all on-chip acks
            } else {
              trigger(Event:Proc_int_ack, in_msg.Address); // process on-chip ack
            }
          }
        }
      } else { // external message
        if(in_msg.Type == CoherenceResponseType:DATA) {
          if(in_msg.NumPendingExtAcks == 0) {
            trigger(Event:Data_ext_ack_0, in_msg.Address); // L2 now has data and all off-chip acks
          } else {
            // NOTE(review): the sum below presumably nets the message's ack
            // count against acks already received (TBE counter counts down
            // as acks race the data) -- confirm against the ack actions.
            if(in_msg.NumPendingExtAcks + L2_TBEs[in_msg.Address].NumPendingExtAcks != 0) {
              trigger(Event:Data_ext_ack_not_0, in_msg.Address);
            } else {
              trigger(Event:Data_ext_ack_not_0_last, in_msg.Address);
            }
          }
        } else if(in_msg.Type == CoherenceResponseType:ACK) {
          if(L2_TBEs[in_msg.Address].NumPendingExtAcks != 1){
            trigger(Event:Proc_ext_ack, in_msg.Address);
          } else {
            trigger(Event:Proc_last_ext_ack, in_msg.Address);
          }
        }
      }
    }
  } // if not ready, do nothing
}
|
|
|
|
// Forwarded Request from Directory: forwarded GETS/GETX/GET_INSTR from other
// chips, invalidations from other L2s, and writeback/exclusive acks.
in_port(forwardedRequestIntraChipL2Network_in, RequestMsg, forwardedRequestToL2Cache) {
  if(forwardedRequestIntraChipL2Network_in.isReady()) {
    peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
      DEBUG_EXPR(in_msg.Address);
      DEBUG_EXPR(id);
      DEBUG_EXPR(getState(in_msg.Address));
      DEBUG_EXPR(in_msg.RequestorMachId);
      DEBUG_EXPR(in_msg.Type);
      assert(in_msg.Destination.isElement(machineID));
      if(in_msg.Type == CoherenceRequestType:GETS) {
        trigger(Event:Forwarded_GETS, in_msg.Address); // L2
      } else if(in_msg.Type == CoherenceRequestType:GET_INSTR) {
        trigger(Event:Forwarded_GET_INSTR, in_msg.Address); // L2
      } else if (in_msg.Type == CoherenceRequestType:GETX) {
        trigger(Event:Forwarded_GETX, in_msg.Address); // L2
      } else if (in_msg.Type == CoherenceRequestType:INV) {
        trigger(Event:L2_INV, in_msg.Address); // L2
      } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
        trigger(Event:Dir_WB_ack, in_msg.Address); // L2
      } else if (in_msg.Type == CoherenceRequestType:EXE_ACK) {
        trigger(Event:Dir_exe_ack, in_msg.Address); // L2
      } else {
        error("Invalid L2 forwarded request type");
      }
    }
  }
}
|
|
|
|
// L1 Request port: requests from local L1s.  If the line is present (or a way
// is free) the request is handled directly; otherwise a victim is chosen and
// an L2_Replacement is triggered on the victim address first.
in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
  if(L1RequestIntraChipL2Network_in.isReady()) {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      DEBUG_EXPR(in_msg.Address);
      DEBUG_EXPR(id);
      DEBUG_EXPR(version);
      DEBUG_EXPR(getState(in_msg.Address));
      DEBUG_EXPR(in_msg.RequestorMachId);
      DEBUG_EXPR(in_msg.Type);
      DEBUG_EXPR(in_msg.Destination);
      assert(machineIDToMachineType(in_msg.RequestorMachId) == MachineType:L1Cache);
      assert(in_msg.Destination.isElement(machineID));
      if (L2cacheMemory.isTagPresent(in_msg.Address)) {
        // The L2 contains the block, so proceed with handling the request
        trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.RequestorMachId), in_msg.Address);
      } else {
        if (L2cacheMemory.cacheAvail(in_msg.Address)) {
          // L2 doesn't have the line, but we have space for it in the L2
          trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.RequestorMachId), in_msg.Address);
        } else {
          // No room in the L2, so we need to make room before handling the request
          trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
        }
      }
    }
  }
}
|
|
|
|
// ACTIONS
|
|
|
|
// Issue a GETS for this block to its home directory.
action(a_issueGETS, "a", desc="Issue GETS") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.RequestorMachId := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Control;
      // carry the requester's L1 state string along with our own state string
      // (NOTE(review): presumably for tracing/profiling -- confirm)
      out_msg.L1CacheStateStr := in_msg.L1CacheStateStr;
      out_msg.L2CacheStateStr := getStateStr(address);
    }
  }
}
|
|
|
|
// Issue a GETX for this block to its home directory.
action(b_issueGETX, "b", desc="Issue GETX") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.RequestorMachId := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Control;
      out_msg.L1CacheStateStr := in_msg.L1CacheStateStr;
      out_msg.L2CacheStateStr := getStateStr(address);
    }
  }
}
|
|
|
|
// finalAck issued from the response queue.
// Data arriving from another L2 means this was a 3-hop transfer, so the
// directory must be told the transfer completed.
action(c_finalAckToDirIfNeeded, "c", desc="Send FinalAck to dir if this is response to 3-hop xfer") {
  peek(responseIntraChipL2Network_in, ResponseMsg) {
    DEBUG_EXPR(in_msg);
    if(machineIDToMachineType(in_msg.SenderMachId) == MachineType:L2Cache) {
      enqueue(finalAckIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY"){
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:FINALACK;
        out_msg.SenderMachId := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Control;
        DEBUG_EXPR(out_msg);
      }
    }
  }
}
|
|
|
|
// finalAck issued from TBE: like c_finalAckToDirIfNeeded, but uses the
// isThreeHop flag recorded earlier (mm_rememberIfFinalAckNeeded).
action(n_sendFinalAckIfThreeHop, "n", desc=""){
  peek(responseIntraChipL2Network_in, ResponseMsg){
    DEBUG_EXPR(in_msg);
    if(L2_TBEs[address].isThreeHop == true){
      enqueue(finalAckIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY"){
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:FINALACK;
        out_msg.SenderMachId := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Control;
        DEBUG_EXPR(out_msg);
      }
    }
  }
}
|
|
|
|
// Record in the TBE that data came from another L2 (a 3-hop transfer), so a
// FINALACK must be sent to the directory later (n_sendFinalAckIfThreeHop).
action(mm_rememberIfFinalAckNeeded, "\m", desc=""){
  peek(responseIntraChipL2Network_in, ResponseMsg){
    if(machineIDToMachineType(in_msg.SenderMachId) == MachineType:L2Cache){
      L2_TBEs[address].isThreeHop := true;
    }
  }
}
|
|
|
|
// Issue a PUTX (writeback with data) for this block to its home directory.
action(d_issuePUTX, "d", desc="Issue PUTX") {
  enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
    out_msg.Address := address;
    out_msg.Type := CoherenceRequestType:PUTX;
    out_msg.RequestorMachId := machineID;
    out_msg.Destination.add(map_Address_to_Directory(address));
    out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
    DEBUG_EXPR(out_msg.Address);
    DEBUG_EXPR(out_msg.Destination);
    DEBUG_EXPR(out_msg.DataBlk);
    out_msg.MessageSize := MessageSizeType:Data;
    // no originating L1 for an L2-initiated writeback
    out_msg.L1CacheStateStr := "NA";
    out_msg.L2CacheStateStr := getStateStr(address);
  }
}
|
|
|
|
// Issue a GET_INSTR for this block to its home directory.
action(f_issueGETINSTR, "f", desc="Issue GETINSTR") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GET_INSTR;
      out_msg.RequestorMachId := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Control;
      out_msg.L1CacheStateStr := in_msg.L1CacheStateStr;
      out_msg.L2CacheStateStr := getStateStr(address);
    }
  }
}
|
|
|
|
// DELAYED RESPONSES - Sourced from a TBE entry
// TBE -> L1
// Send DATA to every L1 recorded in the TBE's GetS set, unless the
// transaction was a prefetch (no hit message needed then).
action(h_issueLoadHit, "h", desc="If not prefetch, notify sequencer the load completed.") {
  DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
  if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
    // Non-prefetch
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.SenderMachId := machineID;
      out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // could be multiple internal nodes
      DEBUG_EXPR(out_msg.Destination);
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  } else {
    // Prefetch - don't issue hit msg
  }
}
|
|
|
|
// Like h_issueLoadHit but sends DATA_I: the receiving L1(s) must take the
// data and transition to Invalid (the line is being invalidated).
action(oo_issueLoadHitInv, "\o", desc="If not prefetch, notify sequencer the load completed.") {
  DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
  if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
    // Non-prefetch
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_I;
      out_msg.SenderMachId := machineID;
      out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // could be multiple internal nodes
      DEBUG_EXPR(out_msg.Destination);
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  } else {
    // Prefetch - don't issue hit msg
  }
}
|
|
|
|
// Send DATA to the single L1 recorded as the GetX requestor in the TBE,
// unless the transaction was a prefetch.
action(hh_issueStoreHit, "\h", desc="If not prefetch, issue store hit message to local L1 requestor") {
  DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
  if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
    // Non-prefetch
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.SenderMachId := machineID;
      out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID); // a single node
      DEBUG_EXPR(out_msg.Destination);
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  } else {
    // Prefetch - don't issue hit msg
  }
}
|
|
|
|
// Like hh_issueStoreHit but sends DATA_I: the receiving L1 must use the data
// once and then invalidate (the line is being invalidated).
action(pp_issueStoreHitInv, "\p", desc="If not prefetch, issue store hit message to local L1 requestor") {
  DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
  if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
    // Non-prefetch
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_I;
      out_msg.SenderMachId := machineID;
      out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID); // a single node
      DEBUG_EXPR(out_msg.Destination);
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  } else {
    // Prefetch - don't issue hit msg
  }
}
|
|
|
|
// Like hh_issueStoreHit but sends DATA_S: the receiving L1 gets the data but
// must downgrade to Shared (the line is being downgraded).
action(cc_issueStoreHitDG, "\c", desc="If not prefetch, issue store hit message to local L1 requestor") {
  DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
  if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
    // Non-prefetch
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_S;
      out_msg.SenderMachId := machineID;
      out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID); // a single node
      DEBUG_EXPR(out_msg.Destination);
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  } else {
    // Prefetch - don't issue hit msg
  }
}
|
|
|
|
// Acknowledge an L1's PUTX/PUTS back to the requesting L1.
action(w_sendPutAckToL1Cache, "w", desc="send acknowledgement of an L1 replacement") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:ACK;
      out_msg.SenderMachId := machineID;
      out_msg.Destination.add(in_msg.RequestorMachId); // a single node
      DEBUG_EXPR(out_msg.Destination);
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }
}
|
|
|
|
// TBE -> L1s and L2s
// Send the cached data to every pending GetS requestor recorded in the TBE:
// one message to external L2s (with a zero ack count) and one to local L1s.
action(ee_dataFromL2CacheToGetSIDs, "\e", desc="Send data from cache to all GetS IDs") {
  // FIXME - In some cases this should be from the TBE, not the cache.
  // may send to other mod-L2s
  if (L2_TBEs[address].Forward_GetS_IDs.count() > 0) {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.SenderMachId := machineID;
      out_msg.Destination := L2_TBEs[address].Forward_GetS_IDs; // external nodes
      DEBUG_EXPR(out_msg.Destination);
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.NumPendingExtAcks := 0;
      DEBUG_EXPR(out_msg.Address);
      DEBUG_EXPR(out_msg.Destination);
      DEBUG_EXPR(out_msg.DataBlk);
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }
  // may send to local L1s
  if (L2_TBEs[address].L1_GetS_IDs.count() > 0) {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.SenderMachId := machineID;
      out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // internal nodes
      DEBUG_EXPR(out_msg.Destination);
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }
}
|
|
|
|
// TBE -> L2s only
// Send the cached data block to the forwarded (external) GetS requestors.
// NOTE(review): the guard also tests L1_GetS_IDs, yet only Forward_GetS_IDs
// is placed in Destination — confirm that is intended.
action(bb_dataFromL2CacheToGetSForwardIDs, "\b", desc="Send data from cache to GetS ForwardIDs") {
  // FIXME - In some cases this should be from the TBE, not the cache.
  if ((L2_TBEs[address].Forward_GetS_IDs.count() > 0) || (L2_TBEs[address].L1_GetS_IDs.count() > 0)) {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.SenderMachId := machineID;
      out_msg.Destination := L2_TBEs[address].Forward_GetS_IDs; // external nodes
      DEBUG_EXPR(out_msg.Destination);
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.NumPendingExtAcks := 0;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }
}
|
|
|
|
// TBE -> L2 only
// If a forwarded GetX was queued in the TBE, deliver the data block (plus
// the ack count the new owner must still collect) to that single requestor.
action(gg_dataFromL2CacheToGetXForwardID, "\g", desc="Send data from cache to GetX ForwardID") {
  // FIXME - In some cases this should be from the TBE, not the cache.
  if (L2_TBEs[address].validForwardedGetXId) {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.SenderMachId := machineID;
      out_msg.Destination.add(L2_TBEs[address].Forward_GetX_ID);
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.NumPendingExtAcks := L2_TBEs[address].ForwardGetX_AckCount;
      DEBUG_EXPR(out_msg.Address);
      DEBUG_EXPR(out_msg.Destination);
      DEBUG_EXPR(out_msg.DataBlk);
      DEBUG_EXPR(out_msg.NumPendingExtAcks);
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }
}
|
|
|
|
// IMMEDIATE RESPONSES directly from the ForwardRequest queue
// ForwardRequest -> L2
// Reply to the forwarded request at the head of the queue with our data,
// passing through the pending-ack count (needed when in O and we see a GetX).
action(e_dataFromL2CacheToL2Requestor, "e", desc="Send data from cache to requestor") {
  peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.SenderMachId := machineID;
      out_msg.NumPendingExtAcks := in_msg.NumPendingExtAcks; // Needed when in state O and we see a GetX
      out_msg.Destination.add(in_msg.RequestorMachId);
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      DEBUG_EXPR(out_msg.Address);
      DEBUG_EXPR(out_msg.Destination);
      DEBUG_EXPR(out_msg.DataBlk);
      DEBUG_EXPR(out_msg.NumPendingExtAcks);
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }
}
|
|
|
|
// ForwardRequest -> L1
// Satisfy the L1 request at the head of the L1 request queue with our data.
action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data from cache to L1 requestor") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.SenderMachId := machineID;
      out_msg.Destination.add(in_msg.RequestorMachId);
      DEBUG_EXPR(out_msg.Destination);
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }
}
|
|
|
|
// OTHER ACTIONS

// Allocate a TBE for this address and reset every field to its default:
// no pending acks, not a prefetch/three-hop, no recorded GetS/GetX IDs,
// no queued invalidator, not internal-only.
action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
  check_allocate(L2_TBEs);
  L2_TBEs.allocate(address);
  L2_TBEs[address].NumPendingIntAcks := 0; // default value
  L2_TBEs[address].NumPendingExtAcks := 0; // default value
  L2_TBEs[address].isPrefetch := false;
  L2_TBEs[address].isThreeHop := false;
  L2_TBEs[address].Forward_GetS_IDs.clear();
  L2_TBEs[address].L1_GetS_IDs.clear();
  L2_TBEs[address].validInvalidator := false;
  L2_TBEs[address].validForwardedGetXId := false;
  L2_TBEs[address].isInternalRequestOnly := false;
}
|
|
|
|
// Release the TBE once the transaction for this address has completed.
action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
  L2_TBEs.deallocate(address);
}
|
|
|
|
// Dequeue the head of the L1 request queue, profiling its network delay (vnet 0).
action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
  profileMsgDelay(0, L1RequestIntraChipL2Network_in.dequeue_getDelayCycles());
}
|
|
|
|
// Dequeue the head of the forwarded request queue, profiling its network delay (vnet 2).
action(l_popForwardedRequestQueue, "l", desc="Pop incoming forwarded request queue") {
  profileMsgDelay(2, forwardedRequestIntraChipL2Network_in.dequeue_getDelayCycles());
}
|
|
|
|
// Dequeue the head of the response queue, profiling its network delay (vnet 3).
action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
  profileMsgDelay(3, responseIntraChipL2Network_in.dequeue_getDelayCycles());
}
|
|
|
|
// Fold the ack count carried by the incoming response into the TBE's
// running external-ack total.
action(p_addNumberOfPendingExtAcks, "p", desc="Add number of pending acks to TBE") {
  peek(responseIntraChipL2Network_in, ResponseMsg) {
    DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
    L2_TBEs[address].NumPendingExtAcks := L2_TBEs[address].NumPendingExtAcks + in_msg.NumPendingExtAcks;
    DEBUG_EXPR(in_msg.NumPendingExtAcks);
    DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
  }
}
|
|
|
|
// One external invalidation ack has arrived; count it down in the TBE.
action(q_decrementNumberOfPendingExtAcks, "q", desc="Decrement number of pending ext invalidations by one") {
  DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
  L2_TBEs[address].NumPendingExtAcks := L2_TBEs[address].NumPendingExtAcks - 1;
  DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
}
|
|
|
|
// One internal (intra-chip L1) invalidation ack has arrived; count it down.
// BUG FIX: the DEBUG_EXPR statements previously printed NumPendingExtAcks,
// although this action updates NumPendingIntAcks — a copy-paste slip from
// q_decrementNumberOfPendingExtAcks that made the debug trace misleading.
action(r_decrementNumberOfPendingIntAcks, "r", desc="Decrement number of pending int invalidations by one") {
  DEBUG_EXPR(L2_TBEs[address].NumPendingIntAcks);
  L2_TBEs[address].NumPendingIntAcks := L2_TBEs[address].NumPendingIntAcks - 1;
  DEBUG_EXPR(L2_TBEs[address].NumPendingIntAcks);
}
|
|
|
|
// Immediately ACK the invalidation currently at the head of the forwarded
// request queue, addressed to the node that issued it.
action(t_sendAckToInvalidator, "t", desc="Send ack to invalidator") {
  peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:ACK;
      out_msg.SenderMachId := machineID;
      out_msg.Destination.add(in_msg.RequestorMachId);
      DEBUG_EXPR(out_msg.Destination);
      out_msg.NumPendingExtAcks := 0;
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }
}
|
|
|
|
// Copy the data block carried by the head response message into the cache entry.
action(u_writeDataFromResponseQueueToL2Cache, "u", desc="Write data from response queue to cache") {
  peek(responseIntraChipL2Network_in, ResponseMsg) {
    getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
  }
}
|
|
|
|
// FIXME - probably need to change this to a seperate low priority request queue
// Copy the data block carried by the head L1 *request* message into the cache
// entry (e.g. writeback data riding on a PUT request).
// FIX: desc previously said "response queue" — this action peeks the L1
// request queue, so the description was wrong.
action(m_writeDataFromRequestQueueToL2Cache, "m", desc="Write data from request queue to cache") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
  }
}
|
|
|
|
// Snapshot the cache entry's data block into the TBE (e.g. before eviction).
action(x_copyDataFromL2CacheToTBE, "x", desc="Copy data from cache to TBE") {
  L2_TBEs[address].DataBlk := getL2CacheEntry(address).DataBlk;
}
|
|
|
|
// Like e_dataFromL2CacheToL2Requestor, but sources the data from the TBE
// snapshot instead of the cache entry (block may already be evicted).
action(y_dataFromTBEToRequestor, "y", desc="Send data from TBE to requestor") {
  peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.SenderMachId := machineID;
      out_msg.NumPendingExtAcks := in_msg.NumPendingExtAcks;
      out_msg.Destination.add(in_msg.RequestorMachId);
      out_msg.DataBlk := L2_TBEs[address].DataBlk;
      DEBUG_EXPR(out_msg.Address);
      DEBUG_EXPR(out_msg.Destination);
      DEBUG_EXPR(out_msg.DataBlk);
      DEBUG_EXPR(out_msg.NumPendingExtAcks);
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }
}
|
|
|
|
// ACK an invalidation that was queued earlier in the TBE (via
// yy_recordInvalidatorID) rather than one at the head of the queue.
action(zz_sendAckToQueuedInvalidator, "\z", desc="Send ack to invalidator") {
  if (L2_TBEs[address].validInvalidator) {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:ACK;
      out_msg.SenderMachId := machineID;
      out_msg.Destination.add(L2_TBEs[address].InvalidatorID);
      DEBUG_EXPR(out_msg.Destination);
      out_msg.NumPendingExtAcks := 0;
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }
}
|
|
|
|
// Intentionally empty: leaves the triggering message at the head of its
// queue so the event is retried later.
action(z_stall, "z", desc="Stall") {
}
|
|
|
|
// Remember who sent the invalidation so it can be ACKed later
// (see zz_sendAckToQueuedInvalidator).
action(yy_recordInvalidatorID, "\y", desc="Record Invalidator for future response") {
  peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
    L2_TBEs[address].InvalidatorID := in_msg.RequestorMachId;
    L2_TBEs[address].validInvalidator := true;
  }
}
|
|
|
|
// Queue an externally forwarded GetS requestor in the TBE for later data delivery.
action(dd_recordGetSForwardID, "\d", desc="Record forwarded GetS for future forwarding") {
  peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
    L2_TBEs[address].Forward_GetS_IDs.add(in_msg.RequestorMachId);
  }
}
|
|
|
|
// Queue a local L1 GetS requestor in the TBE for the eventual load response.
action(ss_recordGetSL1ID, "\s", desc="Record forwarded L1 GetS for load response") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    L2_TBEs[address].L1_GetS_IDs.add(in_msg.RequestorMachId);
  }
}
|
|
|
|
// Remember a forwarded GetX requestor plus the ack count it must collect,
// for later data delivery (see gg_dataFromL2CacheToGetXForwardID).
action(ii_recordGetXForwardID, "\i", desc="Record forwarded GetX and ack count for future forwarding") {
  peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
    L2_TBEs[address].Forward_GetX_ID := in_msg.RequestorMachId;
    L2_TBEs[address].ForwardGetX_AckCount := in_msg.NumPendingExtAcks;
    L2_TBEs[address].validForwardedGetXId := true;
  }
}
|
|
|
|
// Remember which local L1 issued the GetX so the store response can be routed.
action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    L2_TBEs[address].L1_GetX_ID := in_msg.RequestorMachId;
  }
}
|
|
|
|
// Touch the replacement-policy state: mark this address most-recently-used.
action(set_setMRU, "\set", desc="set the MRU entry") {
  L2cacheMemory.setMRU(address);
}
|
|
|
|
// Expect one internal ack from every L1 currently on the sharer list.
action(bbb_setPendingIntAcksToSharers, "\bb", desc="Set number of pending acks equal to number of sharers") {
  L2_TBEs[address].NumPendingIntAcks := L2cacheMemory[address].Sharers.count();
}
|
|
|
|
// Expect exactly one internal ack (single L1 owner case).
action(ddd_setPendingIntAcksToOne, "\dd", desc="Set number of pending acks equal to one") {
  L2_TBEs[address].NumPendingIntAcks := 1;
}
|
|
|
|
// Expect an internal ack from every sharer except one (the requestor itself).
action(ccc_setPendingIntAcksMinusOne, "\cc", desc="Set number of pending acks equal to number of sharers minus one") {
  L2_TBEs[address].NumPendingIntAcks := L2cacheMemory[address].Sharers.count() - 1;
}
|
|
|
|
// Allocate a cache entry for this address unless one already exists.
action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
  if (L2cacheMemory.isTagPresent(address) == false) {
    L2cacheMemory.allocate(address);
  }
}
|
|
|
|
// Free the cache entry, letting a replacement proceed in parallel with a fetch.
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
  L2cacheMemory.deallocate(address);
}
|
|
|
|
// Profile a demand miss. The actual profiling call is currently disabled;
// the peek is kept so the hook can be re-enabled without restructuring.
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    //profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, L1CacheMachIDToProcessorNum(in_msg.RequestorMachId));
  }
}
|
|
|
|
// Profile a request the directory will never observe (handled locally at L2).
action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    profile_request(in_msg.L1CacheStateStr, getStateStr(address), "NA", getCoherenceRequestTypeStr(in_msg.Type));
  }
}
|
|
|
|
// Invalidate the (single) L1 copy in M by sending INV to the sharer list.
action(v_issueInvalidateIntL1copyRequest, "v", desc="invalidate the L1 M copy") {
  enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
    out_msg.Address := address;
    out_msg.Type := CoherenceRequestType:INV;
    out_msg.RequestorMachId := machineID;
    out_msg.Destination := L2cacheMemory[address].Sharers;
    out_msg.MessageSize := MessageSizeType:Control;
  }
}
|
|
|
|
// Invalidate every shared (S) L1 copy by sending INV_S to the sharer list.
action(tt_issueSharedInvalidateIntL1copiesRequest, "\t", desc="invalidate all L1 S copies") {
  enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
    out_msg.Address := address;
    out_msg.Type := CoherenceRequestType:INV_S;
    out_msg.RequestorMachId := machineID;
    out_msg.Destination := L2cacheMemory[address].Sharers;
    out_msg.MessageSize := MessageSizeType:Control;
  }
}
|
|
|
|
// Invalidate every L1 copy except the requestor's own. Skip the send entirely
// when the requestor is the lone sharer (nothing else to invalidate).
action(vv_issueInvalidateOtherIntL1copiesRequest, "\v", desc="invalidate other L1 copies not the local requestor") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    if ((L2cacheMemory[address].Sharers.count() > 1) || (L2cacheMemory[address].Sharers.isElement(in_msg.RequestorMachId) != true)) {
      enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV_S;
        out_msg.RequestorMachId := machineID;
        out_msg.Destination := L2cacheMemory[address].Sharers;
        out_msg.Destination.remove(in_msg.RequestorMachId); // requestor keeps its copy
        out_msg.MessageSize := MessageSizeType:Control;
      }
    }
  }
}
|
|
|
|
// Ask the L1 owner(s) to downgrade (L1_DG) rather than invalidate.
action(g_issueDownGradeIntL1copiesRequest, "g", desc="DownGrade L1 copy") {
  enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
    out_msg.Address := address;
    out_msg.Type := CoherenceRequestType:L1_DG;
    out_msg.RequestorMachId := machineID;
    out_msg.Destination := L2cacheMemory[address].Sharers;
    out_msg.MessageSize := MessageSizeType:Control;
  }
}
|
|
|
|
// Add the requesting L1 (head of the L1 request queue) to the sharer list.
action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    addSharer(address, in_msg.RequestorMachId);
  }
}
|
|
|
|
// Drop the requesting L1 (head of the L1 request queue) from the sharer list.
action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    L2cacheMemory[address].Sharers.remove(in_msg.RequestorMachId);
  }
}
|
|
|
|
// Drop the L1 that sent the response at the head of the response queue
// (typically an invalidation ack) from the sharer list.
action(aa_removeResponseSharer, "\a", desc="Remove L1 Response sharer from list") {
  peek(responseIntraChipL2Network_in, ResponseMsg) {
    L2cacheMemory[address].Sharers.remove(in_msg.SenderMachId);
  }
}
|
|
|
|
// Empty the sharer list. (The peek is unused by the body but preserved;
// removing it is a semantic judgement best left to the protocol author.)
action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
  peek(L1RequestIntraChipL2Network_in, RequestMsg) {
    L2cacheMemory[address].Sharers.clear();
  }
}
|
|
|
|
//*****************************************************
|
|
// TRANSITIONS
|
|
//*****************************************************
|
|
|
|
//===============================================
// STALLS

// Stall L2_Replacement and every flavour of L1 PUT while in any transient state.
transition({L2_IS, L2_ISZ, L2_ISI, L2_IMV, L2_MV, L2_IM, L2_IMO, L2_IMI, L2_IMZ, L2_IMOI, L2_IMOZ,
            L2_SIV, L2_SIC,
            L2_MIV, L2_MIN, L2_MIC, L2_MIT, L2_MO, L2_MOIC, L2_MOICR, L2_MOZ,
            L2_OIV, L2_OIN, L2_OIC, L2_OMV, L2_OM},
           {L2_Replacement, L1_PUTX, L1_PUTX_last, L1_PUTS, L1_PUTS_last, L1_PUTX_old, L1_PUTS_old}) {
  z_stall;
}
|
|
|
|
//===============================================
// old L1_PUT requests

// A stale PUT from an earlier epoch just needs an ack so the L1 can retire it.
transition({L2_NP, L2_I, L2_S, L2_SS, L2_M, L2_MT, L2_O, L2_SO}, {L1_PUTX_old, L1_PUTS_old}) {
  w_sendPutAckToL1Cache;
  jj_popL1RequestQueue;
}
|
|
|
|
//===============================================
// BASE STATE - I

// Transitions from I (Idle) / NP (Not Present)

// Replacement of an idle block just frees the entry.
transition({L2_NP, L2_I}, L2_Replacement) {
  rr_deallocateL2CacheBlock;
}

// Could see an invalidate from the directory, but not Forwards.
transition({L2_NP, L2_I}, L2_INV) {
  t_sendAckToInvalidator;
  l_popForwardedRequestQueue;
}

// L1 data load miss: allocate, record requestor, fetch from directory.
transition({L2_NP, L2_I}, L1_GETS, L2_IS) {
  qq_allocateL2CacheBlock;
  ll_clearSharers;
  nn_addSharer;
  i_allocateTBE;
  ss_recordGetSL1ID;
  a_issueGETS;
  uu_profileMiss;
  jj_popL1RequestQueue;
}

// L1 instruction fetch miss: same path, but issues a GETINSTR.
transition({L2_NP, L2_I}, L1_GET_INSTR, L2_IS) {
  qq_allocateL2CacheBlock;
  ll_clearSharers;
  nn_addSharer;
  i_allocateTBE;
  ss_recordGetSL1ID;
  f_issueGETINSTR;
  uu_profileMiss;
  jj_popL1RequestQueue;
}

// L1 store miss. UPGRADE is possible here because L2_Replacement has higher priority.
transition({L2_NP, L2_I}, {L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}, L2_IM) {
  qq_allocateL2CacheBlock;
  ll_clearSharers;
  nn_addSharer;
  i_allocateTBE;
  xx_recordGetXL1ID;
  b_issueGETX;
  uu_profileMiss;
  jj_popL1RequestQueue;
}
|
|
|
|
// Transitions from L2_IS — waiting for GetS data.
// Could see L2_INVs or more L1 requests.

// Invalidate races past our GetS: ack it and remember we must drop the data.
transition(L2_IS, L2_INV, L2_ISI) {
  t_sendAckToInvalidator;
  l_popForwardedRequestQueue;
}

// Data arrives with no outstanding external acks: satisfy the load(s).
transition(L2_IS, Data_ext_ack_0, L2_SS) {
  u_writeDataFromResponseQueueToL2Cache;
  h_issueLoadHit;
  c_finalAckToDirIfNeeded;
  s_deallocateTBE;
  o_popIncomingResponseQueue;
}

// Additional local readers piggy-back on the in-flight GetS.
transition(L2_IS, {L1_GETS, L1_GET_INSTR}) {
  set_setMRU;
  ww_profileMissNoDir;
  nn_addSharer;
  ss_recordGetSL1ID;
  jj_popL1RequestQueue;
}

// A write while the read is in flight: don't handle it here, move to a stall state.
transition(L2_IS, L1_GETX, L2_ISZ) {
  z_stall;
}
|
|
|
|
// Transitions from L2_ISZ — like L2_IS but all L1 requests are stalled
// until the data arrives.

// Invalidate races past our GetS: ack it, remember to drop the data.
transition(L2_ISZ, L2_INV, L2_ISI) {
  t_sendAckToInvalidator;
  l_popForwardedRequestQueue;
}

// Data arrives: satisfy the queued load(s) and settle with the directory.
transition(L2_ISZ, Data_ext_ack_0, L2_SS) {
  u_writeDataFromResponseQueueToL2Cache;
  h_issueLoadHit;
  c_finalAckToDirIfNeeded;
  s_deallocateTBE;
  o_popIncomingResponseQueue;
}

// Stall every new L1 request.
transition(L2_ISZ, {L1_GETS, L1_GET_INSTR, L1_GETX}) {
  z_stall;
}
|
|
|
|
// Transitions from L2_ISI — the invalidate ack was already sent, so on data
// arrival we can go straight to I.
// In ISI the data may come from the processor whose GETX caused the INV,
// from the Dir if its data lost the race to its own INV, or from the Dir if
// our GETS was slow and the GETX processor already wrote the block back.

transition(L2_ISI, Data_ext_ack_0, L2_I) {
  u_writeDataFromResponseQueueToL2Cache;
  oo_issueLoadHitInv; // satisfy the load but invalidate immediately after
  c_finalAckToDirIfNeeded;
  s_deallocateTBE;
  o_popIncomingResponseQueue;
}

// Another directory invalidate: ack it again.
transition(L2_ISI, L2_INV) {
  t_sendAckToInvalidator;
  l_popForwardedRequestQueue;
}

// Stall every new L1 request.
transition(L2_ISI, {L1_GETS, L1_GET_INSTR, L1_GETX}) {
  z_stall;
}
|
|
|
|
// Transitions from L2_IMV — waiting on internal acks (and possibly external
// data/acks). All new forwarded and L1 requests are stalled.

// An invalidate aimed at our old SS copy: queue the invalidator, ack later.
transition(L2_IMV, L2_INV) {
  yy_recordInvalidatorID;
  l_popForwardedRequestQueue;
}

// Stall all forwarded requests.
transition(L2_IMV, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX}) {
  z_stall;
}

// Stall all L1 requests.
transition(L2_IMV, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}) {
  z_stall;
}

// External side is done (data + all ext acks): now only internal acks remain.
transition(L2_IMV, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MV) {
  u_writeDataFromResponseQueueToL2Cache;
  c_finalAckToDirIfNeeded;
  o_popIncomingResponseQueue;
}

// Data arrived but external acks are still outstanding.
transition(L2_IMV, Data_ext_ack_not_0) {
  u_writeDataFromResponseQueueToL2Cache;
  p_addNumberOfPendingExtAcks;
  mm_rememberIfFinalAckNeeded;
  o_popIncomingResponseQueue;
}

// One more external ack collected.
transition(L2_IMV, Proc_ext_ack) {
  q_decrementNumberOfPendingExtAcks;
  o_popIncomingResponseQueue;
}

// Final external ack: external side done, wait for internal acks.
transition(L2_IMV, Proc_last_ext_ack, L2_MV) {
  n_sendFinalAckIfThreeHop;
  o_popIncomingResponseQueue;
}

// One more internal ack collected.
transition(L2_IMV, Proc_int_ack) {
  aa_removeResponseSharer;
  r_decrementNumberOfPendingIntAcks;
  o_popIncomingResponseQueue;
}

// Final internal ack: internal side done; ack any queued invalidator.
transition(L2_IMV, Proc_last_int_ack, L2_IM) {
  aa_removeResponseSharer;
  r_decrementNumberOfPendingIntAcks;
  o_popIncomingResponseQueue;
  zz_sendAckToQueuedInvalidator;
}
|
|
|
|
// Transitions from L2_MV — the external world has granted write permission;
// only internal acks remain outstanding.

// Stall all forwarded requests.
transition(L2_MV, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX}) {
  z_stall;
}

// Stall all L1 requests.
transition(L2_MV, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}) {
  z_stall;
}

// One more internal ack collected.
transition(L2_MV, Proc_int_ack) {
  aa_removeResponseSharer;
  r_decrementNumberOfPendingIntAcks;
  o_popIncomingResponseQueue;
}

// Final internal ack: grant the store and finish.
transition(L2_MV, Proc_last_int_ack, L2_MT) {
  aa_removeResponseSharer;
  r_decrementNumberOfPendingIntAcks;
  hh_issueStoreHit;
  s_deallocateTBE;
  o_popIncomingResponseQueue;
}
|
|
|
|
// Transitions from L2_IM — waiting for external data before moving to MT.
// Could see forwards and/or more L1 requests.

// Invalidate from an earlier epoch: just ack it.
transition(L2_IM, L2_INV) {
  t_sendAckToInvalidator;
  l_popForwardedRequestQueue;
}

// A forwarded GetS can arrive if directory responses get out of order:
// queue the requestor for data delivery after we obtain the block.
transition(L2_IM, {Forwarded_GETS, Forwarded_GET_INSTR}, L2_IMO) {
  dd_recordGetSForwardID;
  l_popForwardedRequestQueue;
}

// Local readers piggy-back on the in-flight GetX.
transition(L2_IM, {L1_GETS, L1_GET_INSTR}, L2_IMO) {
  set_setMRU;
  ww_profileMissNoDir;
  nn_addSharer;
  ss_recordGetSL1ID;
  jj_popL1RequestQueue;
}

// A forwarded GetX can arrive if directory requests get ahead of responses.
transition(L2_IM, Forwarded_GETX, L2_IMI) {
  ii_recordGetXForwardID;
  l_popForwardedRequestQueue;
}

// Second local writer: don't handle here, go to the stall state.
transition(L2_IM, L1_GETX, L2_IMZ) {
  z_stall;
}

// Data with all external acks accounted for: complete the store.
transition(L2_IM, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MT) {
  u_writeDataFromResponseQueueToL2Cache;
  hh_issueStoreHit;
  c_finalAckToDirIfNeeded;
  s_deallocateTBE;
  o_popIncomingResponseQueue;
}

// Data arrived but external acks are still outstanding.
transition(L2_IM, Data_ext_ack_not_0) {
  u_writeDataFromResponseQueueToL2Cache;
  p_addNumberOfPendingExtAcks;
  mm_rememberIfFinalAckNeeded;
  o_popIncomingResponseQueue;
}

// One more external ack collected.
transition(L2_IM, Proc_ext_ack) {
  q_decrementNumberOfPendingExtAcks;
  o_popIncomingResponseQueue;
}

// Final external ack: complete the store.
transition(L2_IM, Proc_last_ext_ack, L2_MT) {
  hh_issueStoreHit;
  n_sendFinalAckIfThreeHop;
  s_deallocateTBE;
  o_popIncomingResponseQueue;
}
|
|
|
|
// Transitions from L2_IMO — like L2_IM but with GetS requestors queued,
// so data must later be re-shared (ending in owned state MO).

// Invalidate from an earlier epoch: just ack it.
transition(L2_IMO, L2_INV) {
  t_sendAckToInvalidator;
  l_popForwardedRequestQueue;
}

// More forwarded readers: keep queuing them.
transition(L2_IMO, {Forwarded_GETS, Forwarded_GET_INSTR}) {
  dd_recordGetSForwardID;
  l_popForwardedRequestQueue;
}

// A forwarded writer too: remember it, we will hand the block over last.
transition(L2_IMO, Forwarded_GETX, L2_IMOI) {
  ii_recordGetXForwardID;
  l_popForwardedRequestQueue;
}

// More local readers: keep queuing them.
transition(L2_IMO, {L1_GETS, L1_GET_INSTR}) {
  set_setMRU;
  ww_profileMissNoDir;
  nn_addSharer;
  ss_recordGetSL1ID;
  jj_popL1RequestQueue;
}

// Local writer: stall state.
transition(L2_IMO, L1_GETX, L2_IMOZ) {
  z_stall;
}

// Data with external side settled: store completes, then downgrade the
// L1 copy so queued readers can be serviced.
transition(L2_IMO, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MO) {
  u_writeDataFromResponseQueueToL2Cache;
  cc_issueStoreHitDG;
  ddd_setPendingIntAcksToOne;
  c_finalAckToDirIfNeeded;
  o_popIncomingResponseQueue;
}

// Data arrived but external acks are still outstanding.
transition(L2_IMO, Data_ext_ack_not_0) {
  u_writeDataFromResponseQueueToL2Cache;
  p_addNumberOfPendingExtAcks;
  mm_rememberIfFinalAckNeeded;
  o_popIncomingResponseQueue;
}

// One more external ack collected.
transition(L2_IMO, Proc_ext_ack) {
  q_decrementNumberOfPendingExtAcks;
  o_popIncomingResponseQueue;
}

// Final external ack: store completes with a downgrade, as above.
transition(L2_IMO, Proc_last_ext_ack, L2_MO) {
  n_sendFinalAckIfThreeHop;
  cc_issueStoreHitDG;
  ddd_setPendingIntAcksToOne;
  o_popIncomingResponseQueue;
}
|
|
|
|
// Transitions from L2_IMI — the directory put us here, so it will send
// nothing further (no INV or Forwards to worry about). Stall all L1 requests.

// Data with external side settled: store completes with invalidation, then
// the block will be handed to the queued GetX requestor.
transition(L2_IMI, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MIC) {
  u_writeDataFromResponseQueueToL2Cache;
  pp_issueStoreHitInv;
  ddd_setPendingIntAcksToOne;
  c_finalAckToDirIfNeeded;
  o_popIncomingResponseQueue;
}

// Data arrived but external acks are still outstanding.
transition(L2_IMI, Data_ext_ack_not_0) {
  u_writeDataFromResponseQueueToL2Cache;
  p_addNumberOfPendingExtAcks;
  mm_rememberIfFinalAckNeeded;
  o_popIncomingResponseQueue;
}

// One more external ack collected.
transition(L2_IMI, Proc_ext_ack) {
  q_decrementNumberOfPendingExtAcks;
  o_popIncomingResponseQueue;
}

// Final external ack: same completion path as the data case.
transition(L2_IMI, Proc_last_ext_ack, L2_MIC) {
  n_sendFinalAckIfThreeHop;
  pp_issueStoreHitInv;
  ddd_setPendingIntAcksToOne;
  o_popIncomingResponseQueue;
}

// Stall every new L1 request.
transition(L2_IMI, {L1_GETS, L1_GET_INSTR, L1_GETX}) {
  z_stall;
}
|
|
|
|
// Transitions from L2_IMZ — just wait for all acks and data; stall on all
// requests. NOTE: a performance option might go to M instead of MT.

// Data with external side settled: complete the store.
transition(L2_IMZ, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MT) {
  u_writeDataFromResponseQueueToL2Cache;
  hh_issueStoreHit;
  c_finalAckToDirIfNeeded;
  s_deallocateTBE;
  o_popIncomingResponseQueue;
}

// Data arrived but external acks are still outstanding.
transition(L2_IMZ, Data_ext_ack_not_0) {
  u_writeDataFromResponseQueueToL2Cache;
  p_addNumberOfPendingExtAcks;
  mm_rememberIfFinalAckNeeded;
  o_popIncomingResponseQueue;
}

// One more external ack collected.
transition(L2_IMZ, Proc_ext_ack) {
  q_decrementNumberOfPendingExtAcks;
  o_popIncomingResponseQueue;
}

// Final external ack: complete the store.
transition(L2_IMZ, Proc_last_ext_ack, L2_MT) {
  hh_issueStoreHit;
  n_sendFinalAckIfThreeHop;
  s_deallocateTBE;
  o_popIncomingResponseQueue;
}

// Invalidate from an earlier epoch: just ack it.
transition(L2_IMZ, L2_INV) {
  t_sendAckToInvalidator;
  l_popForwardedRequestQueue;
}

// Stall everything else.
transition(L2_IMZ, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX, L1_GETS, L1_GET_INSTR, L1_GETX}) {
  z_stall;
}
|
|
|
|
// Transitions from L2_IMOI — the directory put us here, so it will send
// nothing further (no INV or Forwards). Stall all L1 requests.

// Data with external side settled: store completes with invalidation; queued
// GetS readers get the data before ownership passes on.
transition(L2_IMOI, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MOICR) {
  u_writeDataFromResponseQueueToL2Cache;
  pp_issueStoreHitInv;
  ddd_setPendingIntAcksToOne;
  c_finalAckToDirIfNeeded;
  o_popIncomingResponseQueue;
}

// Data arrived but external acks are still outstanding.
transition(L2_IMOI, Data_ext_ack_not_0) {
  u_writeDataFromResponseQueueToL2Cache;
  p_addNumberOfPendingExtAcks;
  mm_rememberIfFinalAckNeeded;
  o_popIncomingResponseQueue;
}

// One more external ack collected.
transition(L2_IMOI, Proc_ext_ack) {
  q_decrementNumberOfPendingExtAcks;
  o_popIncomingResponseQueue;
}

// Final external ack: same completion path as the data case.
transition(L2_IMOI, Proc_last_ext_ack, L2_MOICR) {
  n_sendFinalAckIfThreeHop;
  pp_issueStoreHitInv;
  ddd_setPendingIntAcksToOne;
  o_popIncomingResponseQueue;
}

// Stall every new L1 request.
transition(L2_IMOI, {L1_GETS, L1_GET_INSTR, L1_GETX}) {
  z_stall;
}
|
|
|
|
// Transitions from L2_IMOZ — just wait for all acks and data; stall on all
// requests.

// Invalidate from an earlier epoch: just ack it.
transition(L2_IMOZ, L2_INV) {
  t_sendAckToInvalidator;
  l_popForwardedRequestQueue;
}

// Data with external side settled: store completes with a downgrade.
transition(L2_IMOZ, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MOZ) {
  u_writeDataFromResponseQueueToL2Cache;
  cc_issueStoreHitDG;
  ddd_setPendingIntAcksToOne;
  c_finalAckToDirIfNeeded;
  o_popIncomingResponseQueue;
}

// Data arrived but external acks are still outstanding.
transition(L2_IMOZ, Data_ext_ack_not_0) {
  u_writeDataFromResponseQueueToL2Cache;
  p_addNumberOfPendingExtAcks;
  mm_rememberIfFinalAckNeeded;
  o_popIncomingResponseQueue;
}

// One more external ack collected.
transition(L2_IMOZ, Proc_ext_ack) {
  q_decrementNumberOfPendingExtAcks;
  o_popIncomingResponseQueue;
}

// Final external ack: same completion path as the data case.
transition(L2_IMOZ, Proc_last_ext_ack, L2_MOZ) {
  cc_issueStoreHitDG;
  ddd_setPendingIntAcksToOne;
  n_sendFinalAckIfThreeHop;
  o_popIncomingResponseQueue;
}

// Stall everything else.
transition(L2_IMOZ, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX, L1_GETS, L1_GET_INSTR, L1_GETX}) {
  z_stall;
}
|
|
|
|
// ===============================================
// BASE STATE - S
// Transitions from S (shared, no L1 copies)

// Silent eviction: nothing to write back.
transition(L2_S, L2_Replacement, L2_I) {
  rr_deallocateL2CacheBlock;
}

// Could see an invalidate from the directory, but not Forwards.
transition(L2_S, L2_INV, L2_I) {
  t_sendAckToInvalidator;
  l_popForwardedRequestQueue;
}

// Local read hit: supply data and track the new L1 sharer.
transition(L2_S, {L1_GETS, L1_GET_INSTR}, L2_SS) {
  set_setMRU;
  ww_profileMissNoDir;
  nn_addSharer;
  k_dataFromL2CacheToL1Requestor;
  jj_popL1RequestQueue;
}

// Local write: must upgrade through the directory.
transition(L2_S, L1_GETX, L2_IM) {
  set_setMRU;
  nn_addSharer;
  i_allocateTBE;
  xx_recordGetXL1ID;
  b_issueGETX;
  uu_profileMiss;
  jj_popL1RequestQueue;
}
|
|
|
|
// BASE STATE - SS
// Transitions from SS (shared, with L1 copies)

// Eviction requires invalidating every L1 copy first.
transition(L2_SS, L2_Replacement, L2_SIV) {
  i_allocateTBE; // for internal request
  bbb_setPendingIntAcksToSharers;
  tt_issueSharedInvalidateIntL1copiesRequest;
}

// Directory invalidate: invalidate the L1 copies, ack the directory once done.
transition(L2_SS, L2_INV, L2_SIC) {
  i_allocateTBE; // for internal request
  yy_recordInvalidatorID;
  bbb_setPendingIntAcksToSharers;
  tt_issueSharedInvalidateIntL1copiesRequest;
  l_popForwardedRequestQueue;
}

// Local read hit: supply data and track the new sharer.
transition(L2_SS, {L1_GETS, L1_GET_INSTR}) {
  set_setMRU;
  ww_profileMissNoDir;
  nn_addSharer;
  k_dataFromL2CacheToL1Requestor;
  jj_popL1RequestQueue;
}

// Requestor is the sole sharer: only the external upgrade is needed.
transition(L2_SS, L1_UPGRADE_no_others, L2_IM) {
  set_setMRU;
  i_allocateTBE; // for both ext. and int.
  xx_recordGetXL1ID;
  b_issueGETX; // for external
  uu_profileMiss;
  jj_popL1RequestQueue;
}

// Upgrade with other sharers: invalidate them internally AND upgrade externally.
transition(L2_SS, L1_UPGRADE, L2_IMV) {
  set_setMRU;
  i_allocateTBE; // for both ext. and int.
  xx_recordGetXL1ID;
  ccc_setPendingIntAcksMinusOne;
  vv_issueInvalidateOtherIntL1copiesRequest; // for internal
  b_issueGETX; // for external
  uu_profileMiss;
  jj_popL1RequestQueue;
}

// GetX from an L1 that holds no copy: invalidate all current sharers
// internally, add the requestor, and upgrade externally.
transition(L2_SS, L1_GETX, L2_IMV) {
  set_setMRU;
  i_allocateTBE; // for both ext. and int.
  xx_recordGetXL1ID;
  bbb_setPendingIntAcksToSharers;
  vv_issueInvalidateOtherIntL1copiesRequest; // for internal
  nn_addSharer;
  b_issueGETX; // for external
  uu_profileMiss;
  jj_popL1RequestQueue;
}

// An L1 drops its shared copy; others remain.
transition(L2_SS, L1_PUTS) {
  ww_profileMissNoDir;
  w_sendPutAckToL1Cache;
  kk_removeRequestSharer;
  jj_popL1RequestQueue;
}

// The last L1 drops its shared copy; fall back to S.
transition(L2_SS, L1_PUTS_last, L2_S) {
  ww_profileMissNoDir;
  w_sendPutAckToL1Cache;
  kk_removeRequestSharer;
  jj_popL1RequestQueue;
}
|
|
|
|
// Transitions from SIC — invalidating L1 copies on behalf of a directory invalidate.

// One more internal ack collected.
transition(L2_SIC, Proc_int_ack) {
  aa_removeResponseSharer;
  r_decrementNumberOfPendingIntAcks;
  o_popIncomingResponseQueue;
}

// Final internal ack: ack the queued invalidator and finish.
transition(L2_SIC, Proc_last_int_ack, L2_I) {
  aa_removeResponseSharer;
  r_decrementNumberOfPendingIntAcks;
  o_popIncomingResponseQueue;
  zz_sendAckToQueuedInvalidator;
  s_deallocateTBE;
}

// Another directory invalidate: ignore — we already know an ack must be sent.
transition(L2_SIC, L2_INV) {
  l_popForwardedRequestQueue;
}

// Stall every L1 request.
transition(L2_SIC, {L1_GETS, L1_GET_INSTR, L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX}) {
  z_stall;
}
|
|
|
|
// Transitions from SIV — invalidating L1 copies because of an L2_Replacement.

// One more internal ack collected.
transition(L2_SIV, Proc_int_ack) {
  aa_removeResponseSharer;
  r_decrementNumberOfPendingIntAcks;
  o_popIncomingResponseQueue;
}

// Final internal ack: free the TBE and the cache entry.
transition(L2_SIV, Proc_last_int_ack, L2_I) {
  aa_removeResponseSharer;
  r_decrementNumberOfPendingIntAcks;
  o_popIncomingResponseQueue;
  s_deallocateTBE;
  rr_deallocateL2CacheBlock;
}

// Directory invalidate: stall — all acks are guaranteed to arrive, moving us
// to I where the L2_INV can be handled.
transition(L2_SIV, L2_INV) {
  z_stall;
}

// Stall every L1 request.
transition(L2_SIV, {L1_GETS, L1_GET_INSTR, L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX}) {
  z_stall;
}
|
|
|
|
  // ===============================================
  // BASE STATE - M
  // Transitions from M, no L1 copies

  // Evict a modified block: stash the dirty data in a TBE, issue a PUTX
  // to the directory, and wait for the writeback ack in L2_MIN.
  transition(L2_M, L2_Replacement, L2_MIN) {
    i_allocateTBE;
    d_issuePUTX;
    x_copyDataFromL2CacheToTBE;
    rr_deallocateL2CacheBlock;
  }

  // Another L2 bank reads: supply data and downgrade to owned (L2_O).
  transition(L2_M, {Forwarded_GETS,Forwarded_GET_INSTR}, L2_O) { // can see forwards, not inv
    e_dataFromL2CacheToL2Requestor;
    l_popForwardedRequestQueue;
  }

  // Another L2 bank writes: supply data and give up the block (L2_I).
  transition(L2_M, Forwarded_GETX, L2_I) { // can see forwards, not inv
    e_dataFromL2CacheToL2Requestor;
    l_popForwardedRequestQueue;
  }

  // Local L1 read: hand out the data, track the L1 as a sharer, move to
  // L2_SO (L2 remains owner).
  transition(L2_M, {L1_GETS, L1_GET_INSTR}, L2_SO) { // FIXME FOR BETTER PERFORMANCE - an E state would be nice here
    set_setMRU;
    ww_profileMissNoDir;
    nn_addSharer;
    k_dataFromL2CacheToL1Requestor;
    jj_popL1RequestQueue;
  }

  // Local L1 write: hand out the data exclusively; L2_MT tracks the
  // single modified L1 copy.
  transition(L2_M, L1_GETX, L2_MT) {
    set_setMRU;
    ww_profileMissNoDir;
    nn_addSharer;
    k_dataFromL2CacheToL1Requestor;
    jj_popL1RequestQueue;
  }
|
|
|
|
  // BASE STATE - MT
  // Transitions from MT: one local L1 holds the modified copy.

  // Evict: must first recall the dirty data from the L1 owner; the
  // invalidate is issued here and the data returns in L2_MIV.
  transition(L2_MT, L2_Replacement, L2_MIV) {
    i_allocateTBE;
    bbb_setPendingIntAcksToSharers;
    v_issueInvalidateIntL1copyRequest;
  }

  // External read: downgrade the L1 owner and record the external
  // requestor so data can be forwarded once it arrives (L2_MO).
  transition(L2_MT, {Forwarded_GETS, Forwarded_GET_INSTR}, L2_MO) { // can see forwards, not inv
    i_allocateTBE;
    bbb_setPendingIntAcksToSharers;
    g_issueDownGradeIntL1copiesRequest;
    dd_recordGetSForwardID;
    l_popForwardedRequestQueue;
  }

  // Local L1 read while another L1 owns the block: downgrade the owner,
  // remember the reading L1, add it as a sharer; data is delivered from
  // L2_MO when the downgrade response arrives.
  transition(L2_MT, {L1_GETS, L1_GET_INSTR}, L2_MO) {
    set_setMRU;
    ww_profileMissNoDir;
    i_allocateTBE;
    bbb_setPendingIntAcksToSharers;
    g_issueDownGradeIntL1copiesRequest;
    ss_recordGetSL1ID;
    nn_addSharer;
    jj_popL1RequestQueue;
  }

  // External write: invalidate the L1 owner, record the external GETX
  // requestor, and wait for the data in L2_MIC.
  transition(L2_MT, Forwarded_GETX, L2_MIC) { // can see forwards, not inv
    i_allocateTBE;
    bbb_setPendingIntAcksToSharers;
    v_issueInvalidateIntL1copyRequest;
    ii_recordGetXForwardID;
    l_popForwardedRequestQueue;
  }

  // Local L1 write while a different L1 owns the block: invalidate the
  // current owner and record the new writer; ownership is transferred
  // in L2_MIT once the data comes back.
  transition(L2_MT, L1_GETX, L2_MIT) {
    set_setMRU;
    ww_profileMissNoDir;
    i_allocateTBE;
    bbb_setPendingIntAcksToSharers;
    v_issueInvalidateIntL1copyRequest;
    nn_addSharer;
    xx_recordGetXL1ID;
    jj_popL1RequestQueue;
  }

  // The owning L1 writes back its modified copy: absorb the data into
  // L2 and return to L2_M (no L1 copies left).
  transition(L2_MT, L1_PUTX_last, L2_M) {
    ww_profileMissNoDir;
    w_sendPutAckToL1Cache;
    kk_removeRequestSharer;
    m_writeDataFromRequestQueueToL2Cache;
    jj_popL1RequestQueue;
  }
|
|
|
|
  // Transitions from L2_MIV, waiting for local L1 response
  // (replacement in progress; the dirty data must come back from the L1
  // owner before the block can be written back to the directory).

  // Data arrived from the L1 owner: satisfy any forwards recorded while
  // waiting, then issue the PUTX writeback and wait for the directory
  // ack in L2_MIN.
  transition(L2_MIV, Data_int_ack, L2_MIN) {
    aa_removeResponseSharer;
    u_writeDataFromResponseQueueToL2Cache;
    bb_dataFromL2CacheToGetSForwardIDs; // likely won't send any messages
    gg_dataFromL2CacheToGetXForwardID; // likely won't send any messages
    d_issuePUTX;
    x_copyDataFromL2CacheToTBE;
    rr_deallocateL2CacheBlock;
    o_popIncomingResponseQueue;
  }

  // Record forwarded reads seen while waiting; they are answered when
  // the data arrives (above).
  transition(L2_MIV, {Forwarded_GETS,Forwarded_GET_INSTR}) { // could see Forwards
    dd_recordGetSForwardID;
    l_popForwardedRequestQueue;
  }

  // Likewise record a forwarded write for later servicing.
  transition(L2_MIV, Forwarded_GETX) { // could see Forwards
    ii_recordGetXForwardID;
    l_popForwardedRequestQueue;
  }

  transition(L2_MIV, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall on all L1 requests
    z_stall;
  }
|
|
|
|
  // Transitions from L2_MIN, waiting for directory writeback ack.
  // The cache entry is gone; data lives only in the TBE.

  // Forwarded reads that raced with the writeback are served from the
  // TBE copy.
  transition(L2_MIN, {Forwarded_GETS,Forwarded_GET_INSTR}) { // could see Forwards
    y_dataFromTBEToRequestor;
    l_popForwardedRequestQueue;
  }

  // A racing forwarded write is likewise served from the TBE.
  transition(L2_MIN, Forwarded_GETX) { // could see Forwards
    y_dataFromTBEToRequestor;
    l_popForwardedRequestQueue;
  }

  // Directory acknowledged the writeback: release the TBE, done (L2_I).
  transition(L2_MIN, Dir_WB_ack, L2_I) {
    s_deallocateTBE;
    l_popForwardedRequestQueue;
  }

  transition(L2_MIN, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall all L1 requests
    z_stall;
  }
|
|
|
|
  // Transitions from L2_MIC, waiting for local L1 response.
  // Directory put us in this state with a forwarded GetX,
  // therefore we shouldn't see any more forwards;
  // we stall on all L1 requests.

  // Dirty data returned from the invalidated L1 owner: forward it to
  // the recorded external GETX requestor and drop to L2_I.
  transition(L2_MIC, Data_int_ack, L2_I) {
    aa_removeResponseSharer;
    u_writeDataFromResponseQueueToL2Cache;
    gg_dataFromL2CacheToGetXForwardID;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(L2_MIC, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall all L1 requests
    z_stall;
  }
|
|
|
|
  // Transitions from L2_MIT, waiting for local L1 response.
  // A local L1 request put us in this state, so any request is possible.
  // We currently stall all requests because of the ugly recursive path
  // it could lead us on; removing some of the blocking here could have
  // major performance benefits, however one must be careful not to
  // violate cache coherence.

  // Data returned from the old L1 owner: store it and complete the
  // pending local L1 store, returning to L2_MT with the new owner.
  transition(L2_MIT, Data_int_ack, L2_MT) {
    aa_removeResponseSharer;
    u_writeDataFromResponseQueueToL2Cache;
    hh_issueStoreHit; // internal requestor
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  // stall all requests
  transition(L2_MIT, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX, L1_GETS, L1_GET_INSTR, L1_GETX}) {
    z_stall;
  }
|
|
|
|
  // Transitions from L2_MO, waiting for local L1 data response.
  // A GetS request put us in this state; we must stall if we get a GETX
  // request.

  // Downgrade data arrived: write it into L2 and send copies to all
  // recorded GetS requestors, then settle in L2_SO.
  transition(L2_MO, Data_int_ack, L2_SO) {
    u_writeDataFromResponseQueueToL2Cache;
    ee_dataFromL2CacheToGetSIDs; // could be an internal or external requestor
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  // Additional reads while waiting are queued up for the data delivery
  // above.
  transition(L2_MO, {Forwarded_GETS, Forwarded_GET_INSTR}) { // can see forwards, not inv
    dd_recordGetSForwardID;
    l_popForwardedRequestQueue;
  }

  // A local L1 read is likewise queued (and the L1 registered as a
  // sharer) until the data arrives.
  transition(L2_MO, {L1_GETS, L1_GET_INSTR}) {
    set_setMRU;
    ww_profileMissNoDir;
    nn_addSharer;
    ss_recordGetSL1ID;
    jj_popL1RequestQueue;
  }

  // An external write arrives mid-downgrade: record it and switch to
  // L2_MOIC, which invalidates the fresh sharers after data delivery.
  transition(L2_MO, Forwarded_GETX, L2_MOIC) { // can see forwards, not inv
    ii_recordGetXForwardID;
    l_popForwardedRequestQueue;
  }

  transition(L2_MO, {L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}, L2_MOZ) { // don't go there, just go to a stall state
    z_stall;
  }
|
|
|
|
  // Transitions from L2_MOIC.
  // A Forwarded_GETX put us here so we should not see any more forwards.
  // Stall on all L1 requests; once data is received, send new data to
  // all queued-up L1 sharers, then immediately send invalidate requests
  // to those new L1 shared copies.
  //
  // KEY DIFFERENCE: L2_MOICR assumes the L1 data responder moved to I
  // state and removes the sharer, while L2_MOIC assumes the L1 data
  // responder moved to S state and doesn't remove the sharer.
  transition(L2_MOIC, Data_int_ack, L2_OIC) { // need only one ack
    u_writeDataFromResponseQueueToL2Cache;
    ee_dataFromL2CacheToGetSIDs;
    bbb_setPendingIntAcksToSharers;
    tt_issueSharedInvalidateIntL1copiesRequest;
    o_popIncomingResponseQueue;
  }

  transition(L2_MOIC, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}) {
    z_stall;
  }
|
|
|
|
  // Transitions from L2_MOICR.
  // A Forwarded_GETX put us here so we should not see any more forwards.
  // Stall on all L1 requests; once data is received, send new data to
  // all queued-up L1 sharers, then immediately send invalidate requests
  // to those new L1 shared copies.
  //
  // KEY DIFFERENCE: L2_MOICR assumes the L1 data responder moved to I
  // state and removes the sharer, while L2_MOIC assumes the L1 data
  // responder moved to S state and doesn't remove the sharer.
  transition(L2_MOICR, Data_int_ack, L2_OIC) { // need only one ack
    aa_removeResponseSharer; // responder went to I, so drop it (see note above)
    u_writeDataFromResponseQueueToL2Cache;
    ee_dataFromL2CacheToGetSIDs;
    bbb_setPendingIntAcksToSharers;
    tt_issueSharedInvalidateIntL1copiesRequest;
    o_popIncomingResponseQueue;
  }

  transition(L2_MOICR, {L1_GETS, L1_GET_INSTR, L1_GETX}) {
    z_stall;
  }
|
|
|
|
  // L2_MOZ
  // Simply wait on data; stall on everything else.

  // Data arrived: deliver to queued GetS requestors and settle in L2_SO
  // (identical handling to L2_MO's Data_int_ack).
  transition(L2_MOZ, Data_int_ack, L2_SO) {
    u_writeDataFromResponseQueueToL2Cache;
    ee_dataFromL2CacheToGetSIDs; // could be an internal or external requestor
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  // stall everything
  transition(L2_MOZ, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX, L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}) {
    z_stall;
  }
|
|
|
|
  // ===============================================
  // BASE STATE - O
  // Transitions from L2_O: only block cached on the chip (owner, no L1
  // copies).

  // Evict an owned block: write it back via PUTX and wait for the
  // directory ack in L2_OIN.
  transition(L2_O, L2_Replacement, L2_OIN){
    i_allocateTBE;
    x_copyDataFromL2CacheToTBE;
    d_issuePUTX;
    rr_deallocateL2CacheBlock;
  }

  // Owner services reads from other L2 banks; state is unchanged.
  transition(L2_O, {Forwarded_GETS,Forwarded_GET_INSTR}) {
    e_dataFromL2CacheToL2Requestor;
    l_popForwardedRequestQueue;
  }

  // An external write takes the block away entirely.
  transition(L2_O, Forwarded_GETX, L2_I) {
    e_dataFromL2CacheToL2Requestor;
    l_popForwardedRequestQueue;
  }

  // Local L1 read: supply data, register the sharer, move to L2_SO.
  transition(L2_O, {L1_GETS, L1_GET_INSTR}, L2_SO) {
    set_setMRU;
    ww_profileMissNoDir;
    nn_addSharer;
    k_dataFromL2CacheToL1Requestor;
    jj_popL1RequestQueue;
  }

  // Local L1 write: owner must still obtain exclusivity from the
  // directory, so issue an external GETX and wait in L2_OM.
  transition(L2_O, L1_GETX, L2_OM) {
    set_setMRU;
    nn_addSharer;
    i_allocateTBE;
    xx_recordGetXL1ID;
    b_issueGETX;
    uu_profileMiss;
    jj_popL1RequestQueue;
  }
|
|
|
|
  // BASE STATE - SO
  // Transitions from L2_SO: L2 is owner and other valid L1 cached
  // copies exist.

  // Evict: all L1 sharers must be invalidated first; collect their acks
  // in L2_OIV before the writeback can proceed.
  transition(L2_SO, L2_Replacement, L2_OIV){
    i_allocateTBE;
    x_copyDataFromL2CacheToTBE;
    bbb_setPendingIntAcksToSharers;
    tt_issueSharedInvalidateIntL1copiesRequest;
  }

  // Owner services external reads in place.
  transition(L2_SO, {Forwarded_GETS,Forwarded_GET_INSTR}) {
    e_dataFromL2CacheToL2Requestor;
    l_popForwardedRequestQueue;
  }

  // External write: invalidate all L1 sharers, record the requestor,
  // and finish the handoff in L2_OIC once the acks arrive.
  transition(L2_SO, Forwarded_GETX, L2_OIC) {
    i_allocateTBE;
    bbb_setPendingIntAcksToSharers;
    ii_recordGetXForwardID;
    tt_issueSharedInvalidateIntL1copiesRequest;
    l_popForwardedRequestQueue;
  }

  // Local L1 read: plain shared hit; state is unchanged.
  transition(L2_SO, {L1_GETS, L1_GET_INSTR}) {
    set_setMRU;
    ww_profileMissNoDir;
    nn_addSharer;
    k_dataFromL2CacheToL1Requestor;
    jj_popL1RequestQueue;
  }
|
|
|
|
  // Local L1 upgrade with other sharers present: invalidate the OTHER
  // internal copies (requestor keeps its copy, hence pending acks are
  // sharers minus one) and issue an external GETX; wait in L2_OMV.
  transition(L2_SO, L1_UPGRADE, L2_OMV) {
    set_setMRU;
    nn_addSharer;
    i_allocateTBE;
    xx_recordGetXL1ID;
    ccc_setPendingIntAcksMinusOne;
    vv_issueInvalidateOtherIntL1copiesRequest; // for internal
    b_issueGETX; // for external
    uu_profileMiss;
    jj_popL1RequestQueue;
  }

  // Upgrade when the requestor is the only sharer: no internal
  // invalidates needed, just the external GETX (wait in L2_OM).
  transition(L2_SO, L1_UPGRADE_no_others, L2_OM) {
    set_setMRU;
    i_allocateTBE;
    xx_recordGetXL1ID;
    b_issueGETX; // for external
    uu_profileMiss;
    jj_popL1RequestQueue;
  }

  // Local L1 write from a non-sharer: invalidate the other internal
  // copies, add the writer as a sharer, and issue the external GETX.
  transition(L2_SO, L1_GETX, L2_OMV) {
    set_setMRU;
    i_allocateTBE;
    xx_recordGetXL1ID;
    bbb_setPendingIntAcksToSharers;
    vv_issueInvalidateOtherIntL1copiesRequest;
    nn_addSharer;
    b_issueGETX; // for external
    uu_profileMiss;
    jj_popL1RequestQueue;
  }

  transition(L2_SO, {L1_PUTS, L1_PUTX}) { // PUTX possible because L2 downgraded before seeing PUTX
    ww_profileMissNoDir;
    w_sendPutAckToL1Cache;
    kk_removeRequestSharer;
    jj_popL1RequestQueue;
  }

  // Last L1 copy written back: L2 is the sole holder again (L2_O).
  transition(L2_SO, {L1_PUTS_last, L1_PUTX_last}, L2_O) { // PUTX possible because L2 downgraded before seeing PUTX
    ww_profileMissNoDir;
    w_sendPutAckToL1Cache;
    kk_removeRequestSharer;
    jj_popL1RequestQueue;
  }
|
|
|
|
  // Transitions from L2_OIV.
  // L2 replacement put us here; we must stall all L1 requests while the
  // internal invalidate acks are collected.

  // Forwarded reads can still be served from the TBE copy.
  transition(L2_OIV, {Forwarded_GETS, Forwarded_GET_INSTR}) {
    y_dataFromTBEToRequestor;
    l_popForwardedRequestQueue;
  }

  // A forwarded write cannot be answered until all internal acks are in.
  transition(L2_OIV, Forwarded_GETX) {
    z_stall;
  }
|
|
|
|
transition(L2_OIV, Proc_int_ack) {
|
|
aa_removeResponseSharer;
|
|
r_decrementNumberOfPendingIntAcks
|
|
o_popIncomingResponseQueue;
|
|
}
|
|
|
|
transition(L2_OIV, Proc_last_int_ack, L2_OIN) {
|
|
aa_removeResponseSharer;
|
|
r_decrementNumberOfPendingIntAcks
|
|
o_popIncomingResponseQueue;
|
|
d_issuePUTX;
|
|
rr_deallocateL2CacheBlock;
|
|
}
|
|
|
|
  // Block local L1 traffic until the replacement invalidation finishes.
  transition(L2_OIV, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX, L1_GETS, L1_GET_INSTR}) { // stall L1 requests
    z_stall;
  }
|
|
|
|
  // Transitions from L2_OIN.
  // L2 replacement put us here (writeback issued, awaiting directory
  // ack); we must stall all L1 requests.

  // Racing forwards are served from the TBE copy of the data.
  transition(L2_OIN, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX}) {
    y_dataFromTBEToRequestor;
    l_popForwardedRequestQueue;
  }

  // Directory acknowledged the writeback: release the TBE, done (L2_I).
  transition(L2_OIN, Dir_WB_ack, L2_I) {
    s_deallocateTBE;
    l_popForwardedRequestQueue;
  }

  transition(L2_OIN, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX, L1_GETS, L1_GET_INSTR}) { // stall L1 requests
    z_stall;
  }
|
|
|
|
// transitions from L2_OIC
|
|
// directory put us in this state, should not see any forwards
|
|
// we must stall all L1 requests
|
|
transition(L2_OIC, Proc_int_ack) {
|
|
aa_removeResponseSharer;
|
|
r_decrementNumberOfPendingIntAcks
|
|
o_popIncomingResponseQueue;
|
|
}
|
|
|
|
transition(L2_OIC, Proc_last_int_ack, L2_I) {
|
|
aa_removeResponseSharer;
|
|
r_decrementNumberOfPendingIntAcks
|
|
gg_dataFromL2CacheToGetXForwardID;
|
|
s_deallocateTBE;
|
|
o_popIncomingResponseQueue;
|
|
}
|
|
|
|
  // Block local L1 traffic until the external-GETX handoff completes.
  transition(L2_OIC, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX, L1_GETS, L1_GET_INSTR}) { // stall L1 requests
    z_stall;
  }
|
|
|
|
  // Transitions from L2_OMV:
  // internal acks needed, and waiting to see our Forwarded GETX from
  // the directory. If we see the Forwarded GETX before all invalidates
  // are received, stall. Stall all L1 requests.

  // One internal invalidate ack received; keep waiting.
  transition(L2_OMV, Proc_int_ack) {
    aa_removeResponseSharer;
    r_decrementNumberOfPendingIntAcks;
    o_popIncomingResponseQueue;
  }

  // Final internal ack: internal invalidation complete, continue the
  // exclusive-acquire protocol in L2_OM.
  transition(L2_OMV, Proc_last_int_ack, L2_OM) {
    aa_removeResponseSharer;
    r_decrementNumberOfPendingIntAcks;
    o_popIncomingResponseQueue;
  }

  // External acks for our GETX are counted separately.
  transition(L2_OMV, Proc_ext_ack) {
    q_decrementNumberOfPendingExtAcks;
    o_popIncomingResponseQueue;
  }

  transition(L2_OMV, {Forwarded_GETS, Forwarded_GET_INSTR}) { // these are GetS that beat us to the directory
    e_dataFromL2CacheToL2Requestor;
    l_popForwardedRequestQueue;
  }

  // Directory says we already hold exclusivity; finish collecting
  // internal acks in L2_MV.
  transition(L2_OMV, Dir_exe_ack, L2_MV) {
    l_popForwardedRequestQueue;
  }

  transition(L2_OMV, Forwarded_GETX) { // the Forwarded GetX may or may not be ours, we can't respond until int_acks received
    z_stall;
  }

  transition(L2_OMV, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETS, L1_GET_INSTR, L1_GETX}) { // must stall all L1 requests
    z_stall;
  }
|
|
|
|
  // Transitions from L2_OM:
  // all L1 copies invalid, no internal acks needed; waiting to see our
  // Forwarded GETX from the directory. Once we see the Forwarded GETX,
  // we can move to IM and wait for the data_ack. Stall all L1 requests.

  // Count external invalidate acks for our outstanding GETX.
  transition(L2_OM, Proc_ext_ack) {
    q_decrementNumberOfPendingExtAcks;
    o_popIncomingResponseQueue;
  }

  transition(L2_OM, {Forwarded_GETS, Forwarded_GET_INSTR}) { // these are GetS that beat us to the directory
    e_dataFromL2CacheToL2Requestor;
    l_popForwardedRequestQueue;
  }

  transition(L2_OM, Forwarded_GETX, L2_IM) { // the Forwarded GetX may or may not be ours
    e_dataFromL2CacheToL2Requestor; // we're probably sending a message to ourselves here, but not guaranteed
    l_popForwardedRequestQueue;
  }

  transition(L2_OM, Dir_exe_ack, L2_MT) { // Directory tells us we already have an exclusive copy
    hh_issueStoreHit;
    s_deallocateTBE;
    l_popForwardedRequestQueue;
  }

  transition(L2_OM, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETS, L1_GET_INSTR, L1_GETX}) { // must stall all L1 requests
    z_stall;
  }
|
|
|
|
}
|