2f30950143
We eventually plan to replace the m5 cache hierarchy with the GEMS hierarchy, but for now we will make both live alongside each other.
/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 *
 */

machine(L1Cache, "Directory protocol") {

  MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false";
  MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false";
  MessageBuffer unblockFromCache, network="To", virtual_network="3", ordered="false";

  MessageBuffer forwardToCache, network="From", virtual_network="1", ordered="false";
  MessageBuffer responseToCache, network="From", virtual_network="2", ordered="false";

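  // NOTE: each message class rides its own virtual network (0 = requests,
  // 1 = forwarded requests, 2 = responses, 3 = unblocks). Separating the
  // classes this way is the usual SLICC idiom for avoiding protocol-level
  // deadlock: a backed-up request network can never block the responses or
  // unblocks needed to retire an outstanding transaction.
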
  // STATES
  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    NP, desc="Not Present";
    I, desc="Idle";
    S, desc="Shared";
    O, desc="Owned";
    E, desc="Exclusive (clean)";
    M, desc="Modified (dirty)";
    MM, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, "IM", desc="Issued GetX";
    SM, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, "OM", desc="Issued GetX, received data";
    IS, "IS", desc="Issued GetS";
    OI, "OI", desc="Issued PutO, waiting for ack";
    MI, "MI", desc="Issued PutX, waiting for ack";
    II, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
  }

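  // NOTE: the transient-state names follow the common SLICC convention of
  // "<current><target>": IM is a block moving from I to M with a GETX
  // outstanding, OI is a block draining from O to I via PUTO, and II is a
  // writeback whose data has already been forwarded away, so the block
  // ends at I whether the directory acks or nacks it.
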
  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    L2_to_L1D, desc="L2 to L1-Data transfer";
    L2_to_L1I, desc="L2 to L1-Instruction transfer";

    // Requests
    Own_GETX, desc="We observe our own GetX forwarded back to us";
    Fwd_GETX, desc="A GetX from another processor";
    Fwd_GETS, desc="A GetS from another processor";
    Inv, desc="Invalidations from the directory";

    // Responses
    Ack, desc="Received an ack message";
    Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data_Clean, desc="Received a data message, no other processor has it, data is clean";
    Exclusive_Data_Dirty, desc="Received a data message, no other processor has it, data is dirty";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
  }

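  // NOTE: a TBE (transaction buffer entry, essentially an MSHR) is
  // allocated for every outstanding miss or writeback. Holding the data and
  // dirty bit in the TBE lets the cache block itself be deallocated while a
  // writeback is still in flight, which is how q_sendDataFromTBEToCache can
  // answer a forwarded request during the OI/MI states below.
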
  external_type(CacheMemory) {
    bool cacheAvail(Address);
    Address cacheProbe(Address);
    void allocate(Address);
    void deallocate(Address);
    Entry lookup(Address);
    void changePermission(Address, AccessPermission);
    bool isTagPresent(Address);
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

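  // NOTE: cacheAvail(a) asks whether the set holding 'a' has a free way,
  // and cacheProbe(a) names the victim that would have to be evicted to
  // make room. The mandatory-queue logic below combines the pair to choose
  // between a transfer and a replacement, in this pattern:
  //
  //   if (L2cacheMemory.cacheAvail(addr)) {
  //     trigger(Event:L1_to_L2, addr);                        // room in L2
  //   } else {
  //     trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(addr));
  //   }
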
  MessageBuffer mandatoryQueue, abstract_chip_ptr="true", ordered="false";
  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
  StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";

  TBETable TBEs, template_hack="<L1Cache_TBE>";
  CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
  CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
  CacheMemory L2cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L2"', abstract_chip_ptr="true";

  Entry getCacheEntry(Address addr), return_by_ref="yes" {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory[addr];
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory[addr];
    } else {
      return L1IcacheMemory[addr];
    }
  }

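  // NOTE: the three arrays are kept mutually exclusive (see the asserts in
  // getState/setState below), so at most one isTagPresent() test can
  // succeed; the probe order here is not a priority scheme.
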
  void changePermission(Address addr, AccessPermission permission) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory.changePermission(addr, permission);
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory.changePermission(addr, permission);
    } else {
      return L1IcacheMemory.changePermission(addr, permission);
    }
  }

  bool isCacheTagPresent(Address addr) {
    return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
  }

  State getState(Address addr) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    } else if (isCacheTagPresent(addr)) {
      return getCacheEntry(addr).CacheState;
    }
    return State:NP;
  }

  void setState(Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;
    }

    if (isCacheTagPresent(addr)) {
      getCacheEntry(addr).CacheState := state;

      if (state == State:E) {
        assert(getCacheEntry(addr).Dirty == false);
      }

      if ((state == State:M) || (state == State:MM)) {
        assert(getCacheEntry(addr).Dirty == true);
      }

      // Set permission
      if (state == State:MM) {
        changePermission(addr, AccessPermission:Read_Write);
      } else if ((state == State:S) ||
                 (state == State:O) ||
                 (state == State:M) ||
                 (state == State:E) ||
                 (state == State:SM) ||
                 (state == State:OM)) {
        changePermission(addr, AccessPermission:Read_Only);
      } else {
        changePermission(addr, AccessPermission:Invalid);
      }
    }
  }

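  // NOTE: only MM grants Read_Write, so even the first store to an E or M
  // block must go through a Store transition (hh_store_hit) to reach MM.
  // S/O/E/M and the upgrade states SM/OM stay readable; everything else
  // (NP/I and the remaining transient states) is Invalid.
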
  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
      return Event:Load;
    } else if (type == CacheRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid CacheRequestType");
    }
  }

  MessageBuffer triggerQueue, ordered="true";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

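  // NOTE: in_port order matters in SLICC -- ports appearing earlier in the
  // file are serviced first -- so the local trigger queue drains before
  // anything else, and the mandatory queue (declared last, below) has the
  // lowest priority: new processor requests are only accepted once
  // in-flight protocol traffic has been handled.
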
  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          if (in_msg.Requestor == machineID) {
            trigger(Event:Own_GETX, in_msg.Address);
          } else {
            trigger(Event:Fwd_GETX, in_msg.Address);
          }
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Fwd_GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE_CLEAN) {
          trigger(Event:Exclusive_Data_Clean, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE_DIRTY) {
          trigger(Event:Exclusive_Data_Dirty, in_msg.Address);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Mandatory Queue
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg) {

        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache

        if (in_msg.Type == CacheRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
            // The block is in the wrong L1, try to write it to the L2
            if (L2cacheMemory.cacheAvail(in_msg.Address)) {
              trigger(Event:L1_to_L2, in_msg.Address);
            } else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          }

          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
            // The tag matches in the L1, so the L1 fetches the line. We know it can't be in the L2 because of exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
          } else {
            if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
              // L1 doesn't have the line, but we have space for it in the L1
              if (L2cacheMemory.isTagPresent(in_msg.Address)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:L2_to_L1I, in_msg.Address);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
              }
            } else {
              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.Address))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.Address));
              } else {
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.Address)));
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
            // The block is in the wrong L1, try to write it to the L2
            if (L2cacheMemory.cacheAvail(in_msg.Address)) {
              trigger(Event:L1_to_L2, in_msg.Address);
            } else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          }

          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
            // The tag matches in the L1, so the L1 fetches the line. We know it can't be in the L2 because of exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
          } else {
            if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
              // L1 doesn't have the line, but we have space for it in the L1
              if (L2cacheMemory.isTagPresent(in_msg.Address)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:L2_to_L1D, in_msg.Address);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
              }
            } else {
              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.Address))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.Address));
              } else {
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.Address)));
              }
            }
          }
        }
      }
    }
  }

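  // NOTE: the hierarchy is managed exclusively -- a block lives in exactly
  // one of L1I, L1D, or L2 -- so a demand access may first have to migrate
  // the block out of the "wrong" structure (wrong-L1 -> L2, or L2 -> L1)
  // and possibly evict an L2 victim before the access itself can trigger;
  // the recycled mandatory-queue entry then retries until the path is clear.
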
  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      // TBEs[address].NumPendingMsgs := numberOfNodes(); // One from each other processor (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency="(ISSUE_LATENCY-1)") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      // TBEs[address].NumPendingMsgs := numberOfNodes(); // One from each other processor (n-1) plus the memory (+1)
    }
  }

  action(d_issuePUTX, "d", desc="Issue PUTX") {
    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dd_issuePUTO, "\d", desc="Issue PUTO") {
    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTO;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        out_msg.Acks := in_msg.Acks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE_DIRTY;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        out_msg.Acks := in_msg.Acks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 0 - 1; // -1
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

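  // NOTE: ack counting works without the requestor knowing the sharer count
  // up front (the NumPendingMsgs initialization in a_issueGETS/b_issueGETX
  // is left commented out). Data responses carry a positive Acks count
  // (presumably filled in by the directory when it forwards the request),
  // each invalidated sharer replies with Acks = -1 (written "0 - 1" above),
  // and m_decrementNumberOfMessages computes
  //   TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
  // Starting from the default of 0, the running sum returns to exactly 0
  // once the data and every ack have arrived, in any order, at which point
  // o_checkForCompletion fires All_acks.
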
  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    DEBUG_EXPR(getCacheEntry(address).DataBlk);
    sequencer.readCallback(address, getCacheEntry(address).DataBlk);
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    DEBUG_EXPR(getCacheEntry(address).DataBlk);
    sequencer.writeCallback(address, getCacheEntry(address).DataBlk);
    getCacheEntry(address).Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
    TBEs[address].Dirty := getCacheEntry(address).Dirty;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
    }
  }

  action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
    peek(forwardToCache_in, RequestMsg) {
      TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    if (TBEs[address].NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        out_msg.Type := TriggerType:ALL_ACKS;
      }
    }
  }

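  // NOTE: o_checkForCompletion does not complete the miss inline; it posts
  // an ALL_ACKS message to the local trigger queue so that "all responses
  // have arrived" is consumed later as an ordinary event (All_acks) in its
  // own transition. This keeps every state change inside the SLICC
  // transition/action framework instead of nesting one inside another.
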
  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.Dirty := TBEs[address].Dirty;
        out_msg.Acks := in_msg.Acks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := TBEs[address].Dirty;
      if (TBEs[address].Dirty) {
        out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY;
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      getCacheEntry(address).DataBlk := in_msg.DataBlk;
      getCacheEntry(address).Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
      getCacheEntry(address).DataBlk := in_msg.DataBlk;
      getCacheEntry(address).Dirty := in_msg.Dirty;
    }
  }

  action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (L1DcacheMemory.isTagPresent(address) == false) {
      L1DcacheMemory.allocate(address);
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (L1IcacheMemory.isTagPresent(address) == false) {
      L1IcacheMemory.allocate(address);
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    L2cacheMemory.allocate(address);
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
  }

  action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L2cacheMemory[address] := L1DcacheMemory[address];
    } else {
      L2cacheMemory[address] := L1IcacheMemory[address];
    }
  }

  action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory[address] := L2cacheMemory[address];
    } else {
      L1IcacheMemory[address] := L2cacheMemory[address];
    }
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, CacheMsg) {
      profile_miss(in_msg, id);
    }
  }

  action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    mandatoryQueue_in.recycle();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, SM, OM, IS, OI, MI, II}, {Store, L2_Replacement}) {
    zz_recycleMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II}, {Load, Ifetch}) {
    zz_recycleMandatoryQueue;
  }

  transition({IM, SM, OM, IS, OI, MI, II}, L1_to_L2) {
    zz_recycleMandatoryQueue;
  }

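  // NOTE: recycling moves the head of the mandatory queue to the tail
  // rather than stalling it, so a request that hits a block in a transient
  // state is retried later while younger, independent requests behind it
  // can still make progress. Loads and Ifetches are not recycled in SM/OM
  // because those states still hold a readable copy (see the {S, SM} and
  // {O, OM} hit transitions below).
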
  // Transitions moving data between the L1 and L2 caches
  transition({I, S, O, E, M, MM}, L1_to_L2) {
    vv_allocateL2CacheBlock;
    ss_copyFromL1toL2; // Not really needed for state I
    kk_deallocateL1CacheBlock;
  }

  transition({I, S, O, E, M, MM}, L2_to_L1D) {
    ii_allocateL1DCacheBlock;
    tt_copyFromL2toL1; // Not really needed for state I
    rr_deallocateL2CacheBlock;
  }

  transition({I, S, O, E, M, MM}, L2_to_L1I) {
    jj_allocateL1ICacheBlock;
    tt_copyFromL2toL1; // Not really needed for state I
    rr_deallocateL2CacheBlock;
  }

  // Transitions from Idle
  transition({NP, I}, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition({NP, I}, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition({NP, I}, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, L2_Replacement) {
    rr_deallocateL2CacheBlock;
  }

  transition({NP, I}, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  // Transitions from Shared
  transition({S, SM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(S, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
  }

  transition(S, Inv, I) {
    f_sendAck;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition({O, OM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(O, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    // p_decrementNumberOfMessagesByOne;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    dd_issuePUTO;
    rr_deallocateL2CacheBlock;
  }

  transition(O, Fwd_GETX, I) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(O, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  // Transitions from MM
  transition(MM, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUTX;
    rr_deallocateL2CacheBlock;
  }

  transition(MM, Fwd_GETX, I) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(MM, Fwd_GETS, I) {
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  // Transitions from E/M
  transition({E, M}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition({E, M}, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition({E, M}, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUTX;
    rr_deallocateL2CacheBlock;
  }

  transition({E, M}, Fwd_GETX, I) {
    e_sendData;
    l_popForwardQueue;
  }

  transition({E, M}, Fwd_GETS, O) {
    e_sendData;
    l_popForwardQueue;
  }

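  // NOTE: contrast the two owner responses to Fwd_GETS above: an E/M owner
  // supplies the data but keeps the line and moves to O (Owned) -- the
  // MOESI optimization that lets a dirty line be shared without an
  // immediate memory writeback -- while an MM owner hands the line off
  // exclusively (ee_sendDataExclusive) and drops to I.
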
  // Transitions from IM

  transition(IM, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, OM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from SM
  transition(SM, Inv, IM) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, Data, OM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

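  // NOTE: in SM the cache already holds a valid S copy of the line, so the
  // data arriving for the GETX upgrade must be byte-identical to what is
  // cached; v_writeDataToCacheVerify asserts exactly that before
  // overwriting it.
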
  // Transitions from OM
  transition(OM, Own_GETX) {
    mm_decrementNumberOfMessages;
    o_checkForCompletion;
    l_popForwardQueue;
  }

  transition(OM, Fwd_GETX, IM) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(OM, Fwd_GETS, OM) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(OM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, All_acks, MM) {
    hh_store_hit;
    gg_sendUnblockExclusive;
    s_deallocateTBE;
    j_popTriggerQueue;
  }

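  // NOTE: Own_GETX is this cache's own GETX arriving back on the forward
  // network, confirming the directory has ordered the upgrade.
  // mm_decrementNumberOfMessages peeks the forwarded message for its Acks
  // count (unlike m_decrementNumberOfMessages, which peeks the response
  // queue), folding it into the same NumPendingMsgs sum that
  // o_checkForCompletion tests.
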
  // Transitions from IS

  transition(IS, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Data, S) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    h_load_hit;
    g_sendUnblock;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IS, Exclusive_Data_Clean, E) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    h_load_hit;
    gg_sendUnblockExclusive;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IS, Exclusive_Data_Dirty, M) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    h_load_hit;
    gg_sendUnblockExclusive;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  // Transitions from OI/MI

  transition(MI, Fwd_GETS) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Fwd_GETX, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(OI, Fwd_GETS) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(OI, Fwd_GETX, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Nack, OI) {
    // FIXME: This might cause deadlock by re-using the writeback
    // channel, we should handle this case differently.
    dd_issuePUTO;
    l_popForwardQueue;
  }

  // Transitions from II
  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition(II, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }
}