a317764577
This patch is imported from ReviewBoard patch 2551 by Nilay. It moves from a dynamically defined MachineType to a statically defined one. The change is needed because a dynamically defined type prevents us from having types for which no machine definition exists. The following changes have been made: (i) each machine definition now uses a type from the MachineType enumeration instead of an arbitrary identifier, which required changing the grammar and the *.sm files; (ii) the MachineType enumeration is now defined statically in RubySlicc_Exports.sm. * * * normal protocol fixes for nilay's parser machine type fix
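As an illustration of change (i), each controller declaration in the *.sm files now names a member of the static MachineType enumeration. The "before" line below is reconstructed from the description above for illustration; it is not taken from this diff:

    machine(L1Cache, "Directory protocol")              // before: any identifier implicitly minted a MachineType
    machine(MachineType:L1Cache, "Directory protocol")  // after: the identifier must be a declared MachineType member

The file below shows the new form in its opening declaration.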
/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:L1Cache, "Directory protocol")
 : Sequencer * sequencer;
   CacheMemory * L1Icache;
   CacheMemory * L1Dcache;
   int l2_select_num_bits;
   Cycles request_latency := 2;
   Cycles use_timeout_latency := 50;
   bool send_evictions;

   // Message Queues
   // From this node's L1 cache TO the network
   // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
   MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
        vnet_type="request";
   // a local L1 -> this L2 bank
   MessageBuffer * responseFromL1Cache, network="To", virtual_network="2",
        vnet_type="response";

   // To this node's L1 cache FROM the network
   // an L2 bank -> this L1
   MessageBuffer * requestToL1Cache, network="From", virtual_network="0",
        vnet_type="request";
   // an L2 bank -> this L1
   MessageBuffer * responseToL1Cache, network="From", virtual_network="2",
        vnet_type="response";

   MessageBuffer * triggerQueue;

   MessageBuffer * mandatoryQueue;
{
  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    M_W, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
    MM_W, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, AccessPermission:Read_Only, "SM", desc="Issued GetX, received data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SI, AccessPermission:Busy, "OI", desc="Issued PutS, waiting for ack";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L1_Replacement, desc="Replacement";

    // Requests
    Own_GETX, desc="We observe our own GetX forwarded back to us";
    Fwd_GETX, desc="A GetX from another processor";
    Fwd_GETS, desc="A GetS from another processor";
    Fwd_DMA, desc="A forwarded DMA read request";
    Inv, desc="Invalidations from the directory";

    // Responses
    Ack, desc="Received an ack message";
    Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received an exclusive data message";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Ack_Data, desc="Writeback O.K. from directory, data requested";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";

    // Timeouts
    Use_Timeout, desc="lockout period ended";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Addr addr, desc="Physical address for this TBE";
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  Tick clockEdge();
  Tick cyclesToTicks(Cycles c);
  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
  TimerTable useTimerTable;
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
    if (is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
    return L1Icache_entry;
  }

  Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L1Dcache.lookup(addr));
  }

  Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L1Icache.lookup(addr));
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      if ( ((cache_entry.CacheState != State:M) && (state == State:M)) ||
           ((cache_entry.CacheState != State:MM) && (state == State:MM)) ||
           ((cache_entry.CacheState != State:S) && (state == State:S)) ||
           ((cache_entry.CacheState != State:O) && (state == State:O)) ) {

        cache_entry.CacheState := state;
        sequencer.checkCoherence(addr);
      }
      else {
        cache_entry.CacheState := state;
      }
    }
  }
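
  // Note (editorial): the assert above enforces the protocol invariant that a
  // block is never resident in both the L1 I-cache and the L1 D-cache at the
  // same time; the mandatory-queue port below preserves this by evicting the
  // copy in the "wrong" L1 before fetching. sequencer.checkCoherence is only
  // invoked when a line newly enters one of the stable readable states
  // (M, MM, S, O).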

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  void functionalRead(Addr addr, Packet *pkt) {
    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      testAndRead(addr, cache_entry.DataBlk, pkt);
    } else {
      TBE tbe := TBEs[addr];
      if (is_valid(tbe)) {
        testAndRead(addr, tbe.DataBlk, pkt);
      } else {
        error("Data block missing!");
      }
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, cache_entry.DataBlk, pkt);
      return num_functional_writes;
    }

    TBE tbe := TBEs[addr];
    num_functional_writes := num_functional_writes +
      testAndWrite(addr, tbe.DataBlk, pkt);
    return num_functional_writes;
  }
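
  // Note (editorial): unlike functionalRead, functionalWrite falls through to
  // the TBE without an is_valid() check; it relies on the caller only issuing
  // functional writes for blocks present in either the cache or an active TBE.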

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

  // Use Timer
  in_port(useTimerTable_in, Addr, useTimerTable) {
    if (useTimerTable_in.isReady(clockEdge())) {
      Addr readyAddress := useTimerTable.nextAddress();
      trigger(Event:Use_Timeout, readyAddress, getCacheEntry(readyAddress),
              TBEs.lookup(readyAddress));
    }
  }

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady(clockEdge())) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Request Network
  in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
    if (requestNetwork_in.isReady(clockEdge())) {
      peek(requestNetwork_in, RequestMsg, block_on="addr") {
        assert(in_msg.Destination.isElement(machineID));
        DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);

        if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
            trigger(Event:Own_GETX, in_msg.addr,
                    getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
          } else {
            trigger(Event:Fwd_GETX, in_msg.addr,
                    getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
          }
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Fwd_GETS, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:Fwd_DMA, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
          trigger(Event:Writeback_Ack_Data, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Response Network
  in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache) {
    if (responseToL1Cache_in.isReady(clockEdge())) {
      peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.addr,
                  getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Mandatory Queue between the node's CPU and its L1 caches
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 asks the L2 for it.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry,
                    TBEs[in_msg.LineAddress]);
          } else {

            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            // Check to see if it is in the OTHER L1
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, put the request on the queue to the shared L2
              trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
                      TBEs[in_msg.LineAddress]);
            }
            if (L1Icache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type),
                      in_msg.LineAddress, L1Icache_entry,
                      TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement,
                      L1Icache.cacheProbe(in_msg.LineAddress),
                      getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
                      TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 asks the L2 for it
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry,
                    TBEs[in_msg.LineAddress]);
          } else {

            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            // Check to see if it is in the OTHER L1
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, put the request on the queue to the shared L2
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Icache_entry, TBEs[in_msg.LineAddress]);
            }
            if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type),
                      in_msg.LineAddress, L1Dcache_entry,
                      TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement,
                      L1Dcache.cacheProbe(in_msg.LineAddress),
                      getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
                      TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
            }
          }
        }
      }
    }
  }
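
  // Note (editorial): in SLICC a trigger() call performs its transition and
  // ends the current port evaluation, so when the block sits in the "wrong"
  // L1 the L1_Replacement above fires first and the mandatory request is
  // re-examined on a later wakeup, after the other copy is gone. This is what
  // keeps the setState assert (no block in both L1s) intact.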

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, RequestMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits, intToID(0)));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      }
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, RequestMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits, intToID(0)));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      }
    }
  }

  action(d_issuePUTX, "d", desc="Issue PUTX") {
    enqueue(requestNetwork_out, RequestMsg, request_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dd_issuePUTO, "\d", desc="Issue PUTO") {
    enqueue(requestNetwork_out, RequestMsg, request_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTO;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
    enqueue(requestNetwork_out, RequestMsg, request_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTS;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.RequestorMachine == MachineType:L2Cache) {
        enqueue(responseNetwork_out, ResponseMsg, request_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits, intToID(0)));
          out_msg.DataBlk := cache_entry.DataBlk;
          // out_msg.Dirty := cache_entry.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        DPRINTF(RubySlicc, "Sending data to L2: %#x\n", in_msg.addr);
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, request_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := cache_entry.DataBlk;
          // out_msg.Dirty := cache_entry.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
        DPRINTF(RubySlicc, "Sending data to L1\n");
      }
    }
  }

  action(e_sendDataToL2, "ee", desc="Send data from cache to requestor") {
    enqueue(responseNetwork_out, ResponseMsg, request_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Acks := 0; // irrelevant
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.RequestorMachine == MachineType:L2Cache) {
        enqueue(responseNetwork_out, ResponseMsg, request_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits, intToID(0)));
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := cache_entry.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, request_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := cache_entry.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
        DPRINTF(RubySlicc, "Sending exclusive data to L1\n");
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      if (in_msg.RequestorMachine == MachineType:L1Cache) {
        enqueue(responseNetwork_out, ResponseMsg, request_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Acks := 0 - 1; // -1, written this way for the SLICC expression parser
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, request_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits, intToID(0)));
          out_msg.Acks := 0 - 1; // -1, written this way for the SLICC expression parser
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(responseNetwork_out, ResponseMsg, request_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
    enqueue(responseNetwork_out, ResponseMsg, request_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(h_ifetch_hit, "hi", desc="Notify the sequencer about ifetch completion.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(hx_load_hit, "hx", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.readCallback(address, cache_entry.DataBlk, true);
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(xx_store_hit, "\xx", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.writeCallback(address, cache_entry.DataBlk, true);
    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    assert(is_valid(cache_entry));
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue(clockEdge());
  }

  action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
    useTimerTable.unset(address);
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    requestNetwork_in.dequeue(clockEdge());
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
    }
  }

  action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToL1Cache_in.dequeue(clockEdge());
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.addr := address;
        out_msg.Type := TriggerType:ALL_ACKS;
      }
    }
  }
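
  // Note (editorial): NumPendingMsgs is an order-independent running count.
  // A data response carries a positive Acks equal to the number of
  // invalidation acks still outstanding, while each ack carries Acks := -1
  // (see f_sendAck). Both are subtracted above, so the count reaches zero
  // exactly when the data and every ack have arrived, regardless of arrival
  // order; o_checkForCompletion then queues the ALL_ACKS trigger that
  // surfaces as the All_acks event.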

  action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
    useTimerTable.set(address,
                      clockEdge() + cyclesToTicks(use_timeout_latency));
  }

  action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits, intToID(0)));
        out_msg.Dirty := false;
        out_msg.Acks := 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      if (in_msg.RequestorMachine == MachineType:L1Cache ||
          in_msg.RequestorMachine == MachineType:DMA) {
        enqueue(responseNetwork_out, ResponseMsg, request_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := tbe.DataBlk;
          // out_msg.Dirty := tbe.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, request_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits, intToID(0)));
          out_msg.DataBlk := tbe.DataBlk;
          // out_msg.Dirty := tbe.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
      }
    }
  }

  action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send data from TBE to cache") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      if (in_msg.RequestorMachine == MachineType:L1Cache) {
        enqueue(responseNetwork_out, ResponseMsg, request_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := tbe.DataBlk;
          out_msg.Dirty := tbe.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, request_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits, intToID(0)));
          out_msg.DataBlk := tbe.DataBlk;
          out_msg.Dirty := tbe.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
      }
    }
  }

  // L2 will usually request data for a writeback
  action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
    enqueue(responseNetwork_out, ResponseMsg, request_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY_DATA;
      } else {
        out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN_DATA;
      }
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;

      if (in_msg.Type == CoherenceResponseType:DATA) {
        //assert(in_msg.Dirty == false);
      }
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
    } else {
      L1Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Dcache.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Icache.allocate(address, new Entry));
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="Sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
    ++L1Icache.demand_misses;
  }

  action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
    ++L1Icache.demand_hits;
  }

  action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
    ++L1Dcache.demand_misses;
  }

  action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
    ++L1Dcache.demand_hits;
  }

  action(z_recycleRequestQueue, "z", desc="Send the head of the forwarded request queue to the back of the queue.") {
    requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }
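
  // Note (editorial): the recycle actions above move the head message to the
  // back of its queue rather than stalling it; the transient-state
  // transitions below use them to defer requests that cannot be processed
  // yet (e.g. a Store arriving while a writeback is still awaiting its ack).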

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/L1_Replacement from transient states
  transition({IM, SM, OM, IS, OI, SI, MI, II}, {Store, L1_Replacement}) {
    zz_recycleMandatoryQueue;
  }

  transition({M_W, MM_W}, L1_Replacement) {
    zz_recycleMandatoryQueue;
  }

  transition({M_W, MM_W}, {Fwd_GETS, Fwd_DMA, Fwd_GETX, Own_GETX, Inv}) {
    z_recycleRequestQueue;
  }

  transition({IM, IS, OI, MI, SI, II}, {Load, Ifetch}) {
    zz_recycleMandatoryQueue;
  }

  // Transitions from Idle
  transition(I, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileInstMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(I, L1_Replacement) {
    kk_deallocateL1CacheBlock;
  }

  transition(I, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition({S, SM, O, OM, MM, MM_W, M, M_W}, Load) {
    h_load_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition({S, SM, O, OM, MM, MM_W, M, M_W}, Ifetch) {
    h_ifetch_hit;
    uu_profileInstHit;
    k_popMandatoryQueue;
  }

  // Transitions from Shared
  transition(S, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(S, L1_Replacement, SI) {
    i_allocateTBE;
    dd_issuePUTS;
    forward_eviction_to_cpu;
    kk_deallocateL1CacheBlock;
  }

  transition(S, Inv, I) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(S, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(S, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition(O, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(O, L1_Replacement, OI) {
    i_allocateTBE;
    dd_issuePUTO;
    forward_eviction_to_cpu;
    kk_deallocateL1CacheBlock;
  }

  transition(O, Fwd_GETX, I) {
    ee_sendDataExclusive;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(O, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(O, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from MM
  transition({MM, MM_W}, Store) {
    hh_store_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition(MM, L1_Replacement, MI) {
    i_allocateTBE;
    d_issuePUTX;
    forward_eviction_to_cpu;
    kk_deallocateL1CacheBlock;
  }

  transition(MM, Fwd_GETX, I) {
    ee_sendDataExclusive;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, Fwd_GETS, I) {
    ee_sendDataExclusive;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, Fwd_DMA, MM) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from M
  transition(M, Store, MM) {
    hh_store_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition(M, L1_Replacement, MI) {
    i_allocateTBE;
    d_issuePUTX;
    forward_eviction_to_cpu;
    kk_deallocateL1CacheBlock;
  }

  transition(M, Fwd_GETX, I) {
    // e_sendData;
    ee_sendDataExclusive;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(M, Fwd_GETS, O) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(M, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from IM

  transition(IM, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, {Exclusive_Data, Data}, OM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from SM
  transition(SM, Inv, IM) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(SM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, OM) {
    // v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(SM, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from OM
  transition(OM, Own_GETX) {
    mm_decrementNumberOfMessages;
    o_checkForCompletion;
    l_popForwardQueue;
  }

  // transition(OM, Fwd_GETX, OMF) {
  transition(OM, Fwd_GETX, IM) {
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(OM, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(OM, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  //transition({OM, OMF}, Ack) {
  transition(OM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, All_acks, MM_W) {
    xx_store_hit;
    gg_sendUnblockExclusive;
    s_deallocateTBE;
    o_scheduleUseTimeout;
    j_popTriggerQueue;
  }

  transition(MM_W, Use_Timeout, MM) {
    jj_unsetUseTimer;
  }

  // Transitions from IS

  transition(IS, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Data, S) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    hx_load_hit;
    g_sendUnblock;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    hx_load_hit;
    gg_sendUnblockExclusive;
    o_scheduleUseTimeout;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M_W, Use_Timeout, M) {
    jj_unsetUseTimer;
  }

  // Transitions from OI/MI

  transition(MI, Fwd_GETS, OI) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Fwd_DMA) {
    q_sendDataFromTBEToCache;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  transition(MI, Fwd_GETX, II) {
    q_sendExclusiveDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI}, Fwd_GETS) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI}, Fwd_DMA) {
    q_sendDataFromTBEToCache;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  transition(OI, Fwd_GETX, II) {
    q_sendExclusiveDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI, MI}, Writeback_Ack_Data, I) {
    qq_sendWBDataFromTBEToL2; // always send data
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition({SI, OI, MI}, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition({MI, OI}, Writeback_Nack, OI) {
    // FIXME: This might cause deadlock by re-using the writeback
    // channel, we should handle this case differently.
    dd_issuePUTO;
    l_popForwardQueue;
  }

  // Transitions from II
  transition(II, {Writeback_Ack, Writeback_Ack_Data}, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  // transition({II, SI}, Writeback_Nack, I) {
  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition(SI, Writeback_Nack) {
    dd_issuePUTS;
    l_popForwardQueue;
  }

  transition(II, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SI, Inv, II) {
    f_sendAck;
    l_popForwardQueue;
  }
}