2009-11-19 01:34:31 +01:00
|
|
|
/*
|
|
|
|
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
|
2009-11-19 01:34:32 +01:00
|
|
|
* Copyright (c) 2009 Advanced Micro Devices, Inc.
|
2009-11-19 01:34:31 +01:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions are
|
|
|
|
* met: redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer;
|
|
|
|
* redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution;
|
|
|
|
* neither the name of the copyright holders nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived from
|
|
|
|
* this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
2009-11-19 01:34:32 +01:00
|
|
|
*
|
|
|
|
* AMD's contributions to the MOESI hammer protocol do not constitute an
|
|
|
|
* endorsement of its similarity to any AMD products.
|
|
|
|
*
|
|
|
|
* Authors: Milo Martin
|
|
|
|
* Brad Beckmann
|
2009-11-19 01:34:31 +01:00
|
|
|
*/
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
machine(Directory, "AMD Hammer-like protocol")
|
2010-01-30 05:29:19 +01:00
|
|
|
: DirectoryMemory * directory,
|
2010-08-20 20:46:14 +02:00
|
|
|
CacheMemory * probeFilter,
|
2010-01-30 05:29:19 +01:00
|
|
|
MemoryControl * memBuffer,
|
2010-08-20 20:46:14 +02:00
|
|
|
int memory_controller_latency = 2,
|
|
|
|
bool probe_filter_enabled = false
|
2009-11-19 01:34:32 +01:00
|
|
|
{
|
|
|
|
|
2010-03-22 05:22:21 +01:00
|
|
|
MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false";
|
|
|
|
MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false";
|
2009-11-19 01:34:32 +01:00
|
|
|
//
|
|
|
|
// For a finite buffered network, note that the DMA response network only
|
2010-03-22 05:22:21 +01:00
|
|
|
// works at this relatively lower numbered (lower priority) virtual network
|
2009-11-19 01:34:32 +01:00
|
|
|
// because the trigger queue decouples cache responses from DMA responses.
|
|
|
|
//
|
2010-03-22 05:22:21 +01:00
|
|
|
MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true";
|
2009-11-19 01:34:32 +01:00
|
|
|
|
2010-03-22 05:22:21 +01:00
|
|
|
MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false";
|
|
|
|
MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false";
|
2010-08-20 20:46:14 +02:00
|
|
|
MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", recycle_latency="1";
|
2010-03-22 05:22:21 +01:00
|
|
|
MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true";
|
2009-11-19 01:34:31 +01:00
|
|
|
|
|
|
|
// STATES
|
|
|
|
// Directory controller states.  With no TBE and no probe filter entry the
// block defaults to E: memory is the exclusive owner.
enumeration(State, desc="Directory states", default="Directory_State_E") {
  // Base states
  NX, desc="Not Owner, probe filter entry exists, block in O at Owner";
  NO, desc="Not Owner, probe filter entry exists, block in E/M at Owner";
  S, desc="Data clean, probe filter entry exists pointing to the current owner";
  O, desc="Data clean, probe filter entry exists";
  E, desc="Exclusive Owner, no probe filter entry";

  // Transient states used while a probe filter entry is being replaced
  // (see the Pf_Replacement event).
  O_R, desc="Was data Owner, replacing probe filter entry";
  S_R, desc="Was Not Owner or Sharer, replacing probe filter entry";
  NO_R, desc="Was Not Owner or Sharer, replacing probe filter entry";

  // Blocked states: waiting for an unblock and/or for DRAM.
  NO_B, "NO^B", desc="Not Owner, Blocked";
  O_B, "O^B", desc="Owner, Blocked";
  NO_B_W, desc="Not Owner, Blocked, waiting for Dram";
  O_B_W, desc="Owner, Blocked, waiting for Dram";
  NO_W, desc="Not Owner, waiting for Dram";
  O_W, desc="Owner, waiting for Dram";

  // DMA transient states: collecting cache responses and/or DRAM data.
  NO_DW_B_W, desc="Not Owner, Dma Write waiting for Dram and cache responses";
  NO_DR_B_W, desc="Not Owner, Dma Read waiting for Dram and cache responses";
  NO_DR_B_D, desc="Not Owner, Dma Read waiting for cache responses including dirty data";
  NO_DR_B, desc="Not Owner, Dma Read waiting for cache responses";
  NO_DW_W, desc="Not Owner, Dma Write waiting for Dram";
  O_DR_B_W, desc="Owner, Dma Read waiting for Dram and cache responses";
  O_DR_B, desc="Owner, Dma Read waiting for cache responses";

  // Writeback transient states.
  WB, desc="Blocked on a writeback";
  WB_O_W, desc="Blocked on memory write, will go to O";
  WB_E_W, desc="Blocked on memory write, will go to E";
}
|
|
|
|
|
|
|
|
// Events
|
|
|
|
// Directory events, derived from incoming cache requests, unblocks,
// responses, DMA requests, memory replies, and internal trigger messages.
enumeration(Event, desc="Directory events") {
  GETX, desc="A GETX arrives";
  GETS, desc="A GETS arrives";
  PUT, desc="A PUT arrives";
  Unblock, desc="An unblock message arrives";
  // Fixed copy-paste descs: UnblockS/UnblockM are distinct message types
  // (UNBLOCKS / UNBLOCKM in unblockNetwork_in).
  UnblockS, desc="An unblockS message arrives";
  UnblockM, desc="An unblockM message arrives";
  Writeback_Clean, desc="The final part of a PutX (no data)";
  Writeback_Dirty, desc="The final part of a PutX (data)";
  Writeback_Exclusive_Clean, desc="The final part of a PutX (no data, exclusive)";
  Writeback_Exclusive_Dirty, desc="The final part of a PutX (data, exclusive)";

  // Probe filter
  Pf_Replacement, desc="probe filter replacement";

  // DMA requests
  DMA_READ, desc="A DMA Read memory request";
  DMA_WRITE, desc="A DMA Write memory request";

  // Memory Controller
  Memory_Data, desc="Fetched data from memory arrives";
  Memory_Ack, desc="Writeback Ack from memory arrives";

  // Cache responses required to handle DMA
  Ack, desc="Received an ack message";
  Shared_Ack, desc="Received an ack message, responder has a shared copy";
  Shared_Data, desc="Received a data message, responder has a shared copy";
  Data, desc="Received a data message, responder had a owner or exclusive copy, they gave it to us";
  Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

  // Triggers (from triggerQueue_in once all pending responses arrive)
  All_acks_and_shared_data, desc="Received shared data and message acks";
  // Fixed desc: this event is for owner data (was a duplicate of the
  // shared-data desc above).
  All_acks_and_owner_data, desc="Received owner data and message acks";
  All_acks_and_data_no_sharers, desc="Received all acks and no other processor has a shared copy";
}
|
|
|
|
|
|
|
|
// TYPES
|
|
|
|
|
|
|
|
// DirectoryEntry
|
2010-01-30 05:29:19 +01:00
|
|
|
// DirectoryEntry: per-block directory state plus the memory copy of the data.
structure(Entry, desc="...", interface="AbstractEntry") {
  State DirectoryState, desc="Directory state";
  DataBlock DataBlk, desc="data for the block";
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// ProbeFilterEntry
|
|
|
|
// ProbeFilterEntry: tracks the single owner of a block so probes can be
// directed rather than broadcast.  PfState mirrors DirectoryState
// (asserted in getState).
structure(PfEntry, desc="...", interface="AbstractCacheEntry") {
  State PfState, desc="Directory state";
  MachineID Owner, desc="Owner node";
  DataBlock DataBlk, desc="data for the block";
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// TBE entries for DMA requests
|
|
|
|
// Transaction buffer entry for outstanding directory transactions
// (primarily DMA reads/writes that must collect cache responses).
structure(TBE, desc="TBE entries for outstanding DMA requests") {
  Address PhysicalAddress, desc="physical address";
  State TBEState, desc="Transient State";
  CoherenceResponseType ResponseType, desc="The type for the subsequent response message";
  int Acks, default="0", desc="The number of acks that the waiting response represents";
  DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to merged with system memory";
  DataBlock DataBlk, desc="The current view of system memory";
  int Len, desc="...";
  MachineID DmaRequestor, desc="DMA requestor";
  int NumPendingMsgs, desc="Number of pending acks/messages";
  bool CacheDirty, default="false", desc="Indicates whether a cache has responded with dirty data";
  bool Sharers, default="false", desc="Indicates whether a cache has indicated it is currently a sharer";
  // Fixed copy-paste desc: Owned is set by so_setOwnerBit when a cache
  // responds as owner, not as a sharer.
  bool Owned, default="false", desc="Indicates whether a cache has indicated it is currently the owner";
}
|
|
|
|
|
|
|
|
// Interface to the externally-implemented TBE table, keyed by address.
external_type(TBETable) {
  TBE lookup(Address);
  void allocate(Address);
  void deallocate(Address);
  bool isPresent(Address);
}
|
|
|
|
|
2009-11-19 01:34:31 +01:00
|
|
|
// ** OBJECTS **
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
TBETable TBEs, template_hack="<Directory_TBE>";
|
|
|
|
|
2010-01-30 05:29:19 +01:00
|
|
|
// Look up the directory entry for addr (downcast from AbstractEntry).
Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
  return static_cast(Entry, directory[addr]);
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Look up the probe filter entry for addr (downcast from AbstractCacheEntry).
// Callers must check probeFilter.isTagPresent(addr) first.
PfEntry getPfEntry(Address addr), return_by_ref="yes" {
  return static_cast(PfEntry, probeFilter[addr]);
}
|
|
|
|
|
2009-11-19 01:34:31 +01:00
|
|
|
// Current state for addr: a TBE's transient state takes precedence over
// the directory state.  When the probe filter is enabled, also check the
// invariant that a present PF entry mirrors the directory state and an
// absent one implies E (memory exclusive).
State getState(Address addr) {
  if (TBEs.isPresent(addr)) {
    return TBEs[addr].TBEState;
  } else {
    if (probe_filter_enabled) {
      if (probeFilter.isTagPresent(addr)) {
        assert(getPfEntry(addr).PfState == getDirectoryEntry(addr).DirectoryState);
      } else {
        assert(getDirectoryEntry(addr).DirectoryState == State:E);
      }
    }
    return getDirectoryEntry(addr).DirectoryState;
  }
}
|
|
|
|
|
|
|
|
// Update the state for addr in all the places it is tracked: the TBE (if
// any), the probe filter entry (if any), and the directory itself.
void setState(Address addr, State state) {
  if (TBEs.isPresent(addr)) {
    TBEs[addr].TBEState := state;
  }
  if (probe_filter_enabled) {
    if (probeFilter.isTagPresent(addr)) {
      getPfEntry(addr).PfState := state;
    }
    // Base states other than E require a probe filter entry to exist.
    if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
      assert(probeFilter.isTagPresent(addr));
    }
  }
  // Base (non-transient) states must never coexist with a TBE.
  if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
      state == State:O) {
    assert(TBEs.isPresent(addr) == false);
  }
  getDirectoryEntry(addr).DirectoryState := state;
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Map a cache coherence request type to the corresponding directory event.
// Only GETS/GETX are legal here; PUT is handled separately in
// requestQueue_in.
Event cache_request_to_event(CoherenceRequestType type) {
  if (type == CoherenceRequestType:GETS) {
    return Event:GETS;
  } else if (type == CoherenceRequestType:GETX) {
    return Event:GETX;
  } else {
    error("Invalid CoherenceRequestType");
  }
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
MessageBuffer triggerQueue, ordered="true";
|
|
|
|
|
2009-11-19 01:34:31 +01:00
|
|
|
// ** OUT_PORTS **
|
2009-11-19 01:34:32 +01:00
|
|
|
out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
|
2009-11-19 01:34:31 +01:00
|
|
|
out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
|
|
|
|
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
|
2009-11-19 01:34:32 +01:00
|
|
|
out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
|
|
|
|
out_port(triggerQueue_out, TriggerMsg, triggerQueue);
|
2009-11-19 01:34:31 +01:00
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
//
|
|
|
|
// Memory buffer for memory controller to DIMM communication
|
|
|
|
//
|
|
|
|
out_port(memQueue_out, MemoryMsg, memBuffer);
|
|
|
|
|
2009-11-19 01:34:31 +01:00
|
|
|
// ** IN_PORTS **
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// Trigger Queue
|
2010-08-20 20:46:14 +02:00
|
|
|
// Trigger Queue: internal completion notifications enqueued by
// o_checkForCompletion / sc_signalCompletionIfPF.  Highest rank (5) so
// completions are processed before new external messages.
in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
  if (triggerQueue_in.isReady()) {
    peek(triggerQueue_in, TriggerMsg) {
      if (in_msg.Type == TriggerType:ALL_ACKS) {
        trigger(Event:All_acks_and_owner_data, in_msg.Address);
      } else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
        trigger(Event:All_acks_and_shared_data, in_msg.Address);
      } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
        trigger(Event:All_acks_and_data_no_sharers, in_msg.Address);
      } else {
        error("Unexpected message");
      }
    }
  }
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Unblock network: unblock and writeback-completion messages from caches.
in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
  if (unblockNetwork_in.isReady()) {
    peek(unblockNetwork_in, ResponseMsg) {
      if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
        trigger(Event:Unblock, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
        trigger(Event:UnblockS, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
        trigger(Event:UnblockM, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
        trigger(Event:Writeback_Clean, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
        trigger(Event:Writeback_Dirty, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
        trigger(Event:Writeback_Exclusive_Clean, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
        trigger(Event:Writeback_Exclusive_Dirty, in_msg.Address);
      } else {
        error("Invalid message");
      }
    }
  }
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// Response Network
|
2010-08-20 20:46:14 +02:00
|
|
|
// Response Network: cache acks/data collected while the directory is
// gathering responses (e.g. on behalf of a DMA request).
in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
  if (responseToDir_in.isReady()) {
    peek(responseToDir_in, ResponseMsg) {
      if (in_msg.Type == CoherenceResponseType:ACK) {
        trigger(Event:Ack, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
        trigger(Event:Shared_Ack, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
        trigger(Event:Shared_Data, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:DATA) {
        trigger(Event:Data, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
        trigger(Event:Exclusive_Data, in_msg.Address);
      } else {
        error("Unexpected message");
      }
    }
  }
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// off-chip memory request/response is done
|
2010-08-20 20:46:14 +02:00
|
|
|
// Memory controller responses: off-chip read data or writeback ack.
in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
  if (memQueue_in.isReady()) {
    peek(memQueue_in, MemoryMsg) {
      if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
        trigger(Event:Memory_Data, in_msg.Address);
      } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
        trigger(Event:Memory_Ack, in_msg.Address);
      } else {
        DEBUG_EXPR(in_msg.Type);
        error("Invalid message");
      }
    }
  }
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Cache request network.  PUTs trigger directly; GETS/GETX go through the
// probe filter first: if no entry exists and no way is available, a
// Pf_Replacement is triggered on the victim address instead of the
// requested address.
in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
  if (requestQueue_in.isReady()) {
    peek(requestQueue_in, RequestMsg) {
      if (in_msg.Type == CoherenceRequestType:PUT) {
        trigger(Event:PUT, in_msg.Address);
      } else {
        if (probe_filter_enabled) {
          if (probeFilter.isTagPresent(in_msg.Address)) {
            trigger(cache_request_to_event(in_msg.Type), in_msg.Address);
          } else {
            if (probeFilter.cacheAvail(in_msg.Address)) {
              trigger(cache_request_to_event(in_msg.Type), in_msg.Address);
            } else {
              // Evict the PF victim before the request can be handled.
              trigger(Event:Pf_Replacement, probeFilter.cacheProbe(in_msg.Address));
            }
          }
        } else {
          trigger(cache_request_to_event(in_msg.Type), in_msg.Address);
        }
      }
    }
  }
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// DMA request network (lowest rank: cache traffic takes priority).
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
  if (dmaRequestQueue_in.isReady()) {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      if (in_msg.Type == DMARequestType:READ) {
        trigger(Event:DMA_READ, in_msg.LineAddress);
      } else if (in_msg.Type == DMARequestType:WRITE) {
        trigger(Event:DMA_WRITE, in_msg.LineAddress);
      } else {
        error("Invalid message");
      }
    }
  }
}
|
|
|
|
|
2009-11-19 01:34:31 +01:00
|
|
|
// Actions
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Touch the probe filter entry so the replacement policy sees it as MRU.
// No-op when the probe filter is disabled.
action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
  if (probe_filter_enabled) {
    assert(probeFilter.isTagPresent(address));
    probeFilter.setMRU(address);
  }
}

// Sanity check: the node sending the unblock must not be the recorded
// PF owner (used on paths where ownership is not transferring).
action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
  if (probe_filter_enabled) {
    assert(probeFilter.isTagPresent(address));
    peek(unblockNetwork_in, ResponseMsg) {
      assert(getPfEntry(address).Owner != in_msg.Sender);
    }
  }
}

// Record the unblock sender as the new PF owner.
action(uo_updateOwnerIfPf, "uo", desc="update owner") {
  if (probe_filter_enabled) {
    assert(probeFilter.isTagPresent(address));
    peek(unblockNetwork_in, ResponseMsg) {
      getPfEntry(address).Owner := in_msg.Sender;
    }
  }
}
|
|
|
|
|
2009-11-19 01:34:31 +01:00
|
|
|
// Grant the PUT: tell the requestor its writeback may proceed.
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:WB_ACK;
      out_msg.Requestor := in_msg.Requestor;
      out_msg.Destination.add(in_msg.Requestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
}

// Deny the PUT: the requestor must retry or drop the writeback.
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:WB_NACK;
      out_msg.Requestor := in_msg.Requestor;
      out_msg.Destination.add(in_msg.Requestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Allocate a PF entry for the requested block and record the requestor
// as its owner.  No-op when the probe filter is disabled.
action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
  if (probe_filter_enabled) {
    peek(requestQueue_in, RequestMsg) {
      probeFilter.allocate(address, new PfEntry);
      getPfEntry(in_msg.Address).Owner := in_msg.Requestor;
    }
  }
}

// Deallocate the PF entry (entry must exist).
action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
  if (probe_filter_enabled) {
    probeFilter.deallocate(address);
  }
}

// Deallocate the PF entry only if one exists (safe on either path).
action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry") {
  if (probe_filter_enabled && probeFilter.isTagPresent(address)) {
    probeFilter.deallocate(address);
  }
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// Allocate a TBE for a cache request; response type filled in later by
// rx_recordExclusiveInTBE / r_recordDataInTBE.
action(v_allocateTBE, "v", desc="Allocate TBE") {
  peek(requestQueue_in, RequestMsg) {
    TBEs.allocate(address);
    TBEs[address].PhysicalAddress := address;
    TBEs[address].ResponseType := CoherenceResponseType:NULL;
  }
}

// Allocate and populate a TBE for a DMA request.
action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
  peek(dmaRequestQueue_in, DMARequestMsg) {
    TBEs.allocate(address);
    TBEs[address].DmaDataBlk := in_msg.DataBlk;
    TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
    TBEs[address].Len := in_msg.Len;
    TBEs[address].DmaRequestor := in_msg.Requestor;
    TBEs[address].ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
    //
    // One ack for each last-level cache
    //
    TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache);
    //
    // Assume initially that the caches store a clean copy and that memory
    // will provide the data
    //
    TBEs[address].CacheDirty := false;
  }
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Expect a response from every last-level cache (broadcast case).
action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
  TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache);
}

// Expect a single response (directed-probe case).
action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
  TBEs[address].NumPendingMsgs := 1;
}

// Release the TBE.
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
  TBEs.deallocate(address);
}

// The eventual data response will carry a single ack (directed probe).
action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
  TBEs[address].Acks := 1;
}

// With a probe filter the requestor must count acks from all caches;
// without one, a single ack suffices.
action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
  if (probe_filter_enabled) {
    TBEs[address].Acks := machineCount(MachineType:L1Cache);
  } else {
    TBEs[address].Acks := 1;
  }
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// Count down one pending cache response.
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
  peek(responseToDir_in, ResponseMsg) {
    assert(in_msg.Acks > 0);
    DEBUG_EXPR(TBEs[address].NumPendingMsgs);
    //
    // Note that cache data responses will have an ack count of 2. However,
    // directory DMA requests must wait for acks from all LLC caches, so
    // only decrement by 1.
    //
    TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1;
    DEBUG_EXPR(TBEs[address].NumPendingMsgs);
  }
}

// Consume the head of the response queue.
action(n_popResponseQueue, "n", desc="Pop response queue") {
  responseToDir_in.dequeue();
}

// When the last pending response has arrived, enqueue a trigger whose
// type encodes whether sharers and/or an owner were observed.
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
  if (TBEs[address].NumPendingMsgs == 0) {
    enqueue(triggerQueue_out, TriggerMsg) {
      out_msg.Address := address;
      if (TBEs[address].Sharers) {
        if (TBEs[address].Owned) {
          out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS;
        }
      } else {
        out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
      }
    }
  }
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// With a probe filter the directory knows there are no other cached
// copies, so no cache acks need to be awaited.
action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
  if (probe_filter_enabled) {
    TBEs[address].NumPendingMsgs := 0;
  }
}

// If nothing is pending (only possible with the probe filter), signal
// completion immediately via the trigger queue.
action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
  if (TBEs[address].NumPendingMsgs == 0) {
    assert(probe_filter_enabled);
    enqueue(triggerQueue_out, TriggerMsg) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
    }
  }
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// Send memory data to the original cache requestor, tagged with the
// response type and ack count recorded in the TBE.
action(d_sendData, "d", desc="Send data to requestor") {
  peek(memQueue_in, MemoryMsg) {
    enqueue(responseNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := TBEs[address].ResponseType;
      out_msg.Sender := machineID;
      out_msg.Destination.add(in_msg.OriginalRequestorMachId);
      out_msg.DataBlk := in_msg.DataBlk;
      DEBUG_EXPR(out_msg.DataBlk);
      out_msg.Dirty := false; // By definition, the block is now clean
      out_msg.Acks := TBEs[address].Acks;
      DEBUG_EXPR(out_msg.Acks);
      assert(out_msg.Acks > 0);
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }
}

// Forward memory read data to the DMA controller.
action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
  peek(memQueue_in, MemoryMsg) {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:DATA;
      //
      // we send the entire data block and rely on the dma controller to
      // split it up if need be
      //
      out_msg.DataBlk := in_msg.DataBlk;
      out_msg.Destination.add(TBEs[address].DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }
}

// Send the TBE's (cache-supplied) data block to the DMA controller.
action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
  peek(triggerQueue_in, TriggerMsg) {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:DATA;
      //
      // we send the entire data block and rely on the dma controller to
      // split it up if need be
      //
      out_msg.DataBlk := TBEs[address].DataBlk;
      out_msg.Destination.add(TBEs[address].DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }
}

// Acknowledge a completed DMA write to the DMA controller.
action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
  enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
    out_msg.PhysicalAddress := address;
    out_msg.LineAddress := address;
    out_msg.Type := DMAResponseType:ACK;
    out_msg.Destination.add(TBEs[address].DmaRequestor);
    out_msg.MessageSize := MessageSizeType:Writeback_Control;
  }
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// The eventual data response to this requestor will be exclusive (GETX).
action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
  peek(requestQueue_in, RequestMsg) {
    TBEs[address].ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
  }
}

// The eventual data response to this requestor will be plain data (GETS).
action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
  peek(requestQueue_in, RequestMsg) {
    TBEs[address].ResponseType := CoherenceResponseType:DATA;
  }
}

// Remember that at least one responder holds a shared copy.
action(r_setSharerBit, "r", desc="We saw other sharers") {
  TBEs[address].Sharers := true;
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Remember that a responder holds the block as owner (an owner is also
// counted as a sharer for completion-trigger selection).
// Fixed copy-paste desc: this action records the owner, not "other
// sharers" (that is r_setSharerBit).
action(so_setOwnerBit, "so", desc="We saw the owner") {
  TBEs[address].Sharers := true;
  TBEs[address].Owned := true;
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// Queue an off-chip read on behalf of a cache requestor.
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_READ;
      out_msg.Sender := machineID;
      out_msg.OriginalRequestorMachId := in_msg.Requestor;
      out_msg.MessageSize := in_msg.MessageSize;
      out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
      DEBUG_EXPR(out_msg);
    }
  }
}

// Queue an off-chip read on behalf of a DMA requestor.
action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request") {
  peek(dmaRequestQueue_in, DMARequestMsg) {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_READ;
      out_msg.Sender := machineID;
      out_msg.OriginalRequestorMachId := in_msg.Requestor;
      out_msg.MessageSize := in_msg.MessageSize;
      out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
      DEBUG_EXPR(out_msg);
    }
  }
}
|
2009-11-19 01:34:32 +01:00
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Broadcast the request to all other caches, but only when there is more
// than one cache and the TBE does not already account for all acks
// (Acks <= 1 means the request was not already forwarded/probed).
action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
  if ((machineCount(MachineType:L1Cache) > 1) && (TBEs[address].Acks <= 1)) {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
        out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := get_time();
      }
    }
  }
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Broadcast an invalidation (on behalf of the directory itself, e.g. for
// a DMA write) to every L1 cache.  Skipped in single-cache systems.
action(ia_invalidateAllRequest, "ia", desc="invalidate all copies") {
  if (machineCount(MachineType:L1Cache) > 1) {
    enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:INV;
      out_msg.Requestor := machineID;
      out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
      out_msg.MessageSize := MessageSizeType:Broadcast_Control;
    }
  }
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Send a directed invalidation probe to the probe filter's recorded
// owner only.  Skipped in single-cache systems.
// Fixed copy-paste desc: unlike ia_invalidateAllRequest this does not
// broadcast; it targets only getPfEntry(address).Owner.
action(io_invalidateOwnerRequest, "io", desc="invalidate the owner's copy") {
  if (machineCount(MachineType:L1Cache) > 1) {
    enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:INV;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(getPfEntry(address).Owner);
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.DirectedProbe := true;
    }
  }
}
|
|
|
|
|
|
|
|
// Unconditionally broadcast the cache request to all other L1 caches
// (no probe filter information available).  Skipped in single-cache
// systems.
action(fb_forwardRequestBcast, "fb", desc="Forward requests to all nodes") {
  if (machineCount(MachineType:L1Cache) > 1) {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
        out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := get_time();
      }
    }
  }
}
|
|
|
|
|
|
|
|
// Forward the cache request: with a probe filter, as a directed probe to
// the recorded owner only; without one, as a broadcast to all other L1s.
action(fc_forwardRequestConditionalOwner, "fc", desc="Forward request to one or more nodes") {
  assert(machineCount(MachineType:L1Cache) > 1);
  if (probe_filter_enabled) {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(getPfEntry(address).Owner);
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.DirectedProbe := true;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := get_time();
      }
    }
  } else {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
        out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := get_time();
      }
    }
  }
}
|
|
|
|
|
|
|
|
action(f_forwardWriteFromDma, "fw", desc="Forward requests") {
    // On a DMA write, invalidate all cached copies by broadcasting a GETX on
    // behalf of the directory. Skipped when no responses are outstanding
    // (NumPendingMsgs == 0), i.e. no caches need to be probed.
    if (TBEs[address].NumPendingMsgs > 0) {
        peek(dmaRequestQueue_in, DMARequestMsg) {
            enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
                out_msg.Address := address;
                out_msg.Type := CoherenceRequestType:GETX;
                //
                // Send to all L1 caches, since the requestor is the memory controller
                // itself
                //
                out_msg.Requestor := machineID;
                out_msg.Destination.broadcast(MachineType:L1Cache);
                out_msg.MessageSize := MessageSizeType:Broadcast_Control;
            }
        }
    }
}
|
|
|
|
|
|
|
|
action(f_forwardReadFromDma, "fr", desc="Forward requests") {
    // On a DMA read, probe all caches with a GETS (issued by the directory
    // itself) to retrieve the most recent copy of the block. Skipped when no
    // responses are outstanding (NumPendingMsgs == 0).
    if (TBEs[address].NumPendingMsgs > 0) {
        peek(dmaRequestQueue_in, DMARequestMsg) {
            enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
                out_msg.Address := address;
                out_msg.Type := CoherenceRequestType:GETS;
                //
                // Send to all L1 caches, since the requestor is the memory controller
                // itself
                //
                out_msg.Requestor := machineID;
                out_msg.Destination.broadcast(MachineType:L1Cache);
                out_msg.MessageSize := MessageSizeType:Broadcast_Control;
            }
        }
    }
}
|
|
|
|
|
2009-11-19 01:34:31 +01:00
|
|
|
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    // Consume the CPU request message that triggered this transition.
    requestQueue_in.dequeue();
}
|
|
|
|
|
|
|
|
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    // Record the unblocker's identity in the transition trace for debugging,
    // then consume the unblock message.
    peek(unblockNetwork_in, ResponseMsg) {
        APPEND_TRANSITION_COMMENT(in_msg.Sender);
    }
    unblockNetwork_in.dequeue();
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
action(k_wakeUpDependents, "k", desc="wake-up dependents") {
    // Reschedule any requests previously stalled (via stall_and_wait) on this
    // address, now that the blocking condition has cleared.
    wake_up_dependents(address);
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    // Consume the response from the off-chip memory controller.
    memQueue_in.dequeue();
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
    // Consume the internally generated trigger event (e.g. all-acks).
    triggerQueue_in.dequeue();
}
|
|
|
|
|
|
|
|
action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
    // Consume the DMA request that triggered this transition.
    dmaRequestQueue_in.dequeue();
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait the dma request queue") {
    // Note the stalled requestor in the transition trace, then park the DMA
    // request on this address until a later wake_up_dependents call.
    peek(dmaRequestQueue_in, DMARequestMsg) {
        APPEND_TRANSITION_COMMENT(in_msg.Requestor);
    }
    stall_and_wait(dmaRequestQueue_in, address);
}
|
|
|
|
|
|
|
|
action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
    // Stash the memory response data into the TBE, but only if a cache has
    // not already supplied dirty data (cache data is fresher than memory).
    peek(memQueue_in, MemoryMsg) {
        if (TBEs[address].CacheDirty == false) {
            TBEs[address].DataBlk := in_msg.DataBlk;
        }
    }
}
|
|
|
|
|
|
|
|
action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
    // A cache supplied the block: mark the TBE so a later memory response
    // does not overwrite it (see r_recordMemoryData), and record the data.
    peek(responseToDir_in, ResponseMsg) {
        TBEs[address].CacheDirty := true;
        TBEs[address].DataBlk := in_msg.DataBlk;
    }
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
action(wr_writeResponseDataToMemory, "wr", desc="Write response data to memory") {
    // Commit data carried by a cache response directly into the directory's
    // copy of memory (used on invalidation-for-replacement paths).
    peek(responseToDir_in, ResponseMsg) {
        getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
        DEBUG_EXPR(in_msg.Address);
        DEBUG_EXPR(in_msg.DataBlk);
    }
}
|
|
|
|
|
2009-11-19 01:34:31 +01:00
|
|
|
action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
    // Commit dirty writeback data into the directory's memory copy.
    // Only dirty writebacks carry data, hence both assertions.
    peek(unblockNetwork_in, ResponseMsg) {
        assert(in_msg.Dirty);
        assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
        getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
        DEBUG_EXPR(in_msg.Address);
        DEBUG_EXPR(in_msg.DataBlk);
    }
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    // Merge a (possibly partial) DMA write into memory: first restore the
    // full block collected in the TBE, then overlay the DMA bytes at the
    // proper offset/length within the block.
    DEBUG_EXPR(getDirectoryEntry(address).DataBlk);
    getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
    DEBUG_EXPR(getDirectoryEntry(address).DataBlk);
    getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
    DEBUG_EXPR(getDirectoryEntry(address).DataBlk);
}
|
|
|
|
|
|
|
|
action(wdt_writeDataFromTBE, "wdt", desc="DMA Write data to memory from TBE") {
    // Copy the full data block gathered in the TBE back to memory
    // (no partial overlay, unlike dwt_writeDmaDataFromTBE).
    DEBUG_EXPR(getDirectoryEntry(address).DataBlk);
    getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
    DEBUG_EXPR(getDirectoryEntry(address).DataBlk);
}
|
|
|
|
|
|
|
|
action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
    // Sanity check: in this state the TBE data must have come from a cache
    // (CacheDirty set by r_recordCacheData), not from memory.
    assert(TBEs[address].CacheDirty);
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
action(ano_assertNotOwner, "ano", desc="Assert that request is not current owner") {
    // With a probe filter, the tracked owner should never re-request its own
    // block; check only applies when the filter is enabled.
    if (probe_filter_enabled) {
        peek(requestQueue_in, RequestMsg) {
            assert(getPfEntry(address).Owner != in_msg.Requestor);
        }
    }
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    // Issue a writeback request to the off-chip memory controller for the
    // data just received on the unblock network.
    peek(unblockNetwork_in, ResponseMsg) {
        enqueue(memQueue_out, MemoryMsg, latency="1") {
            out_msg.Address := address;
            out_msg.Type := MemoryRequestType:MEMORY_WB;
            DEBUG_EXPR(out_msg);
        }
    }
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
    // Issue the merged DMA write to the memory controller: start from the
    // block image collected in the TBE and overlay the DMA bytes.
    enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        // first, initialize the data blk to the current version of system memory
        out_msg.DataBlk := TBEs[address].DataBlk;
        // then add the dma write data
        out_msg.DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
        DEBUG_EXPR(out_msg);
    }
}
|
|
|
|
|
2009-11-19 01:34:31 +01:00
|
|
|
action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
    // Validate a clean writeback: it must be marked clean and use the
    // control-only message size (clean writebacks carry no payload in a
    // real system).
    peek(unblockNetwork_in, ResponseMsg) {
        assert(in_msg.Dirty == false);
        assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

        // NOTE:  The following check would not be valid in a real
        // implementation.  We include the data in the "dataless"
        // message so we can assert the clean data matches the datablock
        // in memory
        assert(getDirectoryEntry(address).DataBlk == in_msg.DataBlk);
    }
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
action(z_stallAndWaitRequest, "z", desc="Recycle the request queue") {
    // Note the stalled requestor in the transition trace, then park the CPU
    // request on this address until a later wake_up_dependents call.
    peek(requestQueue_in, RequestMsg) {
        APPEND_TRANSITION_COMMENT(in_msg.Requestor);
    }
    stall_and_wait(requestQueue_in, address);
}
|
|
|
|
|
|
|
|
// TRANSITIONS
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// Transitions out of E state
// E: memory holds the only valid copy (no cache has the block).

// CPU GETX while memory is exclusive owner: allocate a probe filter entry
// and TBE, fetch from memory, and (if no probe filter) broadcast probes.
transition(E, GETX, NO_B_W) {
    pfa_probeFilterAllocate;
    v_allocateTBE;
    rx_recordExclusiveInTBE;
    saa_setAcksToAllIfPF;
    qf_queueMemoryFetchRequest;
    fn_forwardRequestIfNecessary;
    i_popIncomingRequestQueue;
}

// CPU GETS from E follows the same path as GETX: the requester will receive
// the block exclusively since no other cache has it.
transition(E, GETS, NO_B_W) {
    pfa_probeFilterAllocate;
    v_allocateTBE;
    rx_recordExclusiveInTBE;
    saa_setAcksToAllIfPF;
    qf_queueMemoryFetchRequest;
    fn_forwardRequestIfNecessary;
    i_popIncomingRequestQueue;
}

// DMA read while memory owns the block: fetch from memory, no probes needed
// when the probe filter confirms no sharers.
transition(E, DMA_READ, NO_DR_B_W) {
    vd_allocateDmaRequestInTBE;
    qd_queueMemoryRequestFromDmaRead;
    spa_setPendingAcksToZeroIfPF;
    f_forwardReadFromDma;
    p_popDmaRequestQueue;
}

// DMA write while memory owns the block: no cached copies to invalidate when
// the probe filter is on, so completion can be signalled immediately.
transition(E, DMA_WRITE, NO_DW_B_W) {
    vd_allocateDmaRequestInTBE;
    spa_setPendingAcksToZeroIfPF;
    sc_signalCompletionIfPF;
    f_forwardWriteFromDma;
    p_popDmaRequestQueue;
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// Transitions out of O state
// O: memory owns the block but caches may hold shared copies.

// CPU GETX: fetch from memory, expect one ack, and broadcast invalidations.
transition(O, GETX, NO_B_W) {
    r_setMRU;
    v_allocateTBE;
    r_recordDataInTBE;
    sa_setAcksToOne;
    qf_queueMemoryFetchRequest;
    fb_forwardRequestBcast;
    i_popIncomingRequestQueue;
}

// This transition is dumb, if a shared copy exists on-chip, then that should
// provide data, not slow off-chip dram.  The problem is that the current
// caches don't provide data in S state
transition(O, GETS, O_B_W) {
    r_setMRU;
    v_allocateTBE;
    r_recordDataInTBE;
    saa_setAcksToAllIfPF;
    qf_queueMemoryFetchRequest;
    fn_forwardRequestIfNecessary;
    i_popIncomingRequestQueue;
}

// DMA read while memory is owner: fetch from memory and probe if needed.
transition(O, DMA_READ, O_DR_B_W) {
    vd_allocateDmaRequestInTBE;
    spa_setPendingAcksToZeroIfPF;
    qd_queueMemoryRequestFromDmaRead;
    f_forwardReadFromDma;
    p_popDmaRequestQueue;
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Probe filter replacement: evict a probe filter entry by invalidating all
// cached copies, collecting acks/data in the *_R states.

transition(O, Pf_Replacement, O_R) {
    v_allocateTBE;
    pa_setPendingMsgsToAll;
    ia_invalidateAllRequest;
    pfd_probeFilterDeallocate;
}

transition(S, Pf_Replacement, S_R) {
    v_allocateTBE;
    pa_setPendingMsgsToAll;
    ia_invalidateAllRequest;
    pfd_probeFilterDeallocate;
}

// NO tracks a single owner, so only one invalidation/ack is needed.
transition(NO, Pf_Replacement, NO_R) {
    v_allocateTBE;
    po_setPendingMsgsToOne;
    io_invalidateOwnerRequest;
    pfd_probeFilterDeallocate;
}

transition(NX, Pf_Replacement, NO_R) {
    v_allocateTBE;
    pa_setPendingMsgsToAll;
    ia_invalidateAllRequest;
    pfd_probeFilterDeallocate;
}

// DMA write from any non-exclusive-memory state: probe caches to invalidate
// copies before merging the DMA data.
transition({O, S, NO, NX}, DMA_WRITE, NO_DW_B_W) {
    vd_allocateDmaRequestInTBE;
    f_forwardWriteFromDma;
    p_popDmaRequestQueue;
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
// Transitions out of NO state
|
|
|
|
transition(NX, GETX, NO_B) {
|
|
|
|
r_setMRU;
|
|
|
|
fb_forwardRequestBcast;
|
|
|
|
i_popIncomingRequestQueue;
|
|
|
|
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// Transitions out of NO state
|
2009-11-19 01:34:31 +01:00
|
|
|
transition(NO, GETX, NO_B) {
|
2010-08-20 20:46:14 +02:00
|
|
|
r_setMRU;
|
|
|
|
ano_assertNotOwner;
|
|
|
|
fc_forwardRequestConditionalOwner;
|
2009-11-19 01:34:31 +01:00
|
|
|
i_popIncomingRequestQueue;
|
|
|
|
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
transition(S, GETX, NO_B) {
|
|
|
|
r_setMRU;
|
|
|
|
fb_forwardRequestBcast;
|
2009-11-19 01:34:31 +01:00
|
|
|
i_popIncomingRequestQueue;
|
|
|
|
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
transition(S, GETS, NO_B) {
|
|
|
|
r_setMRU;
|
|
|
|
ano_assertNotOwner;
|
|
|
|
fb_forwardRequestBcast;
|
|
|
|
i_popIncomingRequestQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition({NX, NO}, GETS, NO_B) {
|
|
|
|
r_setMRU;
|
|
|
|
ano_assertNotOwner;
|
|
|
|
fc_forwardRequestConditionalOwner;
|
|
|
|
i_popIncomingRequestQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition({NO, NX, S}, PUT, WB) {
|
|
|
|
//
|
|
|
|
// note that the PUT requestor may not be the current owner if an invalidate
|
|
|
|
// raced with PUT
|
|
|
|
//
|
2009-11-19 01:34:31 +01:00
|
|
|
a_sendWriteBackAck;
|
|
|
|
i_popIncomingRequestQueue;
|
|
|
|
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
transition({NO, NX, S}, DMA_READ, NO_DR_B_D) {
|
2009-11-19 01:34:32 +01:00
|
|
|
vd_allocateDmaRequestInTBE;
|
|
|
|
f_forwardReadFromDma;
|
|
|
|
p_popDmaRequestQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Nack PUT requests when races cause us to believe we own the data
|
2009-11-19 01:34:31 +01:00
|
|
|
transition({O, E}, PUT) {
|
|
|
|
b_sendWriteBackNack;
|
|
|
|
i_popIncomingRequestQueue;
|
|
|
|
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// Blocked transient states
// While a transaction is in flight, new CPU requests for the same block are
// parked until wake_up_dependents fires.
transition({NO_B, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
            NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W,
            NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R},
           {GETS, GETX, PUT, Pf_Replacement}) {
    z_stallAndWaitRequest;
}

// Likewise park conflicting DMA requests during in-flight transactions.
transition({NO_B, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
            NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W,
            NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R},
           {DMA_READ, DMA_WRITE}) {
    zd_stallAndWaitDMARequest;
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
transition(NO_B, UnblockS, NX) {
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:31 +01:00
|
|
|
j_popIncomingUnblockQueue;
|
|
|
|
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
transition(NO_B, UnblockM, NO) {
|
|
|
|
uo_updateOwnerIfPf;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2010-08-20 20:46:14 +02:00
|
|
|
j_popIncomingUnblockQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(O_B, UnblockS, O) {
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:31 +01:00
|
|
|
j_popIncomingUnblockQueue;
|
|
|
|
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
transition(NO_B_W, Memory_Data, NO_B) {
|
|
|
|
d_sendData;
|
|
|
|
w_deallocateTBE;
|
|
|
|
l_popMemQueue;
|
|
|
|
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
transition(NO_DR_B_W, Memory_Data, NO_DR_B) {
|
|
|
|
r_recordMemoryData;
|
|
|
|
o_checkForCompletion;
|
|
|
|
l_popMemQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(O_DR_B_W, Memory_Data, O_DR_B) {
|
|
|
|
r_recordMemoryData;
|
|
|
|
dr_sendDmaData;
|
|
|
|
o_checkForCompletion;
|
|
|
|
l_popMemQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition({NO_DR_B, O_DR_B, NO_DR_B_D, NO_DW_B_W}, Ack) {
|
|
|
|
m_decrementNumberOfMessages;
|
|
|
|
o_checkForCompletion;
|
|
|
|
n_popResponseQueue;
|
|
|
|
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
transition({O_R, S_R, NO_R}, Ack) {
|
|
|
|
m_decrementNumberOfMessages;
|
|
|
|
o_checkForCompletion;
|
|
|
|
n_popResponseQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(S_R, Data) {
|
|
|
|
wr_writeResponseDataToMemory;
|
|
|
|
m_decrementNumberOfMessages;
|
|
|
|
o_checkForCompletion;
|
|
|
|
n_popResponseQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(NO_R, {Data, Exclusive_Data}) {
|
|
|
|
wr_writeResponseDataToMemory;
|
|
|
|
m_decrementNumberOfMessages;
|
|
|
|
o_checkForCompletion;
|
|
|
|
n_popResponseQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition({O_R, S_R, NO_R}, All_acks_and_data_no_sharers, E) {
|
|
|
|
w_deallocateTBE;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2010-08-20 20:46:14 +02:00
|
|
|
g_popTriggerQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Cache responses collected during DMA transactions. Shared_* responses also
// update the sharer/owner tracking bits in the TBE.

transition({NO_DR_B_W, O_DR_B_W}, Ack) {
    m_decrementNumberOfMessages;
    n_popResponseQueue;
}

transition(NO_DR_B_W, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    n_popResponseQueue;
}

transition(O_DR_B, Shared_Ack) {
    m_decrementNumberOfMessages;
    so_setOwnerBit;
    o_checkForCompletion;
    n_popResponseQueue;
}

transition(O_DR_B_W, Shared_Ack) {
    m_decrementNumberOfMessages;
    so_setOwnerBit;
    n_popResponseQueue;
}

transition({NO_DR_B, NO_DR_B_D}, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
}

// Shared data: record the payload in the TBE in addition to tracking.
transition(NO_DR_B_W, Shared_Data) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    so_setOwnerBit;
    o_checkForCompletion;
    n_popResponseQueue;
}

transition({NO_DR_B, NO_DR_B_D}, Shared_Data) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    so_setOwnerBit;
    o_checkForCompletion;
    n_popResponseQueue;
}

// Exclusive/plain data while still waiting on memory: record it only; the
// completion check happens once memory has also responded.
transition(NO_DR_B_W, {Exclusive_Data, Data}) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    n_popResponseQueue;
}

transition({NO_DR_B, NO_DR_B_D, NO_DW_B_W}, {Exclusive_Data, Data}) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
}
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
transition(NO_DR_B, All_acks_and_owner_data, O) {
|
|
|
|
//
|
|
|
|
// Note that the DMA consistency model allows us to send the DMA device
|
|
|
|
// a response as soon as we receive valid data and prior to receiving
|
|
|
|
// all acks. However, to simplify the protocol we wait for all acks.
|
|
|
|
//
|
|
|
|
dt_sendDmaDataFromTbe;
|
|
|
|
wdt_writeDataFromTBE;
|
|
|
|
w_deallocateTBE;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2010-08-20 20:46:14 +02:00
|
|
|
g_popTriggerQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(NO_DR_B, All_acks_and_shared_data, S) {
|
2009-11-19 01:34:32 +01:00
|
|
|
//
|
|
|
|
// Note that the DMA consistency model allows us to send the DMA device
|
|
|
|
// a response as soon as we receive valid data and prior to receiving
|
|
|
|
// all acks. However, to simplify the protocol we wait for all acks.
|
|
|
|
//
|
|
|
|
dt_sendDmaDataFromTbe;
|
2010-08-20 20:46:14 +02:00
|
|
|
wdt_writeDataFromTBE;
|
2009-11-19 01:34:32 +01:00
|
|
|
w_deallocateTBE;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:32 +01:00
|
|
|
g_popTriggerQueue;
|
|
|
|
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
transition(NO_DR_B_D, All_acks_and_owner_data, O) {
|
2009-11-19 01:34:32 +01:00
|
|
|
//
|
|
|
|
// Note that the DMA consistency model allows us to send the DMA device
|
|
|
|
// a response as soon as we receive valid data and prior to receiving
|
|
|
|
// all acks. However, to simplify the protocol we wait for all acks.
|
|
|
|
//
|
|
|
|
dt_sendDmaDataFromTbe;
|
2010-08-20 20:46:14 +02:00
|
|
|
wdt_writeDataFromTBE;
|
|
|
|
w_deallocateTBE;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2010-08-20 20:46:14 +02:00
|
|
|
g_popTriggerQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(NO_DR_B_D, All_acks_and_shared_data, S) {
|
|
|
|
//
|
|
|
|
// Note that the DMA consistency model allows us to send the DMA device
|
|
|
|
// a response as soon as we receive valid data and prior to receiving
|
|
|
|
// all acks. However, to simplify the protocol we wait for all acks.
|
|
|
|
//
|
|
|
|
dt_sendDmaDataFromTbe;
|
|
|
|
wdt_writeDataFromTBE;
|
|
|
|
w_deallocateTBE;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2010-08-20 20:46:14 +02:00
|
|
|
g_popTriggerQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(O_DR_B, All_acks_and_owner_data, O) {
|
|
|
|
wdt_writeDataFromTBE;
|
2009-11-19 01:34:32 +01:00
|
|
|
w_deallocateTBE;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:32 +01:00
|
|
|
g_popTriggerQueue;
|
|
|
|
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
transition(O_DR_B, All_acks_and_data_no_sharers, E) {
|
|
|
|
wdt_writeDataFromTBE;
|
2009-11-19 01:34:32 +01:00
|
|
|
w_deallocateTBE;
|
2010-08-20 20:46:14 +02:00
|
|
|
pfd_probeFilterDeallocate;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:32 +01:00
|
|
|
g_popTriggerQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(NO_DR_B, All_acks_and_data_no_sharers, E) {
|
|
|
|
//
|
|
|
|
// Note that the DMA consistency model allows us to send the DMA device
|
|
|
|
// a response as soon as we receive valid data and prior to receiving
|
|
|
|
// all acks. However, to simplify the protocol we wait for all acks.
|
|
|
|
//
|
|
|
|
dt_sendDmaDataFromTbe;
|
2010-08-20 20:46:14 +02:00
|
|
|
wdt_writeDataFromTBE;
|
2009-11-19 01:34:32 +01:00
|
|
|
w_deallocateTBE;
|
2010-08-20 20:46:14 +02:00
|
|
|
ppfd_possibleProbeFilterDeallocate;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:32 +01:00
|
|
|
g_popTriggerQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(NO_DR_B_D, All_acks_and_data_no_sharers, E) {
|
|
|
|
a_assertCacheData;
|
|
|
|
//
|
|
|
|
// Note that the DMA consistency model allows us to send the DMA device
|
|
|
|
// a response as soon as we receive valid data and prior to receiving
|
|
|
|
// all acks. However, to simplify the protocol we wait for all acks.
|
|
|
|
//
|
|
|
|
dt_sendDmaDataFromTbe;
|
2010-08-20 20:46:14 +02:00
|
|
|
wdt_writeDataFromTBE;
|
2009-11-19 01:34:32 +01:00
|
|
|
w_deallocateTBE;
|
2010-08-20 20:46:14 +02:00
|
|
|
ppfd_possibleProbeFilterDeallocate;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:32 +01:00
|
|
|
g_popTriggerQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(NO_DW_B_W, All_acks_and_data_no_sharers, NO_DW_W) {
|
|
|
|
dwt_writeDmaDataFromTBE;
|
|
|
|
ld_queueMemoryDmaWrite;
|
|
|
|
g_popTriggerQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(NO_DW_W, Memory_Ack, E) {
|
|
|
|
da_sendDmaAck;
|
|
|
|
w_deallocateTBE;
|
2010-08-20 20:46:14 +02:00
|
|
|
ppfd_possibleProbeFilterDeallocate;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:32 +01:00
|
|
|
l_popMemQueue;
|
|
|
|
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
transition(O_B_W, Memory_Data, O_B) {
|
|
|
|
d_sendData;
|
|
|
|
w_deallocateTBE;
|
|
|
|
l_popMemQueue;
|
|
|
|
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
transition(NO_B_W, {UnblockM, UnblockS}, NO_W) {
|
2009-11-19 01:34:32 +01:00
|
|
|
j_popIncomingUnblockQueue;
|
|
|
|
}
|
|
|
|
|
2010-08-20 20:46:14 +02:00
|
|
|
transition(O_B_W, UnblockS, O_W) {
|
2009-11-19 01:34:32 +01:00
|
|
|
j_popIncomingUnblockQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(NO_W, Memory_Data, NO) {
|
|
|
|
w_deallocateTBE;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:32 +01:00
|
|
|
l_popMemQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(O_W, Memory_Data, O) {
|
|
|
|
w_deallocateTBE;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:32 +01:00
|
|
|
l_popMemQueue;
|
|
|
|
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
// WB State Transistions
|
2010-01-30 05:29:22 +01:00
|
|
|
transition(WB, Writeback_Dirty, WB_O_W) {
|
2009-11-19 01:34:31 +01:00
|
|
|
l_writeDataToMemory;
|
2009-11-19 01:34:32 +01:00
|
|
|
l_queueMemoryWBRequest;
|
2009-11-19 01:34:31 +01:00
|
|
|
j_popIncomingUnblockQueue;
|
|
|
|
}
|
|
|
|
|
2010-01-30 05:29:22 +01:00
|
|
|
transition(WB, Writeback_Exclusive_Dirty, WB_E_W) {
|
2009-11-19 01:34:31 +01:00
|
|
|
l_writeDataToMemory;
|
2009-11-19 01:34:32 +01:00
|
|
|
l_queueMemoryWBRequest;
|
2009-11-19 01:34:31 +01:00
|
|
|
j_popIncomingUnblockQueue;
|
|
|
|
}
|
|
|
|
|
2009-11-19 01:34:32 +01:00
|
|
|
transition(WB_E_W, Memory_Ack, E) {
|
2010-08-20 20:46:14 +02:00
|
|
|
pfd_probeFilterDeallocate;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:32 +01:00
|
|
|
l_popMemQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(WB_O_W, Memory_Ack, O) {
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:32 +01:00
|
|
|
l_popMemQueue;
|
|
|
|
}
|
|
|
|
|
2009-11-19 01:34:31 +01:00
|
|
|
transition(WB, Writeback_Clean, O) {
|
|
|
|
ll_checkIncomingWriteback;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:31 +01:00
|
|
|
j_popIncomingUnblockQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(WB, Writeback_Exclusive_Clean, E) {
|
|
|
|
ll_checkIncomingWriteback;
|
2010-08-20 20:46:14 +02:00
|
|
|
pfd_probeFilterDeallocate;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:31 +01:00
|
|
|
j_popIncomingUnblockQueue;
|
|
|
|
}
|
|
|
|
|
|
|
|
transition(WB, Unblock, NO) {
|
2010-08-20 20:46:14 +02:00
|
|
|
auno_assertUnblockerNotOwner;
|
2010-08-20 20:46:14 +02:00
|
|
|
k_wakeUpDependents;
|
2009-11-19 01:34:31 +01:00
|
|
|
j_popIncomingUnblockQueue;
|
|
|
|
}
|
|
|
|
}
|