33b28fde7a
This changeset contains several changes that are too intertwined to separate:

1. Added MOESI_CMP_directory. I made the changes necessary to bring back MOESI_CMP_directory, including adding a DMA controller. I removed MOESI_CMP_directory_m and made MOESI_CMP_directory use a memory controller instead. Added a new configuration for two-level protocols in general, and for MOESI_CMP_directory in particular.

2. The DMA Sequencer now uses a generic SequencerMsg. I will eventually make the cache Sequencer use this type as well. It does not contain an offset field — just a physical address and a length. MI_example has been updated to handle this.

3. Parameterized controllers. SLICC controllers can now take custom parameters to use for mapping, latencies, etc. Currently, only int parameters are supported.
826 lines
26 KiB
Text
826 lines
26 KiB
Text
/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 */
// Directory controller for the MOESI_CMP_directory protocol.
// Tracks the per-block owner and sharer set, interfaces with a detailed
// memory controller (memBuffer) for off-chip reads/writebacks, and
// services DMA_READ / DMA_WRITE requests from the DMA controller.
machine(Directory, "Directory protocol")
: int directory_latency  // delay (cycles) applied to messages enqueued on the forward network
{

// ** IN QUEUES **
// NOTE(review): "foo1"/"goo1" look like placeholder endpoints for virtual
// network 0 (the foo1_in port below is empty) — confirm they are intentional.
MessageBuffer foo1, network="From", virtual_network="0", ordered="false";  // a mod-L2 bank -> this Dir
MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";  // a mod-L2 bank -> this Dir
MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false";  // a mod-L2 bank -> this Dir

// ** OUT QUEUES **
MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";  // Dir -> mod-L2 bank
// STATES
// I/S/O/M are the stable directory states; everything else is a transient
// ("blocked") state held while the directory waits for cache unblocks,
// writeback data, or the memory controller.
enumeration(State, desc="Directory states", default="Directory_State_I") {
  // Base states
  I, desc="Invalid";
  S, desc="Shared";
  O, desc="Owner";
  M, desc="Modified";

  // Blocked states entered on requests, awaiting unblock messages
  IS, desc="Blocked, was in idle";
  SS, desc="Blocked, was in shared";
  OO, desc="Blocked, was in owned";
  MO, desc="Blocked, going to owner or maybe modified";
  MM, desc="Blocked, going to modified";
  // NOTE(review): MM_DMA is not referenced by any transition in this file.
  MM_DMA, desc="Blocked, going to I";

  // Blocked states awaiting writeback completion
  MI, desc="Blocked on a writeback";
  MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
  OS, desc="Blocked on a writeback";
  OSS, desc="Blocked on a writeback, but don't remove from sharers when received";

  // DMA-related transient states
  XI_M, desc="In a stable state, going to I, waiting for the memory controller";
  XI_U, desc="In a stable state, going to I, waiting for an unblock";
  OI_D, desc="In O, going to I, waiting for data";
}

// Events
enumeration(Event, desc="Directory events") {
  GETX, desc="A GETX arrives";
  GETS, desc="A GETS arrives";
  PUTX, desc="A PUTX arrives";
  PUTO, desc="A PUTO arrives";
  PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
  Unblock, desc="An unblock message arrives";
  Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
  Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
  Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
  Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
  Memory_Data, desc="Fetched data from memory arrives";
  Memory_Ack, desc="Writeback Ack from memory arrives";
  DMA_READ, desc="DMA Read";
  DMA_WRITE, desc="DMA Write";
  Data, desc="Data to directory";
}
// TYPES

// DirectoryEntry: per-block coherence state kept in DirectoryMemory.
structure(Entry, desc="...") {
  State DirectoryState, desc="Directory state";
  DataBlock DataBlk, desc="data for the block";
  NetDest Sharers, desc="Sharers for this block";
  NetDest Owner, desc="Owner of this block";
  int WaitingUnblocks, desc="Number of acks we're waiting for";
}

// TBE: transient buffer used on the DMA-write path when the owner's dirty
// data must be collected before the write can be applied (see OI_D).
structure(TBE, desc="...") {
  Address address, desc="Address for this entry";
  int Len, desc="Length of request";
  DataBlock DataBlk, desc="DataBlk";
  MachineID Requestor, desc="original requestor";
}

// Externally (C++) implemented interfaces.
external_type(DirectoryMemory) {
  Entry lookup(Address);
  bool isPresent(Address);
}

external_type(TBETable) {
  TBE lookup(Address);
  void allocate(Address);
  void deallocate(Address);
  bool isPresent(Address);
}

// to simulate detailed DRAM
external_type(MemoryControl, inport="yes", outport="yes") {

}

// ** OBJECTS **

DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])';
MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])';
TBETable TBEs, template_hack="<Directory_TBE>";
State getState(Address addr) {
  return directory[addr].DirectoryState;
}

// Update a block's directory state, first sanity-checking the owner/sharer
// invariants that must hold in the target state.
void setState(Address addr, State state) {
  if (directory.isPresent(addr)) {

    // Invalid: nobody may own or share the block.
    if (state == State:I) {
      assert(directory[addr].Owner.count() == 0);
      assert(directory[addr].Sharers.count() == 0);
    }

    // Shared: no owner.
    if (state == State:S) {
      assert(directory[addr].Owner.count() == 0);
    }

    // Owned: exactly one owner, and the owner is not also in Sharers.
    if (state == State:O) {
      assert(directory[addr].Owner.count() == 1);
      assert(directory[addr].Sharers.isSuperset(directory[addr].Owner) == false);
    }

    // Modified: exactly one owner and no sharers.
    if (state == State:M) {
      assert(directory[addr].Owner.count() == 1);
      assert(directory[addr].Sharers.count() == 0);
    }

    // Only SS and OO may carry outstanding unblocks.
    if ((state != State:SS) && (state != State:OO)) {
      assert(directory[addr].WaitingUnblocks == 0);
    }

    // NOTE(review): both arms perform the same assignment; the split exists
    // only to host the disabled coherence-checker hook on the ->I edge.
    if ( (directory[addr].DirectoryState != State:I) && (state == State:I) ) {
      directory[addr].DirectoryState := state;
      // disable coherence checker
      // sequencer.checkCoherence(addr);
    }
    else {
      directory[addr].DirectoryState := state;
    }
  }
}
// if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
bool isBlockShared(Address addr) {
  if (directory.isPresent(addr)) {
    if (directory[addr].DirectoryState == State:I) {
      return true;
    }
  }
  return false;
}

// Intentionally identical to isBlockShared (see the comment above): in
// state I the directory itself plays both roles for coherence checking.
bool isBlockExclusive(Address addr) {
  if (directory.isPresent(addr)) {
    if (directory[addr].DirectoryState == State:I) {
      return true;
    }
  }
  return false;
}
// ** OUT_PORTS **
out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
// out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
out_port(goo1_out, ResponseMsg, goo1);
out_port(memQueue_out, MemoryMsg, memBuffer);  // to the detailed memory controller

// ** IN_PORTS **

// Virtual network 0 endpoint; no message type is ever handled here.
in_port(foo1_in, ResponseMsg, foo1) {

}
// Unblocks and writeback responses from caches (virtual network 2).
// Historically this was a dedicated unblockToDir buffer; unblocks and
// writebacks now share responseToDir.
in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
  if (unblockNetwork_in.isReady()) {
    peek(unblockNetwork_in, ResponseMsg) {
      if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
        // Last_Unblock fires when this is the final outstanding unblock
        // (WaitingUnblocks is maintained by the n_/o_ actions below).
        if (directory[in_msg.Address].WaitingUnblocks == 1) {
          trigger(Event:Last_Unblock, in_msg.Address);
        } else {
          trigger(Event:Unblock, in_msg.Address);
        }
      } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
        trigger(Event:Exclusive_Unblock, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
        trigger(Event:Dirty_Writeback, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
        trigger(Event:Clean_Writeback, in_msg.Address);
      } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
        // Owner's data arriving for a pending DMA write (state OI_D).
        trigger(Event:Data, in_msg.Address);
      } else {
        error("Invalid message");
      }
    }
  }
}
// Requests from L2 banks and the DMA controller (virtual network 1).
in_port(requestQueue_in, RequestMsg, requestToDir) {
  if (requestQueue_in.isReady()) {
    peek(requestQueue_in, RequestMsg) {
      if (in_msg.Type == CoherenceRequestType:GETS) {
        trigger(Event:GETS, in_msg.Address);
      } else if (in_msg.Type == CoherenceRequestType:GETX) {
        trigger(Event:GETX, in_msg.Address);
      } else if (in_msg.Type == CoherenceRequestType:PUTX) {
        trigger(Event:PUTX, in_msg.Address);
      } else if (in_msg.Type == CoherenceRequestType:PUTO) {
        trigger(Event:PUTO, in_msg.Address);
      } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
        trigger(Event:PUTO_SHARERS, in_msg.Address);
      } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
        trigger(Event:DMA_READ, in_msg.Address);
      } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
        trigger(Event:DMA_WRITE, in_msg.Address);
      } else {
        error("Invalid message");
      }
    }
  }
}
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, memBuffer) {
  if (memQueue_in.isReady()) {
    peek(memQueue_in, MemoryMsg) {
      if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
        // Completed fetch: data is in in_msg.DataBlk.
        trigger(Event:Memory_Data, in_msg.Address);
      } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
        // Completed writeback.
        trigger(Event:Memory_Ack, in_msg.Address);
      } else {
        DEBUG_EXPR(in_msg.Type);
        error("Invalid message");
      }
    }
  }
}
// Actions

action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:WB_ACK;
      out_msg.Requestor := in_msg.Requestor;
      out_msg.Destination.add(in_msg.Requestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
}

action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:WB_NACK;
      out_msg.Requestor := in_msg.Requestor;
      out_msg.Destination.add(in_msg.Requestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
}

action(c_clearOwner, "c", desc="Clear the owner field") {
  directory[address].Owner.clear();
}

// Demote the current owner to an ordinary sharer (PUTO_SHARERS paths).
action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
  directory[address].Sharers.addNetDest(directory[address].Owner);
  directory[address].Owner.clear();
}

action(cc_clearSharers, "\c", desc="Clear the sharers field") {
  directory[address].Sharers.clear();
}
// Forward data returned by the memory controller to the original
// requestor. ReadX and Acks were stashed on the request by
// qf_queueMemoryFetchRequest and echoed back by memory; ReadX selects an
// exclusive grant.
action(d_sendDataMsg, "d", desc="Send data to requestor") {
  peek(memQueue_in, MemoryMsg) {
    enqueue(responseNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:Directory;
      out_msg.Destination.add(in_msg.OriginalRequestorMachId);
      out_msg.DataBlk := in_msg.DataBlk;
      out_msg.Dirty := false; // By definition, the block is now clean
      out_msg.Acks := in_msg.Acks;
      if (in_msg.ReadX) {
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
      } else {
        out_msg.Type := CoherenceResponseType:DATA;
      }
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }
}

// Record the sender of the exclusive unblock as the new owner.
action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
  peek(unblockNetwork_in, ResponseMsg) {
    directory[address].Owner.clear();
    directory[address].Owner.add(in_msg.Sender);
  }
}
// Forward the request to the current owner. Acks tells the requestor how
// many invalidation acks to expect; the requestor's own sharer entry (if
// any) is excluded from the count.
action(f_forwardRequest, "f", desc="Forward request to owner") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
      out_msg.Address := address;
      out_msg.Type := in_msg.Type;
      out_msg.Requestor := in_msg.Requestor;
      out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
      out_msg.Acks := directory[address].Sharers.count();
      if (directory[address].Sharers.isElement(in_msg.Requestor)) {
        out_msg.Acks := out_msg.Acks - 1;
      }
      out_msg.MessageSize := MessageSizeType:Forwarded_Control;
    }
  }
}

// Like f_forwardRequest, but names the directory itself as the requestor
// so the owner's data returns here (used on the DMA-write-to-owned path).
action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
      out_msg.Address := address;
      out_msg.Type := in_msg.Type;
      out_msg.Requestor := machineID;
      out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
      out_msg.Acks := directory[address].Sharers.count();
      if (directory[address].Sharers.isElement(in_msg.Requestor)) {
        out_msg.Acks := out_msg.Acks - 1;
      }
      out_msg.MessageSize := MessageSizeType:Forwarded_Control;
    }
  }
}

// Invalidate every sharer except the requestor. The message is skipped
// entirely when the requestor is the only sharer (or there are none).
action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
  peek(requestQueue_in, RequestMsg) {
    if ((directory[in_msg.Address].Sharers.count() > 1) ||
        ((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.addNetDest(directory[in_msg.Address].Sharers);
        out_msg.Destination.remove(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Invalidate_Control;
      }
    }
  }
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
  requestQueue_in.dequeue();
}

action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
  unblockNetwork_in.dequeue();
}

// Commit dirty writeback data into the directory's block copy.
action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
  peek(unblockNetwork_in, ResponseMsg) {
    assert(in_msg.Dirty);
    assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
    directory[in_msg.Address].DataBlk := in_msg.DataBlk;
    DEBUG_EXPR(in_msg.Address);
    DEBUG_EXPR(in_msg.DataBlk);
  }
}

action(ll_checkDataInMemory, "\ld", desc="Check PUTX/PUTO data is same as in the memory") {
  peek(unblockNetwork_in, ResponseMsg) {
    assert(in_msg.Dirty == false);
    assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

    // NOTE: The following check would not be valid in a real
    // implementation. We include the data in the "dataless"
    // message so we can assert the clean data matches the datablock
    // in memory
    assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
  }
}

action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
  peek(unblockNetwork_in, ResponseMsg) {
    directory[address].Sharers.add(in_msg.Sender);
  }
}

// WaitingUnblocks drives the Unblock vs. Last_Unblock distinction in the
// unblock in_port above.
action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
  directory[address].WaitingUnblocks := directory[address].WaitingUnblocks + 1;
}

action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
  directory[address].WaitingUnblocks := directory[address].WaitingUnblocks - 1;
  assert(directory[address].WaitingUnblocks >= 0);
}

action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
  memQueue_in.dequeue();
}
// Issue an off-chip read. ReadX and Acks are not consumed by memory; they
// ride along and come back with the data, where d_sendDataMsg uses them.
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_READ;
      out_msg.Sender := machineID;
      out_msg.OriginalRequestorMachId := in_msg.Requestor;
      out_msg.DataBlk := directory[in_msg.Address].DataBlk;
      out_msg.MessageSize := in_msg.MessageSize;
      // These are not used by memory but are passed back here with the read data:
      // a GETS with no sharers gets an exclusive grant.
      out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0);
      out_msg.Acks := directory[address].Sharers.count();
      if (directory[address].Sharers.isElement(in_msg.Requestor)) {
        out_msg.Acks := out_msg.Acks - 1;
      }
      DEBUG_EXPR(out_msg);
    }
  }
}

// Queue a writeback of data arriving on the unblock network. A TBE exists
// only on the DMA-write path; when present, its Requestor routes the
// eventual Memory_Ack back to the DMA controller as a DMA_ACK.
action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
  peek(unblockNetwork_in, ResponseMsg) {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      out_msg.Sender := machineID;
      if (TBEs.isPresent(address)) {
        out_msg.OriginalRequestorMachId := TBEs[address].Requestor;
      }
      out_msg.DataBlk := in_msg.DataBlk;
      out_msg.MessageSize := in_msg.MessageSize;
      // Not used:
      out_msg.ReadX := false;
      out_msg.Acks := directory[address].Sharers.count(); // for dma requests
      DEBUG_EXPR(out_msg);
    }
  }
}

// Queue a writeback of data arriving on the request network (DMA write
// when no cache holds the block dirty).
action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      out_msg.Sender := machineID;
      out_msg.OriginalRequestorMachId := in_msg.Requestor;
      out_msg.DataBlk := in_msg.DataBlk;
      out_msg.MessageSize := in_msg.MessageSize;
      // Not used:
      out_msg.ReadX := false;
      out_msg.Acks := directory[address].Sharers.count(); // for dma requests
      DEBUG_EXPR(out_msg);
    }
  }
}
// action(z_stall, "z", desc="Cannot be handled right now.") {
//   Special name recognized as do nothing case
// }

// Re-enqueue the head request; used to stall requests in blocked states.
action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
  requestQueue_in.recycle();
}

// Notify the DMA controller its write completed. Acks carries the number
// of invalidation acks the DMA controller must still collect.
action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
  peek(memQueue_in, MemoryMsg) {
    enqueue(responseNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:Directory;
      out_msg.Destination.add(in_msg.OriginalRequestorMachId);
      out_msg.DataBlk := in_msg.DataBlk;
      out_msg.Acks := in_msg.Acks;
      out_msg.Type := CoherenceResponseType:DMA_ACK;
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
}

// DMA writes may be partial blocks: merge Len bytes at the request's
// block offset into the directory's copy.
action(l_writeDMADataToMemory, "\l", desc="Write data from a DMA_WRITE to memory") {
  peek(requestQueue_in, RequestMsg) {
    directory[address].DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
  }
}

// As above, but sourced from the TBE — applied after the owner's dirty
// data has been written, so the DMA bytes take precedence.
action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write data from a DMA_WRITE to memory") {
  directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(address), TBEs[address].Len);
}

// Stash the DMA write (data, length, requestor) while the owner's dirty
// copy is being collected.
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
  peek (requestQueue_in, RequestMsg) {
    TBEs.allocate(address);
    TBEs[address].Len := in_msg.Len;
    TBEs[address].DataBlk := in_msg.DataBlk;
    TBEs[address].Requestor := in_msg.Requestor;
  }
}

action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
  TBEs.deallocate(address);
}
// TRANSITIONS

// --- Requests arriving in I ---

transition(I, GETX, MM) {
  qf_queueMemoryFetchRequest;
  i_popIncomingRequestQueue;
}

transition(I, DMA_READ, XI_M) {
  qf_queueMemoryFetchRequest;
  i_popIncomingRequestQueue;
}

transition(I, DMA_WRITE, XI_M) {
  qw_queueMemoryWBRequest2;
  l_writeDMADataToMemory;
  i_popIncomingRequestQueue;
}

// --- DMA completion: memory has responded, now await the DMA unblock ---

transition(XI_M, Memory_Data, XI_U) {
  d_sendDataMsg; // ack count may be zero
  q_popMemQueue;
}

transition(XI_M, Memory_Ack, XI_U) {
  a_sendDMAAck; // ack count may be zero
  q_popMemQueue;
}

transition(XI_U, Exclusive_Unblock, I) {
  cc_clearSharers;
  c_clearOwner;
  j_popIncomingUnblockQueue;
}

// --- Requests arriving in S ---

transition(S, GETX, MM) {
  qf_queueMemoryFetchRequest;
  g_sendInvalidations;
  i_popIncomingRequestQueue;
}

transition(S, DMA_READ, XI_M) {
  qf_queueMemoryFetchRequest;
  g_sendInvalidations; // the DMA will collect the invalidations then send an Unblock Exclusive
  i_popIncomingRequestQueue;
}

transition(S, DMA_WRITE, XI_M) {
  qw_queueMemoryWBRequest2;
  l_writeDMADataToMemory;
  g_sendInvalidations; // the DMA will collect invalidations
  i_popIncomingRequestQueue;
}
transition(I, GETS, IS) {
  qf_queueMemoryFetchRequest;
  i_popIncomingRequestQueue;
}

// Concurrent GETS in S are allowed; each one bumps WaitingUnblocks.
transition({S, SS}, GETS, SS) {
  qf_queueMemoryFetchRequest;
  n_incrementOutstanding;
  i_popIncomingRequestQueue;
}

// Writebacks from states that cannot legitimately hold the block: nack.
transition({I, S}, PUTO) {
  b_sendWriteBackNack;
  i_popIncomingRequestQueue;
}

transition({I, S, O}, PUTX) {
  b_sendWriteBackNack;
  i_popIncomingRequestQueue;
}

// --- Requests arriving in O ---

transition(O, GETX, MM) {
  f_forwardRequest;
  g_sendInvalidations;
  i_popIncomingRequestQueue;
}

transition(O, DMA_READ, XI_U) {
  f_forwardRequest; // this will cause the data to go to DMA directly
  g_sendInvalidations; // this will cause acks to be sent to the DMA
  i_popIncomingRequestQueue;
}

transition({O,M}, DMA_WRITE, OI_D) {
  f_forwardRequestDirIsRequestor; // need the modified data before we can proceed
  g_sendInvalidations; // these go to the DMA Controller
  v_allocateTBE;
  i_popIncomingRequestQueue;
}

// Owner's dirty data arrived: write it back, overlay the buffered DMA
// bytes from the TBE, then wait on the memory controller in XI_M.
transition(OI_D, Data, XI_M) {
  qw_queueMemoryWBRequest;
  l_writeDataToMemory;
  l_writeDMADataToMemoryFromTBE;
  w_deallocateTBE;
  j_popIncomingUnblockQueue;
}

transition({O, OO}, GETS, OO) {
  f_forwardRequest;
  n_incrementOutstanding;
  i_popIncomingRequestQueue;
}

// --- Requests arriving in M ---

transition(M, GETX, MM) {
  f_forwardRequest;
  i_popIncomingRequestQueue;
}

// no exclusive unblock will show up to the directory
transition(M, DMA_READ, XI_U) {
  f_forwardRequest; // this will cause the data to go to DMA directly
  i_popIncomingRequestQueue;
}

transition(M, GETS, MO) {
  f_forwardRequest;
  i_popIncomingRequestQueue;
}

transition(M, PUTX, MI) {
  a_sendWriteBackAck;
  i_popIncomingRequestQueue;
}

// happens if M->O transition happens on-chip
transition(M, PUTO, MI) {
  a_sendWriteBackAck;
  i_popIncomingRequestQueue;
}

transition(M, PUTO_SHARERS, MIS) {
  a_sendWriteBackAck;
  i_popIncomingRequestQueue;
}

transition(O, PUTO, OS) {
  a_sendWriteBackAck;
  i_popIncomingRequestQueue;
}

transition(O, PUTO_SHARERS, OSS) {
  a_sendWriteBackAck;
  i_popIncomingRequestQueue;
}
// Blocked states: stall (recycle) new requests until the block unblocks.
// NOTE(review): DMA_WRITE is absent from both recycle lists below — an
// unexpected DMA_WRITE in these states would be unhandled; confirm a DMA
// write cannot race with a blocked directory entry.
transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ}) {
  zz_recycleRequest;
}

transition({MM, MO}, Exclusive_Unblock, M) {
  cc_clearSharers;
  e_ownerIsUnblocker;
  j_popIncomingUnblockQueue;
}

transition(MO, Unblock, O) {
  m_addUnlockerToSharers;
  j_popIncomingUnblockQueue;
}

transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ}) {
  zz_recycleRequest;
}

transition(IS, GETS) {
  zz_recycleRequest;
}

transition(IS, Unblock, S) {
  m_addUnlockerToSharers;
  j_popIncomingUnblockQueue;
}

transition(IS, Exclusive_Unblock, M) {
  cc_clearSharers;
  e_ownerIsUnblocker;
  j_popIncomingUnblockQueue;
}

// SS/OO track multiple concurrent GETS; each unblock decrements the
// outstanding count and only the last one leaves the blocked state.
transition(SS, Unblock) {
  m_addUnlockerToSharers;
  o_decrementOutstanding;
  j_popIncomingUnblockQueue;
}

transition(SS, Last_Unblock, S) {
  m_addUnlockerToSharers;
  o_decrementOutstanding;
  j_popIncomingUnblockQueue;
}

transition(OO, Unblock) {
  m_addUnlockerToSharers;
  o_decrementOutstanding;
  j_popIncomingUnblockQueue;
}

transition(OO, Last_Unblock, O) {
  m_addUnlockerToSharers;
  o_decrementOutstanding;
  j_popIncomingUnblockQueue;
}
// --- Writeback completions ---
// Dirty writebacks carry data that is committed to memory; clean
// writebacks are only checked against memory (see ll_checkDataInMemory).

transition(MI, Dirty_Writeback, I) {
  c_clearOwner;
  cc_clearSharers;
  l_writeDataToMemory;
  qw_queueMemoryWBRequest;
  j_popIncomingUnblockQueue;
}

transition(MIS, Dirty_Writeback, S) {
  c_moveOwnerToSharer;
  l_writeDataToMemory;
  qw_queueMemoryWBRequest;
  j_popIncomingUnblockQueue;
}

transition(MIS, Clean_Writeback, S) {
  c_moveOwnerToSharer;
  j_popIncomingUnblockQueue;
}

transition(OS, Dirty_Writeback, S) {
  c_clearOwner;
  l_writeDataToMemory;
  qw_queueMemoryWBRequest;
  j_popIncomingUnblockQueue;
}

transition(OSS, Dirty_Writeback, S) {
  c_moveOwnerToSharer;
  l_writeDataToMemory;
  qw_queueMemoryWBRequest;
  j_popIncomingUnblockQueue;
}

transition(OSS, Clean_Writeback, S) {
  c_moveOwnerToSharer;
  j_popIncomingUnblockQueue;
}

transition(MI, Clean_Writeback, I) {
  c_clearOwner;
  cc_clearSharers;
  ll_checkDataInMemory;
  j_popIncomingUnblockQueue;
}

transition(OS, Clean_Writeback, S) {
  c_clearOwner;
  ll_checkDataInMemory;
  j_popIncomingUnblockQueue;
}

// NOTE(review): an Unblock while awaiting a writeback — presumably the
// writeback raced with a forwarded request and was superseded, so
// ownership is retained; confirm against the cache-side protocol.
transition({MI, MIS}, Unblock, M) {
  j_popIncomingUnblockQueue;
}

transition({OS, OSS}, Unblock, O) {
  j_popIncomingUnblockQueue;
}

// Memory responses may arrive in any state and are serviced immediately.
transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
  d_sendDataMsg;
  q_popMemQueue;
}

transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Ack) {
  //a_sendAck;
  q_popMemQueue;
}
}