/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 */

machine(Directory, "Directory protocol")
: DirectoryMemory * directory,
  MemoryControl * memBuffer,
  int directory_latency = 6
{

  // ** IN QUEUES **
  MessageBuffer foo1, network="From", virtual_network="0", ordered="false", vnet_type="foo";  // a mod-L2 bank -> this Dir
  MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false", vnet_type="request";  // a mod-L2 bank -> this Dir
  MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false", vnet_type="response";  // a mod-L2 bank -> this Dir

  MessageBuffer goo1, network="To", virtual_network="0", ordered="false", vnet_type="goo";
  MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false", vnet_type="forward";
  MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false", vnet_type="response";  // Dir -> mod-L2 bank

  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="Invalid";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Maybe_Stale, desc="Owner";
    M, AccessPermission:Maybe_Stale, desc="Modified";

    IS, AccessPermission:Busy, desc="Blocked, was in idle";
    SS, AccessPermission:Read_Only, desc="Blocked, was in shared";
    OO, AccessPermission:Busy, desc="Blocked, was in owned";
    MO, AccessPermission:Busy, desc="Blocked, going to owner or maybe modified";
    MM, AccessPermission:Busy, desc="Blocked, going to modified";
    MM_DMA, AccessPermission:Busy, desc="Blocked, going to I";

    MI, AccessPermission:Busy, desc="Blocked on a writeback";
    MIS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
    OS, AccessPermission:Busy, desc="Blocked on a writeback";
    OSS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";

    XI_M, AccessPermission:Busy, desc="In a stable state, going to I, waiting for the memory controller";
    XI_U, AccessPermission:Busy, desc="In a stable state, going to I, waiting for an unblock";
    OI_D, AccessPermission:Busy, desc="In O, going to I, waiting for data";

    OD, AccessPermission:Busy, desc="In O, waiting for dma ack from L2";
    MD, AccessPermission:Busy, desc="In M, waiting for dma ack from L2";
  }
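
  // A note on the naming, as this file appears to use it (a reading of the
  // states, not documented by the original authors): the single-letter
  // states I, S, O, M are the stable directory states (E exists only in
  // the caches), and every multi-letter state is transient. The letters
  // hint at where the line came from and where it is headed, e.g.
  //
  //   MM, AccessPermission:Busy, desc="Blocked, going to modified";
  //
  // holds a line whose GETX has been serviced but whose requestor has not
  // yet sent the Exclusive_Unblock that lets the directory settle in M.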

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTO, desc="A PUTO arrives";
    PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
    Unblock, desc="An unblock message arrives";
    Last_Unblock, desc="An unblock message arrives; we're not waiting for any additional unblocks";
    Exclusive_Unblock, desc="The processor becomes the exclusive owner (E or M) of the line";
    Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
    Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
    DMA_READ, desc="DMA Read";
    DMA_WRITE, desc="DMA Write";
    DMA_ACK, desc="DMA Ack";
    Data, desc="Data to directory";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface='AbstractEntry') {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
    int WaitingUnblocks, desc="Number of acks we're waiting for";
  }
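
  // Sharers and Owner are NetDest sets, i.e. full-map destination vectors
  // over machine IDs, so this directory records every sharer exactly
  // rather than using a limited-pointer or coarse-vector encoding.
  // WaitingUnblocks only matters in the SS and OO states, where several
  // GETS responses can be outstanding at once; see n_incrementOutstanding
  // and o_decrementOutstanding below.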

  structure(TBE, desc="...") {
    Address PhysicalAddress, desc="Physical address for this entry";
    int Len, desc="Length of request";
    DataBlock DataBlk, desc="DataBlk";
    MachineID Requestor, desc="original requestor";
  }

  structure(TBETable, external = "yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **
  TBETable TBEs, template_hack="<Directory_TBE>";

  void set_tbe(TBE b);
  void unset_tbe();

  Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }
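
  // getDirectoryEntry allocates on first touch: if no Entry exists for
  // addr, a fresh one (default state I, empty sharer sets) is created in
  // place, so callers may chain accesses without a presence check, e.g.
  // (taken from setState below):
  //
  //   getDirectoryEntry(addr).DirectoryState := state;
  //
  // Because of return_by_pointer="yes", that assignment mutates the stored
  // entry directly rather than a copy.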

  State getState(TBE tbe, Address addr) {
    return getDirectoryEntry(addr).DirectoryState;
  }

  void setState(TBE tbe, Address addr, State state) {
    if (directory.isPresent(addr)) {

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      if (state == State:S) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
      }

      if (state == State:O) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false);
      }

      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      if ((state != State:SS) && (state != State:OO)) {
        assert(getDirectoryEntry(addr).WaitingUnblocks == 0);
      }

      if ((getDirectoryEntry(addr).DirectoryState != State:I) && (state == State:I)) {
        getDirectoryEntry(addr).DirectoryState := state;
        // disable coherence checker
        // sequencer.checkCoherence(addr);
      } else {
        getDirectoryEntry(addr).DirectoryState := state;
      }
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Address addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getDirectoryEntry(addr).DataBlk;
  }

  // if no sharers, then directory can be considered both a sharer and
  // exclusive w.r.t. coherence checking
  bool isBlockShared(Address addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  bool isBlockExclusive(Address addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  // out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
  out_port(goo1_out, ResponseMsg, goo1);
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **
  in_port(foo1_in, ResponseMsg, foo1) {
  }

  // in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
  //   if (unblockNetwork_in.isReady()) {
  in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
    if (unblockNetwork_in.isReady()) {
      peek(unblockNetwork_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
          if (getDirectoryEntry(in_msg.Address).WaitingUnblocks == 1) {
            trigger(Event:Last_Unblock, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else {
            trigger(Event:Unblock, in_msg.Address,
                    TBEs[in_msg.Address]);
          }
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
          trigger(Event:Exclusive_Unblock, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
          trigger(Event:Dirty_Writeback, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
          trigger(Event:Clean_Writeback, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
          trigger(Event:DMA_ACK, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else {
          error("Invalid message");
        }
      }
    }
  }
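
  // Each in_port maps the head message's type onto a protocol Event via
  // trigger(). The third argument passes the TBE for the address; for most
  // events no TBE exists (this protocol only allocates them for DMA writes
  // from O/M, in v_allocateTBE below), in which case the lookup presumably
  // hands the transition an invalid entry that is simply never read. The
  // UNBLOCK case also consults WaitingUnblocks so the final straggler
  // fires Last_Unblock instead of Unblock.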

  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          trigger(Event:PUTX, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:PUTO) {
          trigger(Event:PUTO, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
          trigger(Event:PUTO_SHARERS, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }
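
  // Requests queued on memQueue_out come back on this port once the
  // MemoryControl model has charged DRAM latency: MEMORY_READ responses
  // carry the fetched block (Event:Memory_Data), while MEMORY_WB responses
  // are bare acks (Event:Memory_Ack). Note that the response reuses the
  // request's Type field; the direction is implied by the queue it
  // arrives on.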

  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
    getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner);
    getDirectoryEntry(address).Owner.clear();
  }

  action(cc_clearSharers, "\c", desc="Clear the sharers field") {
    getDirectoryEntry(address).Sharers.clear();
  }

  action(d_sendDataMsg, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        //out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Acks := in_msg.Acks;
        if (in_msg.ReadX) {
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        } else {
          out_msg.Type := CoherenceResponseType:DATA;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(p_fwdDataToDMA, "\d", desc="Send the directory's data to the DMA requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Sender);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

  action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner, with the directory as requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

  action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
    peek(requestQueue_in, RequestMsg) {
      if ((getDirectoryEntry(in_msg.Address).Sharers.count() > 1) ||
          ((getDirectoryEntry(in_msg.Address).Sharers.count() > 0) &&
           (getDirectoryEntry(in_msg.Address).Sharers.isElement(in_msg.Requestor) == false))) {
        enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:INV;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
          // out_msg.Destination := getDirectoryEntry(in_msg.Address).Sharers;
          out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Sharers);
          out_msg.Destination.remove(in_msg.Requestor);
          out_msg.MessageSize := MessageSizeType:Invalidate_Control;
        }
      }
    }
  }
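
  // Ack-count convention used by the forwarding actions above: the data
  // response eventually carries Acks equal to the number of sharers that
  // will receive an INV, so the requestor knows how many invalidation acks
  // to collect before proceeding. A requestor already on the sharer list
  // is never sent an INV, hence the "Acks - 1" adjustment in
  // f_forwardRequest and qf_queueMemoryFetchRequest.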

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    unblockNetwork_in.dequeue();
  }

  action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(p_writeFwdDataToMemory, "p", desc="Write response data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(ll_checkDataInMemory, "\ld", desc="Check that PUTX/PUTO data is the same as in memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation. We include the data in the "dataless" message so
      // we can assert that the clean data matches the data block in memory.
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);
    }
  }

  action(m_addUnlockerToSharers, "m", desc="Add the unblocker to the sharer list") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Sharers.add(in_msg.Sender);
    }
  }

  action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1;
  }

  action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1;
    assert(getDirectoryEntry(address).WaitingUnblocks >= 0);
  }

  action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := false;
        // These are not used by memory but are passed back here with the read data:
        out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && getDirectoryEntry(address).Sharers.count() == 0);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        if (is_valid(tbe)) {
          out_msg.OriginalRequestorMachId := tbe.Requestor;
        }
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := false;
        // Not used:
        out_msg.ReadX := false;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();  // for dma requests
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := false;
        // Not used:
        out_msg.ReadX := false;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();  // for dma requests
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  // action(z_stall, "z", desc="Cannot be handled right now.") {
  //   Special name recognized as do-nothing case
  // }

  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
    requestQueue_in.recycle();
  }

  action(a_sendDMAAck, "\a", desc="Send DMA ack that the write completed, along with the inv ack count") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();  // for dma requests
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(a_sendDMAAck2, "\aa", desc="Send DMA ack that the write completed, along with the inv ack count") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        if (is_valid(tbe)) {
          out_msg.Destination.add(tbe.Requestor);
        }
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();  // for dma requests
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(l_writeDMADataToMemory, "\l", desc="Write data from a DMA_WRITE to memory") {
    peek(requestQueue_in, RequestMsg) {
      getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write data from a DMA_WRITE to memory") {
    assert(is_valid(tbe));
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
                                                   addressOffset(tbe.PhysicalAddress), tbe.Len);
  }

  action(v_allocateTBE, "v", desc="Allocate TBE entry") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.PhysicalAddress := in_msg.Address;
      tbe.Len := in_msg.Len;
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Requestor := in_msg.Requestor;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
    TBEs.deallocate(address);
    unset_tbe();
  }
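
  // The transitions below follow the usual SLICC shape:
  //
  //   transition(current_state(s), triggering_event(s)[, next_state]) {
  //     action_1; action_2; ...
  //   }
  //
  // Actions execute in the order listed, and the next state is omitted
  // when the machine stays put. Roughly, they group into demand requests
  // (GETS/GETX), DMA reads and writes, writebacks (PUTX/PUTO/PUTO_SHARERS),
  // unblock bookkeeping, and memory responses.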

  // TRANSITIONS

  transition(I, GETX, MM) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition(I, DMA_READ, XI_M) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition(I, DMA_WRITE, XI_U) {
    qw_queueMemoryWBRequest2;
    a_sendDMAAck;  // ack count may be zero
    l_writeDMADataToMemory;
    i_popIncomingRequestQueue;
  }

  transition(XI_M, Memory_Data, I) {
    d_sendDataMsg;  // ack count may be zero
    q_popMemQueue;
  }

  transition(XI_U, Exclusive_Unblock, I) {
    cc_clearSharers;
    c_clearOwner;
    j_popIncomingUnblockQueue;
  }
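
  // Reading the two DMA-from-I paths above together: a DMA_READ parks the
  // line in XI_M until the memory controller returns the block
  // (Memory_Data -> d_sendDataMsg -> back to I), while a DMA_WRITE patches
  // memory immediately, acks the DMA engine, and waits in XI_U for the DMA
  // controller's exclusive unblock before returning to I.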

  transition(S, GETX, MM) {
    qf_queueMemoryFetchRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_READ) {
    //qf_queueMemoryFetchRequest;
    p_fwdDataToDMA;
    //g_sendInvalidations;  // the DMA will collect the invalidations then send an Unblock Exclusive
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_WRITE, XI_U) {
    qw_queueMemoryWBRequest2;
    a_sendDMAAck;  // ack count may be zero
    l_writeDMADataToMemory;
    g_sendInvalidations;  // the DMA will collect invalidations
    i_popIncomingRequestQueue;
  }

  transition(I, GETS, IS) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition({S, SS}, GETS, SS) {
    qf_queueMemoryFetchRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition({I, S}, PUTO) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition({I, S, O}, PUTX) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(O, GETX, MM) {
    f_forwardRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, OD) {
    f_forwardRequest;  // this will cause the data to go to DMA directly
    //g_sendInvalidations;  // this will cause acks to be sent to the DMA
    i_popIncomingRequestQueue;
  }

  transition(OD, DMA_ACK, O) {
    j_popIncomingUnblockQueue;
  }

  transition({O, M}, DMA_WRITE, OI_D) {
    f_forwardRequestDirIsRequestor;  // need the modified data before we can proceed
    g_sendInvalidations;             // these go to the DMA Controller
    v_allocateTBE;
    i_popIncomingRequestQueue;
  }

  transition(OI_D, Data, XI_U) {
    qw_queueMemoryWBRequest;
    a_sendDMAAck2;  // ack count may be zero
    p_writeFwdDataToMemory;
    l_writeDMADataToMemoryFromTBE;
    w_deallocateTBE;
    j_popIncomingUnblockQueue;
  }
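
  // The {O, M} DMA_WRITE path above is the only consumer of TBEs in this
  // machine: v_allocateTBE stashes the request (address, length, partial
  // data, requestor) while f_forwardRequestDirIsRequestor pulls the
  // owner's modified copy back to the directory. When that data arrives
  // (Event:Data in OI_D), the owner's block is written to memory, the
  // stashed DMA bytes are patched on top of it from the TBE, and the DMA
  // controller finally gets its DMA_ACK.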

  transition({O, OO}, GETS, OO) {
    f_forwardRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition(M, GETX, MM) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  // no exclusive unblock will show up to the directory
  transition(M, DMA_READ, MD) {
    f_forwardRequest;  // this will cause the data to go to DMA directly
    i_popIncomingRequestQueue;
  }

  transition(MD, DMA_ACK, M) {
    j_popIncomingUnblockQueue;
  }

  transition(M, GETS, MO) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  // happens if M->O transition happens on-chip
  transition(M, PUTO, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTO_SHARERS, MIS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO, OS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO_SHARERS, OSS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D, OD, MD},
             {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition({MM, MO}, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(MO, Unblock, O) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition(IS, GETS) {
    zz_recycleRequest;
  }

  transition(IS, Unblock, S) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition(IS, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Last_Unblock, S) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Last_Unblock, O) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }
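
  // SS and OO are the only states with several unblocks potentially in
  // flight: each GETS accepted in S/SS or O/OO bumps WaitingUnblocks, each
  // Unblock decrements it, and the unblock in_port promotes the final one
  // (WaitingUnblocks == 1 at peek time) to Last_Unblock so the line can
  // settle back to S or O.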

  transition(MI, Dirty_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Dirty_Writeback, S) {
    c_clearOwner;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Clean_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    ll_checkDataInMemory;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Clean_Writeback, S) {
    c_clearOwner;
    ll_checkDataInMemory;
    j_popIncomingUnblockQueue;
  }

  transition({MI, MIS}, Unblock, M) {
    j_popIncomingUnblockQueue;
  }

  transition({OS, OSS}, Unblock, O) {
    j_popIncomingUnblockQueue;
  }

  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
    d_sendDataMsg;
    q_popMemQueue;
  }

  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) {
    //a_sendAck;
    q_popMemQueue;
  }
}