/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id: MOESI_CMP_directory-dir.sm 1.11 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
 */

machine(Directory, "Directory protocol") {

  // ** IN QUEUES **
  MessageBuffer foo1, network="From", virtual_network="0", ordered="false";  // a mod-L2 bank -> this Dir
  MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";  // a mod-L2 bank -> this Dir
  MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false";  // a mod-L2 bank -> this Dir

  MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
  MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
  MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";  // Dir -> mod-L2 bank
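  // NOTE: foo1 and goo1 on virtual network 0 appear to be unused placeholder
  // buffers; the foo1_in port below is empty and goo1_out is never written.
  // Real traffic uses requestToDir/forwardFromDir (vnet 1) and
  // responseToDir/responseFromDir (vnet 2).
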
  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, desc="Invalid";
    S, desc="Shared";
    O, desc="Owner";
    M, desc="Modified";

    IS, desc="Blocked, was in idle";
    SS, desc="Blocked, was in shared";
    OO, desc="Blocked, was in owned";
    MO, desc="Blocked, going to owner or maybe modified";
    MM, desc="Blocked, going to modified";

    MI, desc="Blocked on a writeback";
    MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
    OS, desc="Blocked on a writeback";
    OSS, desc="Blocked on a writeback, but don't remove from sharers when received";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTO, desc="A PUTO arrives";
    PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
    Unblock, desc="An unblock message arrives";
    Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
    Exclusive_Unblock, desc="The processor becomes the exclusive owner (E or M) of the line";
    Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
    Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
    int WaitingUnblocks, desc="Number of acks we're waiting for";
  }

  external_type(DirectoryMemory) {
    Entry lookup(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **

  DirectoryMemory directory, constructor_hack="i";

  State getState(Address addr) {
    return directory[addr].DirectoryState;
  }

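  // setState also sanity-checks the directory invariants for each stable
  // state: I has no owner and no sharers, S has no owner, O has exactly one
  // owner which is not also a sharer, and M has exactly one owner and no
  // sharers. Only the blocked states SS and OO may have unblocks outstanding.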
  void setState(Address addr, State state) {
    if (directory.isPresent(addr)) {

      if (state == State:I) {
        assert(directory[addr].Owner.count() == 0);
        assert(directory[addr].Sharers.count() == 0);
      }

      if (state == State:S) {
        assert(directory[addr].Owner.count() == 0);
      }

      if (state == State:O) {
        assert(directory[addr].Owner.count() == 1);
        assert(directory[addr].Sharers.isSuperset(directory[addr].Owner) == false);
      }

      if (state == State:M) {
        assert(directory[addr].Owner.count() == 1);
        assert(directory[addr].Sharers.count() == 0);
      }

      if ((state != State:SS) && (state != State:OO)) {
        assert(directory[addr].WaitingUnblocks == 0);
      }

      if ( (directory[addr].DirectoryState != State:I) && (state == State:I) ) {
        directory[addr].DirectoryState := state;
        // disable coherence checker
        // sequencer.checkCoherence(addr);
      }
      else {
        directory[addr].DirectoryState := state;
      }
    }
  }

  // if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
  bool isBlockShared(Address addr) {
    if (directory.isPresent(addr)) {
      if (directory[addr].DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

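  // Same convention as isBlockShared above: in state I the directory holds
  // the only copy, so it reports itself as exclusive to the coherence checker.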
  bool isBlockExclusive(Address addr) {
    if (directory.isPresent(addr)) {
      if (directory[addr].DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  // out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
  out_port(goo1_out, ResponseMsg, goo1);

  // ** IN_PORTS **

  in_port(foo1_in, ResponseMsg, foo1) {

  }

  // in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
  //  if (unblockNetwork_in.isReady()) {
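  // The response virtual network carries both unblock messages and
  // writebacks. UNBLOCK is split into Unblock/Last_Unblock using the
  // per-block WaitingUnblocks count; writebacks are split into dirty
  // (carrying data) and clean (ack only) events.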
  in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
    if (unblockNetwork_in.isReady()) {
      peek(unblockNetwork_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
          if (directory[in_msg.Address].WaitingUnblocks == 1) {
            trigger(Event:Last_Unblock, in_msg.Address);
          } else {
            trigger(Event:Unblock, in_msg.Address);
          }
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
          trigger(Event:Exclusive_Unblock, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
          trigger(Event:Dirty_Writeback, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
          trigger(Event:Clean_Writeback, in_msg.Address);
        } else {
          error("Invalid message");
        }
      }
    }
  }

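  // Requests from the L2 banks (GETS, GETX, and the PUTX/PUTO/PUTO_SHARERS
  // writeback requests) arrive on the request virtual network and map
  // one-to-one onto directory events.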
  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          trigger(Event:PUTX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTO) {
          trigger(Event:PUTO, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
          trigger(Event:PUTO_SHARERS, in_msg.Address);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    directory[address].Owner.clear();
  }

  action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
    directory[address].Sharers.addNetDest(directory[address].Owner);
    directory[address].Owner.clear();
  }

  action(cc_clearSharers, "\c", desc="Clear the sharers field") {
    directory[address].Sharers.clear();
  }

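  // The Acks count sent with data is the number of current sharers, not
  // counting the requestor itself; a GETS that finds no sharers is answered
  // with DATA_EXCLUSIVE instead of DATA.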
  action(d_sendData, "d", desc="Send data to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
      // enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
        out_msg.Address := address;

        if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        } else {
          out_msg.Type := CoherenceResponseType:DATA;
        }

        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Acks := directory[address].Sharers.count();
        if (directory[address].Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
    peek(unblockNetwork_in, ResponseMsg) {
      directory[address].Owner.clear();
      directory[address].Owner.add(in_msg.Sender);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
        out_msg.Acks := directory[address].Sharers.count();
        if (directory[address].Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

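  // An INV is sent only if at least one sharer other than the requestor
  // exists; the requestor is always removed from the destination set.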
  action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
    peek(requestQueue_in, RequestMsg) {
      if ((directory[in_msg.Address].Sharers.count() > 1) ||
          ((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
        enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:INV;
          out_msg.Requestor := in_msg.Requestor;
          // out_msg.Destination := directory[in_msg.Address].Sharers;
          out_msg.Destination.addNetDest(directory[in_msg.Address].Sharers);
          out_msg.Destination.remove(in_msg.Requestor);
          out_msg.MessageSize := MessageSizeType:Invalidate_Control;
        }
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    unblockNetwork_in.dequeue();
  }

  action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
      DEBUG_EXPR(in_msg.Address);
      DEBUG_EXPR(in_msg.DataBlk);
    }
  }

  action(ll_checkDataInMemory, "\l", desc="Check PUTX/PUTO data is same as in the memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation.  We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
    }
  }

  action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
    peek(unblockNetwork_in, ResponseMsg) {
      directory[address].Sharers.add(in_msg.Sender);
    }
  }

  action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
    directory[address].WaitingUnblocks := directory[address].WaitingUnblocks + 1;
  }

  action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
    directory[address].WaitingUnblocks := directory[address].WaitingUnblocks - 1;
    assert(directory[address].WaitingUnblocks >= 0);
  }

  // action(z_stall, "z", desc="Cannot be handled right now.") {
  //   Special name recognized as do nothing case
  // }

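  // Requests that cannot be serviced in the current state are recycled
  // (re-queued to be retried later) rather than stalled.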
  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
    requestQueue_in.recycle();
  }

  // TRANSITIONS

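  // Most requests are serviced from the stable states (I/S/O/M) and move the
  // block into a blocked state until the matching unblock or writeback
  // arrives; GETS can also be serviced while already blocked in SS or OO.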
  transition(I, GETX, MM) {
    d_sendData;
    i_popIncomingRequestQueue;
  }

  transition(S, GETX, MM) {
    d_sendData;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(I, GETS, IS) {
    d_sendData;
    i_popIncomingRequestQueue;
  }

  transition({S, SS}, GETS, SS) {
    d_sendData;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition({I, S}, PUTO) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition({I, S, O}, PUTX) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(O, GETX, MM) {
    f_forwardRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition({O, OO}, GETS, OO) {
    f_forwardRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition(M, GETX, MM) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  transition(M, GETS, MO) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  // happens if M->O transition happens on-chip
  transition(M, PUTO, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTO_SHARERS, MIS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO, OS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO_SHARERS, OSS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

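  // In these blocked states every new request for the block is recycled
  // until the resolving unblock or writeback arrives.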
  transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX}) {
    zz_recycleRequest;
  }

  transition({MM, MO}, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(MO, Unblock, O) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX}) {
    zz_recycleRequest;
  }

  transition(IS, GETS) {
    zz_recycleRequest;
  }

  transition(IS, Unblock, S) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition(IS, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Last_Unblock, S) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Last_Unblock, O) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Dirty_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    l_writeDataToMemory;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    l_writeDataToMemory;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Dirty_Writeback, S) {
    c_clearOwner;
    l_writeDataToMemory;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    l_writeDataToMemory;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Clean_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    ll_checkDataInMemory;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Clean_Writeback, S) {
    c_clearOwner;
    ll_checkDataInMemory;
    j_popIncomingUnblockQueue;
  }

  transition({MI, MIS}, Unblock, M) {
    j_popIncomingUnblockQueue;
  }

  transition({OS, OSS}, Unblock, O) {
    j_popIncomingUnblockQueue;
  }
}