gem5/src/mem/protocol/MOESI_SMP_token-dir.sm

/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id: MOESI_token-dir.sm 1.5 04/11/17 14:07:50-06:00 mikem@emperor15.cs.wisc.edu $
 */

machine(Directory, "Token protocol") {

  MessageBuffer responseFromDir, network="To", virtual_network="0", ordered="false";
  MessageBuffer responseToDir, network="From", virtual_network="0", ordered="false";
  MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";
  MessageBuffer persistentToDir, network="From", virtual_network="2", ordered="true";
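
  // Only the persistent (starvation) network is ordered: the lockdown
  // table must see a requestor's activate and deactivate messages in the
  // order they were sent, so a deactivate can never overtake its matching
  // activate.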

  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_O") {
    // Base states
    O, desc="Owner";
    NO, desc="Not Owner";
    L, desc="Locked";
  }
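
  // In a token protocol the directory does not track sharers; it only
  // tracks the tokens it holds itself. O means memory currently holds the
  // owner token (and therefore a valid copy of the data), NO means some
  // cache holds it, and L means a persistent request has locked the block
  // down while a starving processor collects tokens.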

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    Lockdown, desc="A lockdown request arrives";
    Unlockdown, desc="An un-lockdown request arrives";
    Data_Owner, desc="Data arrives, including the owner token";
    Data_Shared, desc="Data arrives, without the owner token";
    Ack, desc="Tokens arrive";
    Ack_Owner, desc="Tokens arrive, including the owner token";
  }
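
  // The four response events distinguish whether data rides along and
  // whether the owner token is included. Ack_Owner is a clean owner-token
  // writeback that deliberately omits data, since memory's copy is
  // already up to date (see n_checkIncomingMsg below).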

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";
  }
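
  // Tokens defaults to max_tokens(): before any cache touches a block,
  // memory holds every token for it, which matches the default directory
  // state of O (owner).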

  external_type(DirectoryMemory) {
    Entry lookup(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **

  DirectoryMemory directory, constructor_hack="i";
  PersistentTable persistentTable, constructor_hack="i";
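
  // The persistent table tracks outstanding persistent (starvation)
  // requests for each block. findSmallest() returns the highest-priority
  // starver (the lowest-numbered machine), which is where the actions
  // below direct tokens and data while a block is locked.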

  State getState(Address addr) {
    return directory[addr].DirectoryState;
  }

  void setState(Address addr, State state) {
    directory[addr].DirectoryState := state;

    if (state == State:L) {
      assert(directory[addr].Tokens == 0);
    }

    // Make sure the token count is in range
    assert(directory[addr].Tokens >= 0);
    assert(directory[addr].Tokens <= max_tokens());

    if (state == State:O) {
      assert(directory[addr].Tokens >= 1); // Must have at least one token
      assert(directory[addr].Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
    }
  }
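
  // Token counting is the protocol's safety net: for any block, the
  // tokens held here, in caches, and in messages in flight always sum to
  // max_tokens(). The asserts above check the directory's share of that
  // invariant on every state change.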

  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);

  // ** IN_PORTS **

  in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {

        // Apply the lockdown or unlockdown message to the table
        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
        } else {
          error("Invalid message");
        }

        // React to the message based on the current state of the table
        if (persistentTable.isLocked(in_msg.Address)) {
          trigger(Event:Lockdown, in_msg.Address); // locked
        } else {
          trigger(Event:Unlockdown, in_msg.Address); // unlocked
        }
      }
    }
  }
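
  // The trigger depends on the table state after the update, not on the
  // message type: a deactivate from one processor can still leave the
  // block locked if another starving processor has its own persistent
  // request outstanding.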

  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
          trigger(Event:Data_Owner, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Data_Shared, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
          trigger(Event:Ack_Owner, in_msg.Address);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendTokens, "a", desc="Send tokens to requestor") {
    // Only send a message if we have tokens to send
    if (directory[address].Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:Directory;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DestMachine := MachineType:L1Cache;
          out_msg.Tokens := directory[in_msg.Address].Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      directory[address].Tokens := 0;
    }
  }
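
  // This sends a dataless ACK carrying only tokens. It is used in state
  // NO, where the owner token (and with it the responsibility for
  // supplying data) resides at some cache, so the directory can hand over
  // its non-owner tokens without touching memory.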

  action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
    // Only send a message if we have tokens to send
    if (directory[address].Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.DestMachine := MachineType:L1Cache;
        out_msg.Tokens := directory[address].Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
      directory[address].Tokens := 0;
    }
  }

  action(d_sendDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DestMachine := MachineType:L1Cache;
        assert(directory[address].Tokens > 0);
        out_msg.Tokens := directory[in_msg.Address].Tokens;
        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    directory[address].Tokens := 0;
  }

  action(dd_sendDataWithAllTokensToStarver, "\d", desc="Send data and tokens to starver") {
    enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:Directory;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      out_msg.DestMachine := MachineType:L1Cache;
      assert(directory[address].Tokens > 0);
      out_msg.Tokens := directory[address].Tokens;
      out_msg.DataBlk := directory[address].DataBlk;
      out_msg.Dirty := false;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    directory[address].Tokens := 0;
  }

  action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens >= 1);
      directory[address].Tokens := directory[address].Tokens + in_msg.Tokens;
    }
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
    persistentNetwork_in.dequeue();
  }

  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
      DEBUG_EXPR(in_msg.Address);
      DEBUG_EXPR(in_msg.DataBlk);
    }
  }

  action(n_checkIncomingMsg, "n", desc="Check incoming token message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
      assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
    }
  }

  action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.DestMachine := MachineType:L1Cache;
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(s_bounceDatalessOwnerToken, "s", desc="Bounce clean owner token to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation. We include the data in the "dataless" message so
      // we can assert the clean data matches the datablock in memory
      assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);

      // Bounce the message, but "re-associate" the data and the owner
      // token. In essence we're converting an ACK_OWNER message to a
      // DATA_OWNER message, keeping the number of tokens the same.
      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.DestMachine := MachineType:L1Cache;
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // TRANSITIONS

  // Trans. from O
  transition(O, GETX, NO) {
    d_sendDataWithAllTokens;
    j_popIncomingRequestQueue;
  }

  transition(O, GETS, NO) {
    d_sendDataWithAllTokens;
    // Since we found the owner, no need to forward
    j_popIncomingRequestQueue;
  }

  transition(O, Lockdown, L) {
    dd_sendDataWithAllTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(O, {Data_Shared, Ack}) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }
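
  // Even while memory is the owner, caches may return tokens: a cache
  // evicting a copy sends its non-owner tokens (as Data_Shared or Ack)
  // back to the directory, which simply absorbs them here.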

  // Trans. from NO
  transition(NO, GETX) {
    a_sendTokens;
    j_popIncomingRequestQueue;
  }

  transition(NO, GETS) {
    j_popIncomingRequestQueue;
  }

  transition(NO, Lockdown, L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO, Data_Owner, O) {
    m_writeDataToMemory;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO, Ack_Owner, O) {
    n_checkIncomingMsg;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO, {Data_Shared, Ack}) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }
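
  // A GETS in NO is simply popped: the owner token lives at some cache,
  // which is responsible for supplying the data. If the transient request
  // goes unanswered, the requestor's timeout/retry logic in the cache
  // controllers eventually escalates to a persistent request.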

  // Trans. from L
  transition(L, {GETX, GETS}) {
    j_popIncomingRequestQueue;
  }

  transition(L, Lockdown) {
    l_popIncomingPersistentQueue;
  }
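
  // While locked, the directory holds no tokens (setState asserts this),
  // so any tokens or data that arrive are immediately bounced to the
  // current starver instead of being absorbed; otherwise a persistent
  // GETX could never collect all max_tokens() tokens.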

  transition(L, {Data_Owner, Data_Shared, Ack}) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition(L, Ack_Owner) {
    s_bounceDatalessOwnerToken;
    k_popIncomingResponseQueue;
  }

  transition(L, Unlockdown, NO) {
    l_popIncomingPersistentQueue;
  }
}