ruby: removed unsupported protocol files

Brad Beckmann 2011-02-23 16:41:26 -08:00
parent 72fb282ab1
commit c09a33e5d5
35 changed files with 0 additions and 14391 deletions


@@ -1,894 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MSI_MOSI_CMP_directory-L1cache.sm 1.10 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
*
*/
machine(L1Cache, "MSI Directory L1 Cache CMP") {
// NODE L1 CACHE
// From this node's L1 cache TO the network
// a local L1 -> this L2 bank, currently ordered with directory forwarded requests
MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
// a local L1 -> this L2 bank
MessageBuffer responseFromL1Cache, network="To", virtual_network="3", ordered="false";
MessageBuffer unblockFromL1Cache, network="To", virtual_network="4", ordered="false";
// To this node's L1 cache FROM the network
// a L2 bank -> this L1
MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false";
// a L2 bank -> this L1
MessageBuffer responseToL1Cache, network="From", virtual_network="3", ordered="false";
// STATES
enumeration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
NP, desc="Not present in either cache";
I, desc="a L1 cache entry Idle";
S, desc="a L1 cache entry Shared";
E, desc="a L1 cache entry Exclusive";
M, desc="a L1 cache entry Modified", format="!b";
// Transient States
IS, desc="L1 idle, issued GETS, have not seen response yet";
IM, desc="L1 idle, issued GETX, have not seen response yet";
SM, desc="L1 shared, issued GETX or UPGRADE, have not seen response yet";
IS_I, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
M_I, desc="L1 replacing, waiting for ACK";
E_I, desc="L1 replacing, waiting for ACK";
}
// EVENTS
enumeration(Event, desc="Cache events") {
// L1 events
Load, desc="Load request from the home processor";
Ifetch, desc="I-fetch request from the home processor";
Store, desc="Store request from the home processor";
Inv, desc="Invalidate request from L2 bank";
// internal generated request
L1_Replacement, desc="L1 Replacement", format="!r";
// other requests
Fwd_GETX, desc="GETX from other processor";
Fwd_GETS, desc="GETS from other processor";
Fwd_GET_INSTR, desc="GET_INSTR from other processor";
Data, desc="Data for processor";
Data_Exclusive, desc="Data for processor";
DataS_fromL1, desc="data for GETS request, need to unblock directory";
Data_all_Acks, desc="Data for processor, all acks";
Ack, desc="Ack for processor";
Ack_all, desc="Last ack for processor";
WB_Ack, desc="Ack for replacement";
}
// TYPES
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
State CacheState, desc="cache state";
DataBlock DataBlk, desc="data for the block";
bool Dirty, default="false", desc="data is dirty";
}
// TBE fields
structure(TBE, desc="...") {
Address Address, desc="Physical address for this TBE";
State TBEState, desc="Transient state";
DataBlock DataBlk, desc="Buffer for the data block";
bool Dirty, default="false", desc="data is dirty";
bool isPrefetch, desc="Set if this was caused by a prefetch";
int pendingAcks, default="0", desc="number of pending acks";
}
external_type(CacheMemory) {
bool cacheAvail(Address);
Address cacheProbe(Address);
void allocate(Address);
void deallocate(Address);
Entry lookup(Address);
void changePermission(Address, AccessPermission);
bool isTagPresent(Address);
}
external_type(TBETable) {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
int cache_state_to_int(State state);
// inclusive cache returns L1 entries only
Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
if (L1DcacheMemory.isTagPresent(addr)) {
return L1DcacheMemory[addr];
} else {
return L1IcacheMemory[addr];
}
}
void changeL1Permission(Address addr, AccessPermission permission) {
if (L1DcacheMemory.isTagPresent(addr)) {
return L1DcacheMemory.changePermission(addr, permission);
} else if(L1IcacheMemory.isTagPresent(addr)) {
return L1IcacheMemory.changePermission(addr, permission);
} else {
error("cannot change permission, L1 block not present");
}
}
bool isL1CacheTagPresent(Address addr) {
return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
}
State getState(Address addr) {
if((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == true){
DEBUG_EXPR(id);
DEBUG_EXPR(addr);
}
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
if(L1_TBEs.isPresent(addr)) {
return L1_TBEs[addr].TBEState;
} else if (isL1CacheTagPresent(addr)) {
return getL1CacheEntry(addr).CacheState;
}
return State:NP;
}
void setState(Address addr, State state) {
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
// MUST CHANGE
if(L1_TBEs.isPresent(addr)) {
L1_TBEs[addr].TBEState := state;
}
if (isL1CacheTagPresent(addr)) {
getL1CacheEntry(addr).CacheState := state;
// Set permission
if (state == State:I) {
changeL1Permission(addr, AccessPermission:Invalid);
} else if (state == State:S || state == State:E) {
changeL1Permission(addr, AccessPermission:Read_Only);
} else if (state == State:M) {
changeL1Permission(addr, AccessPermission:Read_Write);
} else {
changeL1Permission(addr, AccessPermission:Busy);
}
}
}
Event mandatory_request_type_to_event(CacheRequestType type) {
if (type == CacheRequestType:LD) {
return Event:Load;
} else if (type == CacheRequestType:IFETCH) {
return Event:Ifetch;
} else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
return Event:Store;
} else {
error("Invalid CacheRequestType");
}
}
GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
if (machineIDToMachineType(sender) == MachineType:L1Cache) {
return GenericMachineType:L1Cache_wCC; // NOTE direct L1 hits should not call this
} else if (machineIDToMachineType(sender) == MachineType:L2Cache) {
return GenericMachineType:L2Cache;
} else {
return ConvertMachToGenericMach(machineIDToMachineType(sender));
}
}
out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
// Response IntraChip L1 Network - response msg to this L1 cache
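// Data responses carry an AckCount; when the TBE's pendingAcks minus the
// incoming AckCount reaches zero, the all-acks variant of the event fires
// (Data_all_Acks / Ack_all) and the outstanding miss can complete.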
in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache) {
if (responseIntraChipL1Network_in.isReady()) {
peek(responseIntraChipL1Network_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.Address);
} else if(in_msg.Type == CoherenceResponseType:DATA) {
if ( (getState(in_msg.Address) == State:IS || getState(in_msg.Address) == State:IS_I) &&
machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache ) {
trigger(Event:DataS_fromL1, in_msg.Address);
} else if ( (L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0 ) {
trigger(Event:Data_all_Acks, in_msg.Address);
} else {
trigger(Event:Data, in_msg.Address);
}
} else if (in_msg.Type == CoherenceResponseType:ACK) {
if ( (L1_TBEs[in_msg.Address].pendingAcks - in_msg.AckCount) == 0 ) {
trigger(Event:Ack_all, in_msg.Address);
} else {
trigger(Event:Ack, in_msg.Address);
}
} else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
trigger(Event:WB_Ack, in_msg.Address);
} else {
error("Invalid L1 response type");
}
}
}
}
// Request IntraChip L1 Network - requests forwarded from the shared L2 to this L1 cache
in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache) {
if(requestIntraChipL1Network_in.isReady()) {
peek(requestIntraChipL1Network_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
// upgrade transforms to GETX due to race
trigger(Event:Fwd_GETX, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
trigger(Event:Fwd_GET_INSTR, in_msg.Address);
} else {
error("Invalid forwarded request type");
}
}
}
}
// Mandatory Queue between the node's CPU and its L1 caches
in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, CacheMsg) {
// Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
if (in_msg.Type == CacheRequestType:IFETCH) {
// ** INSTRUCTION ACCESS ***
// Check to see if it is in the OTHER L1
if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.Address);
}
if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
// The tag matches for the L1, so the L1 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
} else {
if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
// L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
}
}
} else {
// *** DATA ACCESS ***
// Check to see if it is in the OTHER L1
if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.Address);
}
if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
// The tag matches for the L1, so the L1 asks the L2 for it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
} else {
if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
// L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
}
}
}
}
}
}
// ACTIONS
action(a_issueGETS, "a", desc="Issue GETS") {
peek(mandatoryQueue_in, CacheMsg) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
peek(mandatoryQueue_in, CacheMsg) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GET_INSTR;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(b_issueGETX, "b", desc="Issue GETX") {
peek(mandatoryQueue_in, CacheMsg) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
DEBUG_EXPR(machineID);
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
peek(mandatoryQueue_in, CacheMsg) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:UPGRADE;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(d_sendDataToRequestor, "d", desc="send data to requestor") {
peek(requestIntraChipL1Network_in, RequestMsg) {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
out_msg.Dirty := getL1CacheEntry(address).Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
out_msg.Dirty := getL1CacheEntry(address).Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
peek(requestIntraChipL1Network_in, RequestMsg) {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := L1_TBEs[address].DataBlk;
out_msg.Dirty := L1_TBEs[address].Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := L1_TBEs[address].DataBlk;
out_msg.Dirty := L1_TBEs[address].Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
peek(requestIntraChipL1Network_in, RequestMsg) {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
out_msg.Dirty := getL1CacheEntry(address).Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
out_msg.MessageSize := MessageSizeType:Writeback_Data;
}
}
action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := L1_TBEs[address].DataBlk;
out_msg.Dirty := L1_TBEs[address].Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
out_msg.MessageSize := MessageSizeType:Writeback_Data;
}
}
action(fi_sendInvAck, "fi", desc="send invalidate ack to requestor") {
peek(requestIntraChipL1Network_in, RequestMsg) {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Control;
out_msg.AckCount := 1;
}
}
}
action(g_issuePUTX, "g", desc="issue PUTX (replacement) to the L2 cache") {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
out_msg.Dirty := getL1CacheEntry(address).Dirty;
out_msg.Requestor:= machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
if (getL1CacheEntry(address).Dirty) {
out_msg.MessageSize := MessageSizeType:Writeback_Data;
} else {
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Sender := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
enqueue(unblockNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
out_msg.Sender := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
}
action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") {
peek(responseIntraChipL1Network_in, ResponseMsg) {
sequencer.readCallback(address, getL1CacheEntry(address).DataBlk, getNondirectHitMachType(in_msg.Address, in_msg.Sender), PrefetchBit:No);
}
}
action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
getL1CacheEntry(address).Dirty := true;
}
action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") {
peek(responseIntraChipL1Network_in, ResponseMsg) {
sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk, getNondirectHitMachType(in_msg.Address, in_msg.Sender), PrefetchBit:No);
}
getL1CacheEntry(address).Dirty := true;
}
action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
check_allocate(L1_TBEs);
L1_TBEs.allocate(address);
L1_TBEs[address].isPrefetch := false;
L1_TBEs[address].Dirty := getL1CacheEntry(address).Dirty;
L1_TBEs[address].DataBlk := getL1CacheEntry(address).DataBlk;
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
mandatoryQueue_in.dequeue();
}
action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
}
action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
profileMsgDelay(3, responseIntraChipL1Network_in.dequeue_getDelayCycles());
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
L1_TBEs.deallocate(address);
}
action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
peek(responseIntraChipL1Network_in, ResponseMsg) {
getL1CacheEntry(address).DataBlk := in_msg.DataBlk;
getL1CacheEntry(address).Dirty := in_msg.Dirty;
}
}
action(q_updateAckCount, "q", desc="Update ack count") {
peek(responseIntraChipL1Network_in, ResponseMsg) {
L1_TBEs[address].pendingAcks := L1_TBEs[address].pendingAcks - in_msg.AckCount;
APPEND_TRANSITION_COMMENT(in_msg.AckCount);
APPEND_TRANSITION_COMMENT(" p: ");
APPEND_TRANSITION_COMMENT(L1_TBEs[address].pendingAcks);
}
}
action(z_stall, "z", desc="Stall") {
}
action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory.deallocate(address);
} else {
L1IcacheMemory.deallocate(address);
}
}
action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
if (L1DcacheMemory.isTagPresent(address) == false) {
L1DcacheMemory.allocate(address);
}
}
action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
if (L1IcacheMemory.isTagPresent(address) == false) {
L1IcacheMemory.allocate(address);
}
}
action(zz_recycleRequestQueue, "zz", desc="recycle L1 request queue") {
requestIntraChipL1Network_in.recycle();
}
action(z_recycleMandatoryQueue, "\z", desc="recycle L1 mandatory queue") {
mandatoryQueue_in.recycle();
}
//*****************************************************
// TRANSITIONS
//*****************************************************
// Transitions for Load/Store/Replacement/WriteBack from transient states
transition({IS, IM, IS_I, M_I, E_I, SM}, {Load, Ifetch, Store, L1_Replacement}) {
z_recycleMandatoryQueue;
}
// Transitions from Idle
transition({NP,I}, L1_Replacement) {
ff_deallocateL1CacheBlock;
}
transition({NP,I}, Load, IS) {
oo_allocateL1DCacheBlock;
i_allocateTBE;
a_issueGETS;
k_popMandatoryQueue;
}
transition({NP,I}, Ifetch, IS) {
pp_allocateL1ICacheBlock;
i_allocateTBE;
ai_issueGETINSTR;
k_popMandatoryQueue;
}
transition({NP,I}, Store, IM) {
oo_allocateL1DCacheBlock;
i_allocateTBE;
b_issueGETX;
k_popMandatoryQueue;
}
transition({NP, I}, Inv) {
fi_sendInvAck;
l_popRequestQueue;
}
// Transitions from Shared
transition(S, {Load,Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(S, Store, SM) {
i_allocateTBE;
c_issueUPGRADE;
k_popMandatoryQueue;
}
transition(S, L1_Replacement, I) {
ff_deallocateL1CacheBlock;
}
transition(S, Inv, I) {
fi_sendInvAck;
l_popRequestQueue;
}
// Transitions from Exclusive
transition(E, {Load, Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(E, Store, M) {
hh_store_hit;
k_popMandatoryQueue;
}
transition(E, L1_Replacement, M_I) {
// silent E replacement??
i_allocateTBE;
g_issuePUTX; // send data, but hold in case forwarded request
ff_deallocateL1CacheBlock;
}
transition(E, Inv, I) {
// don't send data
fi_sendInvAck;
l_popRequestQueue;
}
transition(E, Fwd_GETX, I) {
d_sendDataToRequestor;
l_popRequestQueue;
}
transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
d_sendDataToRequestor;
d2_sendDataToL2;
l_popRequestQueue;
}
// Transitions from Modified
transition(M, {Load, Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(M, Store) {
hh_store_hit;
k_popMandatoryQueue;
}
transition(M, L1_Replacement, M_I) {
i_allocateTBE;
g_issuePUTX; // send data, but hold in case forwarded request
ff_deallocateL1CacheBlock;
}
transition(M_I, WB_Ack, I) {
s_deallocateTBE;
o_popIncomingResponseQueue;
}
transition(M, Inv, I) {
f_sendDataToL2;
l_popRequestQueue;
}
transition(M_I, Inv, I) {
ft_sendDataToL2_fromTBE;
s_deallocateTBE;
l_popRequestQueue;
}
transition(M, Fwd_GETX, I) {
d_sendDataToRequestor;
l_popRequestQueue;
}
transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
d_sendDataToRequestor;
d2_sendDataToL2;
l_popRequestQueue;
}
transition(M_I, Fwd_GETX, I) {
dt_sendDataToRequestor_fromTBE;
s_deallocateTBE;
l_popRequestQueue;
}
transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, I) {
dt_sendDataToRequestor_fromTBE;
d2t_sendDataToL2_fromTBE;
s_deallocateTBE;
l_popRequestQueue;
}
// Transitions from IS
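// An Inv that races ahead of the GETS data (the directory does not block on a
// GETS hit) moves IS to IS_I; the returning data still satisfies the load, but
// the block ends in I instead of S.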
transition({IS, IS_I}, Inv, IS_I) {
fi_sendInvAck;
l_popRequestQueue;
}
transition(IS, Data_all_Acks, S) {
u_writeDataToL1Cache;
x_external_load_hit;
s_deallocateTBE;
j_sendUnblock;
o_popIncomingResponseQueue;
}
transition(IS_I, Data_all_Acks, I) {
u_writeDataToL1Cache;
x_external_load_hit;
s_deallocateTBE;
j_sendUnblock;
o_popIncomingResponseQueue;
}
transition(IS, DataS_fromL1, S) {
u_writeDataToL1Cache;
j_sendUnblock;
x_external_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
transition(IS_I, DataS_fromL1, I) {
u_writeDataToL1Cache;
j_sendUnblock;
x_external_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
// directory is blocked when sending exclusive data
transition(IS_I, Data_Exclusive, E) {
u_writeDataToL1Cache;
x_external_load_hit;
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
transition(IS, Data_Exclusive, E) {
u_writeDataToL1Cache;
x_external_load_hit;
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
// Transitions from IM
transition({IM, SM}, Inv, IM) {
fi_sendInvAck;
l_popRequestQueue;
}
transition(IM, Data, SM) {
u_writeDataToL1Cache;
q_updateAckCount;
o_popIncomingResponseQueue;
}
transition(IM, Data_all_Acks, M) {
u_writeDataToL1Cache;
xx_external_store_hit;
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
// Transitions from SM
transition({SM, IM}, Ack) {
q_updateAckCount;
o_popIncomingResponseQueue;
}
transition(SM, Ack_all, M) {
jj_sendExclusiveUnblock;
xx_external_store_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
}

File diff suppressed because it is too large.


@@ -1,166 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
*/
machine(Directory, "Token protocol") {
MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
MessageBuffer responseToDir, network="From", virtual_network="3", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false";
// STATES
enumeration(State, desc="Directory states", default="Directory_State_I") {
// Base states
I, desc="Owner";
}
// Events
enumeration(Event, desc="Directory events") {
Fetch, desc="A memory fetch request (GETS or GETX) arrives";
Data, desc="Writeback data arrives";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="...") {
DataBlock DataBlk, desc="data for the block";
}
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
// ** OBJECTS **
DirectoryMemory directory, constructor_hack="i";
State getState(Address addr) {
return State:I;
}
void setState(Address addr, State state) {
}
// ** OUT_PORTS **
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
// ** IN_PORTS **
in_port(requestNetwork_in, RequestMsg, requestToDir) {
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fetch, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:Fetch, in_msg.Address);
} else {
error("Invalid message");
}
}
}
}
in_port(responseNetwork_in, ResponseMsg, responseToDir) {
if (responseNetwork_in.isReady()) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
trigger(Event:Data, in_msg.Address);
} else {
DEBUG_EXPR(in_msg.Type);
error("Invalid message");
}
}
}
}
// Actions
action(a_sendAck, "a", desc="Send ack to L2") {
peek(responseNetwork_in, ResponseMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:MEMORY_ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Sender);
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(d_sendData, "d", desc="Send data to requestor") {
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:MEMORY_DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DataBlk := directory[in_msg.Address].DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
requestNetwork_in.dequeue();
}
action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
responseNetwork_in.dequeue();
}
action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
peek(responseNetwork_in, ResponseMsg) {
directory[in_msg.Address].DataBlk := in_msg.DataBlk;
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(in_msg.DataBlk);
}
}
// TRANSITIONS
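// This directory has a single base state: fetches are serviced directly from
// the directory's copy of the block (MEMORY_LATENCY), and dirty writebacks are
// written to memory and acknowledged.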
transition(I, Fetch) {
d_sendData;
j_popIncomingRequestQueue;
}
transition(I, Data) {
m_writeDataToMemory;
a_sendAck;
k_popIncomingResponseQueue;
}
}


@@ -1,112 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MSI_MOSI_CMP_directory-msg.sm 1.5 05/01/19 15:48:37-06:00 mikem@royal16.cs.wisc.edu $
*
*/
// CoherenceRequestType
enumeration(CoherenceRequestType, desc="...") {
GETX, desc="Get eXclusive";
UPGRADE, desc="UPGRADE to exclusive";
GETS, desc="Get Shared";
GET_INSTR, desc="Get Instruction";
INV, desc="INValidate";
PUTX, desc="replacement message";
}
// CoherenceResponseType
enumeration(CoherenceResponseType, desc="...") {
MEMORY_ACK, desc="Ack from memory controller";
DATA, desc="Data";
DATA_EXCLUSIVE, desc="Data";
MEMORY_DATA, desc="Data";
ACK, desc="Generic invalidate ack";
WB_ACK, desc="writeback ack";
UNBLOCK, desc="unblock";
EXCLUSIVE_UNBLOCK, desc="exclusive unblock";
}
// RequestMsg
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
AccessModeType AccessMode, desc="user/supervisor access type";
MachineID Requestor, desc="What component requested";
NetDest Destination, desc="What components receive the request, includes MachineType and num";
MessageSizeType MessageSize, desc="size category of the message";
DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
bool Dirty, default="false", desc="Dirty bit";
PrefetchBit Prefetch, desc="Is this a prefetch request";
}
// ResponseMsg
structure(ResponseMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID Sender, desc="What component sent the data";
NetDest Destination, desc="Node to whom the data is sent";
DataBlock DataBlk, desc="Data for the cache line";
bool Dirty, default="false", desc="Dirty bit";
int AckCount, default="0", desc="number of acks in this message";
MessageSizeType MessageSize, desc="size category of the message";
}
/*
GenericRequestType convertToGenericType(CoherenceRequestType type) {
if(type == CoherenceRequestType:PUTX) {
return GenericRequestType:PUTX;
} else if(type == CoherenceRequestType:GETS) {
return GenericRequestType:GETS;
} else if(type == CoherenceRequestType:GET_INSTR) {
return GenericRequestType:GET_INSTR;
} else if(type == CoherenceRequestType:GETX) {
return GenericRequestType:GETX;
} else if(type == CoherenceRequestType:UPGRADE) {
return GenericRequestType:UPGRADE;
} else if(type == CoherenceRequestType:PUTS) {
return GenericRequestType:PUTS;
} else if(type == CoherenceRequestType:INV) {
return GenericRequestType:INV;
} else if(type == CoherenceRequestType:INV_S) {
return GenericRequestType:INV_S;
} else if(type == CoherenceRequestType:L1_DG) {
return GenericRequestType:DOWNGRADE;
} else if(type == CoherenceRequestType:WB_ACK) {
return GenericRequestType:WB_ACK;
} else if(type == CoherenceRequestType:EXE_ACK) {
return GenericRequestType:EXE_ACK;
} else {
DEBUG_EXPR(type);
error("invalid CoherenceRequestType");
}
}
*/


@@ -1,5 +0,0 @@
MESI_SCMP_bankdirectory-msg.sm
MESI_SCMP_bankdirectory-L2cache.sm
MESI_SCMP_bankdirectory-L1cache.sm
MESI_SCMP_bankdirectory-mem.sm
standard_CMP-protocol.sm


@@ -1,250 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
*/
// This file is copied from Yasuko Watanabe's prefetch / memory protocol
// Copied here by aep 12/14/07
machine(Directory, "MESI_SCMP_bankdirectory protocol") {
MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
MessageBuffer responseToDir, network="From", virtual_network="3", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false";
// STATES
enumeration(State, desc="Directory states", default="Directory_State_I") {
// Base states
I, desc="Owner";
}
// Events
enumeration(Event, desc="Directory events") {
Fetch, desc="A memory fetch arrives";
Data, desc="writeback data arrives";
Memory_Data, desc="Fetched data from memory arrives";
Memory_Ack, desc="Writeback Ack from memory arrives";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="...") {
DataBlock DataBlk, desc="data for the block";
}
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
// to simulate detailed DRAM
external_type(MemoryControl, inport="yes", outport="yes") {
}
// ** OBJECTS **
DirectoryMemory directory, constructor_hack="i";
MemoryControl memBuffer, constructor_hack="i";
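// Memory requests are issued to memBuffer through memQueue_out; completed
// reads and writebacks return on memQueue_in as Memory_Data / Memory_Ack.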
State getState(Address addr) {
return State:I;
}
void setState(Address addr, State state) {
}
bool isGETRequest(CoherenceRequestType type) {
return (type == CoherenceRequestType:GETS) ||
(type == CoherenceRequestType:GET_INSTR) ||
(type == CoherenceRequestType:GETX);
}
// ** OUT_PORTS **
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
in_port(requestNetwork_in, RequestMsg, requestToDir) {
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (isGETRequest(in_msg.Type)) {
trigger(Event:Fetch, in_msg.Address);
} else {
DEBUG_EXPR(in_msg);
error("Invalid message");
}
}
}
}
in_port(responseNetwork_in, ResponseMsg, responseToDir) {
if (responseNetwork_in.isReady()) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
trigger(Event:Data, in_msg.Address);
} else {
DEBUG_EXPR(in_msg.Type);
error("Invalid message");
}
}
}
}
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, memBuffer) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.Address);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Address);
} else {
DEBUG_EXPR(in_msg.Type);
error("Invalid message");
}
}
}
}
// Actions
action(a_sendAck, "a", desc="Send ack to L2") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:MEMORY_ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(d_sendData, "d", desc="Send data to requestor") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:MEMORY_DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
requestNetwork_in.dequeue();
}
action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
responseNetwork_in.dequeue();
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
memQueue_in.dequeue();
}
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestNetwork_in, RequestMsg) {
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_READ;
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.DataBlk := directory[in_msg.Address].DataBlk;
DEBUG_EXPR(out_msg);
}
}
}
action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
peek(responseNetwork_in, ResponseMsg) {
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Sender;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
DEBUG_EXPR(out_msg);
}
}
}
action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
peek(responseNetwork_in, ResponseMsg) {
directory[in_msg.Address].DataBlk := in_msg.DataBlk;
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(in_msg.DataBlk);
}
}
// TRANSITIONS
transition(I, Fetch) {
//d_sendData;
qf_queueMemoryFetchRequest;
j_popIncomingRequestQueue;
}
transition(I, Data) {
m_writeDataToMemory;
//a_sendAck;
qw_queueMemoryWBRequest;
k_popIncomingResponseQueue;
}
transition(I, Memory_Data) {
d_sendData;
l_popMemQueue;
}
transition(I, Memory_Ack) {
a_sendAck;
l_popMemQueue;
}
}


@@ -1,5 +0,0 @@
MESI_SCMP_bankdirectory-msg.sm
MESI_SCMP_bankdirectory-L2cache.sm
MESI_SCMP_bankdirectory-L1cache.sm
MESI_SCMP_bankdirectory_m-mem.sm
standard_CMP-protocol.sm


@@ -1,573 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MOESI_CMP_directory-dir.sm 1.11 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
*/
machine(Directory, "Directory protocol") {
// ** IN QUEUES **
MessageBuffer foo1, network="From", virtual_network="0", ordered="false"; // a mod-L2 bank -> this Dir
MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false"; // a mod-L2 bank -> this Dir
MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false"; // a mod-L2 bank -> this Dir
MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false"; // Dir -> mod-L2 bank
// STATES
enumeration(State, desc="Directory states", default="Directory_State_I") {
// Base states
I, desc="Invalid";
S, desc="Shared";
O, desc="Owner";
M, desc="Modified";
IS, desc="Blocked, was in idle";
SS, desc="Blocked, was in shared";
OO, desc="Blocked, was in owned";
MO, desc="Blocked, going to owner or maybe modified";
MM, desc="Blocked, going to modified";
MI, desc="Blocked on a writeback";
MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
OS, desc="Blocked on a writeback";
OSS, desc="Blocked on a writeback, but don't remove from sharers when received";
}
// Events
enumeration(Event, desc="Directory events") {
GETX, desc="A GETX arrives";
GETS, desc="A GETS arrives";
PUTX, desc="A PUTX arrives";
PUTO, desc="A PUTO arrives";
PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
Unblock, desc="An unblock message arrives";
Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="...") {
State DirectoryState, desc="Directory state";
DataBlock DataBlk, desc="data for the block";
NetDest Sharers, desc="Sharers for this block";
NetDest Owner, desc="Owner of this block";
int WaitingUnblocks, desc="Number of unblock messages we're waiting for";
}
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
// ** OBJECTS **
DirectoryMemory directory, constructor_hack="i";
State getState(Address addr) {
return directory[addr].DirectoryState;
}
void setState(Address addr, State state) {
if (directory.isPresent(addr)) {
if (state == State:I) {
assert(directory[addr].Owner.count() == 0);
assert(directory[addr].Sharers.count() == 0);
}
if (state == State:S) {
assert(directory[addr].Owner.count() == 0);
}
if (state == State:O) {
assert(directory[addr].Owner.count() == 1);
assert(directory[addr].Sharers.isSuperset(directory[addr].Owner) == false);
}
if (state == State:M) {
assert(directory[addr].Owner.count() == 1);
assert(directory[addr].Sharers.count() == 0);
}
if ((state != State:SS) && (state != State:OO)) {
assert(directory[addr].WaitingUnblocks == 0);
}
if ( (directory[addr].DirectoryState != State:I) && (state == State:I) ) {
directory[addr].DirectoryState := state;
// disable coherence checker
// sequencer.checkCoherence(addr);
}
else {
directory[addr].DirectoryState := state;
}
}
}
// if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
bool isBlockShared(Address addr) {
if (directory.isPresent(addr)) {
if (directory[addr].DirectoryState == State:I) {
return true;
}
}
return false;
}
bool isBlockExclusive(Address addr) {
if (directory.isPresent(addr)) {
if (directory[addr].DirectoryState == State:I) {
return true;
}
}
return false;
}
// ** OUT_PORTS **
out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
// out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
out_port(goo1_out, ResponseMsg, goo1);
// ** IN_PORTS **
in_port(foo1_in, ResponseMsg, foo1) {
}
// in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
// if (unblockNetwork_in.isReady()) {
in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
if (unblockNetwork_in.isReady()) {
peek(unblockNetwork_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
if (directory[in_msg.Address].WaitingUnblocks == 1) {
trigger(Event:Last_Unblock, in_msg.Address);
} else {
trigger(Event:Unblock, in_msg.Address);
}
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
trigger(Event:Dirty_Writeback, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
trigger(Event:Clean_Writeback, in_msg.Address);
} else {
error("Invalid message");
}
}
}
}
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:GETX, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
trigger(Event:PUTX, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
trigger(Event:PUTO, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
trigger(Event:PUTO_SHARERS, in_msg.Address);
} else {
error("Invalid message");
}
}
}
}
// Actions
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(c_clearOwner, "c", desc="Clear the owner field") {
directory[address].Owner.clear();
}
action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
directory[address].Sharers.addNetDest(directory[address].Owner);
directory[address].Owner.clear();
}
action(cc_clearSharers, "\c", desc="Clear the sharers field") {
directory[address].Sharers.clear();
}
action(d_sendData, "d", desc="Send data to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
// enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
out_msg.Address := address;
if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
} else {
out_msg.Type := CoherenceResponseType:DATA;
}
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DataBlk := directory[in_msg.Address].DataBlk;
out_msg.Dirty := false; // By definition, the block is now clean
out_msg.Acks := directory[address].Sharers.count();
if (directory[address].Sharers.isElement(in_msg.Requestor)) {
out_msg.Acks := out_msg.Acks - 1;
}
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
peek(unblockNetwork_in, ResponseMsg) {
directory[address].Owner.clear();
directory[address].Owner.add(in_msg.Sender);
}
}
action(f_forwardRequest, "f", desc="Forward request to owner") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
out_msg.Acks := directory[address].Sharers.count();
if (directory[address].Sharers.isElement(in_msg.Requestor)) {
out_msg.Acks := out_msg.Acks - 1;
}
out_msg.MessageSize := MessageSizeType:Forwarded_Control;
}
}
}
action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
peek(requestQueue_in, RequestMsg) {
if ((directory[in_msg.Address].Sharers.count() > 1) ||
((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := in_msg.Requestor;
// out_msg.Destination := directory[in_msg.Address].Sharers;
out_msg.Destination.addNetDest(directory[in_msg.Address].Sharers);
out_msg.Destination.remove(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Invalidate_Control;
}
}
}
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
requestQueue_in.dequeue();
}
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
unblockNetwork_in.dequeue();
}
action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Dirty);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
directory[in_msg.Address].DataBlk := in_msg.DataBlk;
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
in_msg.Address, in_msg.DataBlk);
}
}
action(ll_checkDataInMemory, "\l", desc="Check PUTX/PUTO data is same as in the memory") {
peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Dirty == false);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
// NOTE: The following check would not be valid in a real
// implementation. We include the data in the "dataless"
// message so we can assert the clean data matches the datablock
// in memory
assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
}
}
action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
peek(unblockNetwork_in, ResponseMsg) {
directory[address].Sharers.add(in_msg.Sender);
}
}
action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
directory[address].WaitingUnblocks := directory[address].WaitingUnblocks + 1;
}
action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
directory[address].WaitingUnblocks := directory[address].WaitingUnblocks - 1;
assert(directory[address].WaitingUnblocks >= 0);
}
// action(z_stall, "z", desc="Cannot be handled right now.") {
// Special name recognized as do nothing case
// }
action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
requestQueue_in.recycle();
}
// TRANSITIONS
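// Blocked states (IS, SS, OO, MM, MO, MI, MIS, OS, OSS) hold the line until the
// requester's unblock or writeback arrives; requests that cannot be serviced
// while a line is blocked are recycled on the request queue in the meantime.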
transition(I, GETX, MM) {
d_sendData;
i_popIncomingRequestQueue;
}
transition(S, GETX, MM) {
d_sendData;
g_sendInvalidations;
i_popIncomingRequestQueue;
}
transition(I, GETS, IS) {
d_sendData;
i_popIncomingRequestQueue;
}
transition({S, SS}, GETS, SS) {
d_sendData;
n_incrementOutstanding;
i_popIncomingRequestQueue;
}
transition({I, S}, PUTO) {
b_sendWriteBackNack;
i_popIncomingRequestQueue;
}
transition({I, S, O}, PUTX) {
b_sendWriteBackNack;
i_popIncomingRequestQueue;
}
transition(O, GETX, MM) {
f_forwardRequest;
g_sendInvalidations;
i_popIncomingRequestQueue;
}
transition({O, OO}, GETS, OO) {
f_forwardRequest;
n_incrementOutstanding;
i_popIncomingRequestQueue;
}
transition(M, GETX, MM) {
f_forwardRequest;
i_popIncomingRequestQueue;
}
transition(M, GETS, MO) {
f_forwardRequest;
i_popIncomingRequestQueue;
}
transition(M, PUTX, MI) {
a_sendWriteBackAck;
i_popIncomingRequestQueue;
}
// happens if M->O transition happens on-chip
transition(M, PUTO, MI) {
a_sendWriteBackAck;
i_popIncomingRequestQueue;
}
transition(M, PUTO_SHARERS, MIS) {
a_sendWriteBackAck;
i_popIncomingRequestQueue;
}
transition(O, PUTO, OS) {
a_sendWriteBackAck;
i_popIncomingRequestQueue;
}
transition(O, PUTO_SHARERS, OSS) {
a_sendWriteBackAck;
i_popIncomingRequestQueue;
}
transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX}) {
zz_recycleRequest;
}
transition({MM, MO}, Exclusive_Unblock, M) {
cc_clearSharers;
e_ownerIsUnblocker;
j_popIncomingUnblockQueue;
}
transition(MO, Unblock, O) {
m_addUnlockerToSharers;
j_popIncomingUnblockQueue;
}
transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX}) {
zz_recycleRequest;
}
transition(IS, GETS) {
zz_recycleRequest;
}
transition(IS, Unblock, S) {
m_addUnlockerToSharers;
j_popIncomingUnblockQueue;
}
transition(IS, Exclusive_Unblock, M) {
cc_clearSharers;
e_ownerIsUnblocker;
j_popIncomingUnblockQueue;
}
transition(SS, Unblock) {
m_addUnlockerToSharers;
o_decrementOutstanding;
j_popIncomingUnblockQueue;
}
transition(SS, Last_Unblock, S) {
m_addUnlockerToSharers;
o_decrementOutstanding;
j_popIncomingUnblockQueue;
}
transition(OO, Unblock) {
m_addUnlockerToSharers;
o_decrementOutstanding;
j_popIncomingUnblockQueue;
}
transition(OO, Last_Unblock, O) {
m_addUnlockerToSharers;
o_decrementOutstanding;
j_popIncomingUnblockQueue;
}
transition(MI, Dirty_Writeback, I) {
c_clearOwner;
cc_clearSharers;
l_writeDataToMemory;
j_popIncomingUnblockQueue;
}
transition(MIS, Dirty_Writeback, S) {
c_moveOwnerToSharer;
l_writeDataToMemory;
j_popIncomingUnblockQueue;
}
transition(MIS, Clean_Writeback, S) {
c_moveOwnerToSharer;
j_popIncomingUnblockQueue;
}
transition(OS, Dirty_Writeback, S) {
c_clearOwner;
l_writeDataToMemory;
j_popIncomingUnblockQueue;
}
transition(OSS, Dirty_Writeback, S) {
c_moveOwnerToSharer;
l_writeDataToMemory;
j_popIncomingUnblockQueue;
}
transition(OSS, Clean_Writeback, S) {
c_moveOwnerToSharer;
j_popIncomingUnblockQueue;
}
transition(MI, Clean_Writeback, I) {
c_clearOwner;
cc_clearSharers;
ll_checkDataInMemory;
j_popIncomingUnblockQueue;
}
transition(OS, Clean_Writeback, S) {
c_clearOwner;
ll_checkDataInMemory;
j_popIncomingUnblockQueue;
}
transition({MI, MIS}, Unblock, M) {
j_popIncomingUnblockQueue;
}
transition({OS, OSS}, Unblock, O) {
j_popIncomingUnblockQueue;
}
}

View file

@ -1,981 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
machine(L1Cache, "Directory protocol") {
MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false";
MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false";
MessageBuffer unblockFromCache, network="To", virtual_network="3", ordered="false";
MessageBuffer forwardToCache, network="From", virtual_network="1", ordered="false";
MessageBuffer responseToCache, network="From", virtual_network="2", ordered="false";
// STATES
enumeration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
NP, desc="Not Present";
I, desc="Idle";
S, desc="Shared";
O, desc="Owned";
E, desc="Exclusive (clean)";
M, desc="Modified (dirty)";
MM, desc="Modified (dirty and locally modified)";
// Transient States
IM, "IM", desc="Issued GetX";
SM, "SM", desc="Issued GetX, we still have an old copy of the line";
OM, "OM", desc="Issued GetX, received data";
IS, "IS", desc="Issued GetS";
OI, "OI", desc="Issued PutO, waiting for ack";
MI, "MI", desc="Issued PutX, waiting for ack";
II, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
}
// EVENTS
enumeration(Event, desc="Cache events") {
Load, desc="Load request from the processor";
Ifetch, desc="I-fetch request from the processor";
Store, desc="Store request from the processor";
L2_Replacement, desc="Replacement";
L1_to_L2, desc="L1 to L2 transfer";
L2_to_L1D, desc="L2 to L1-Data transfer";
L2_to_L1I, desc="L2 to L1-Instruction transfer";
// Requests
Own_GETX, desc="We observe our own GetX forwarded back to us";
Fwd_GETX, desc="A GetX from another processor";
Fwd_GETS, desc="A GetS from another processor";
Inv, desc="Invalidations from the directory";
// Responses
Ack, desc="Received an ack message";
Data, desc="Received a data message, responder has a shared copy";
Exclusive_Data_Clean, desc="Received a data message, no other processor has it, data is clean";
Exclusive_Data_Dirty, desc="Received a data message, no other processor has it, data is dirty";
Writeback_Ack, desc="Writeback O.K. from directory";
Writeback_Nack, desc="Writeback not O.K. from directory";
// Triggers
All_acks, desc="Received all required data and message acks";
}
// TYPES
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry") {
State CacheState, desc="cache state";
bool Dirty, desc="Is the data dirty (different than memory)?";
DataBlock DataBlk, desc="data for the block";
}
// TBE fields
structure(TBE, desc="...") {
State TBEState, desc="Transient state";
DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
bool Dirty, desc="Is the data dirty (different than memory)?";
int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
}
external_type(CacheMemory) {
bool cacheAvail(Address);
Address cacheProbe(Address);
void allocate(Address);
void deallocate(Address);
Entry lookup(Address);
void changePermission(Address, AccessPermission);
bool isTagPresent(Address);
}
external_type(TBETable) {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
MessageBuffer mandatoryQueue, abstract_chip_ptr="true", ordered="false";
Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";
TBETable TBEs, template_hack="<L1Cache_TBE>";
CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
CacheMemory L2cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L2"', abstract_chip_ptr="true";
Entry getCacheEntry(Address addr), return_by_ref="yes" {
if (L2cacheMemory.isTagPresent(addr)) {
return L2cacheMemory[addr];
} else if (L1DcacheMemory.isTagPresent(addr)) {
return L1DcacheMemory[addr];
} else {
return L1IcacheMemory[addr];
}
}
void changePermission(Address addr, AccessPermission permission) {
if (L2cacheMemory.isTagPresent(addr)) {
return L2cacheMemory.changePermission(addr, permission);
} else if (L1DcacheMemory.isTagPresent(addr)) {
return L1DcacheMemory.changePermission(addr, permission);
} else {
return L1IcacheMemory.changePermission(addr, permission);
}
}
bool isCacheTagPresent(Address addr) {
return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
}
State getState(Address addr) {
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
if(TBEs.isPresent(addr)) {
return TBEs[addr].TBEState;
} else if (isCacheTagPresent(addr)) {
return getCacheEntry(addr).CacheState;
}
return State:NP;
}
void setState(Address addr, State state) {
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
if (TBEs.isPresent(addr)) {
TBEs[addr].TBEState := state;
}
if (isCacheTagPresent(addr)) {
getCacheEntry(addr).CacheState := state;
if (state == State:E) {
assert(getCacheEntry(addr).Dirty == false);
}
if ((state == State:M) || (state == State:MM)) {
assert(getCacheEntry(addr).Dirty == true);
}
// Set permission
if (state == State:MM) {
changePermission(addr, AccessPermission:Read_Write);
} else if ((state == State:S) ||
(state == State:O) ||
(state == State:M) ||
(state == State:E) ||
(state == State:SM) ||
(state == State:OM)) {
changePermission(addr, AccessPermission:Read_Only);
} else {
changePermission(addr, AccessPermission:Invalid);
}
}
}
Event mandatory_request_type_to_event(CacheRequestType type) {
if (type == CacheRequestType:LD) {
return Event:Load;
} else if (type == CacheRequestType:IFETCH) {
return Event:Ifetch;
} else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
return Event:Store;
} else {
error("Invalid CacheRequestType");
}
}
MessageBuffer triggerQueue, ordered="true";
// ** OUT_PORTS **
out_port(requestNetwork_out, RequestMsg, requestFromCache);
out_port(responseNetwork_out, ResponseMsg, responseFromCache);
out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
out_port(triggerQueue_out, TriggerMsg, triggerQueue);
// ** IN_PORTS **
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks, in_msg.Address);
} else {
error("Unexpected message");
}
}
}
}
// Nothing from the request network
// Forward Network
in_port(forwardToCache_in, RequestMsg, forwardToCache) {
if (forwardToCache_in.isReady()) {
peek(forwardToCache_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETX) {
if (in_msg.Requestor == machineID) {
trigger(Event:Own_GETX, in_msg.Address);
} else {
trigger(Event:Fwd_GETX, in_msg.Address);
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.Address);
} else {
error("Unexpected message");
}
}
}
}
// Response Network
in_port(responseToCache_in, ResponseMsg, responseToCache) {
if (responseToCache_in.isReady()) {
peek(responseToCache_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE_CLEAN) {
trigger(Event:Exclusive_Data_Clean, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE_DIRTY) {
trigger(Event:Exclusive_Data_Dirty, in_msg.Address);
} else {
error("Unexpected message");
}
}
}
}
// Nothing from the unblock network
// Mandatory Queue
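// NOTE: The two L1s and the private L2 are kept mutually exclusive (a
// block lives in at most one of them; see the asserts in getState).
// The port below therefore first evicts a block found in the "wrong"
// L1, then either hits in the matching L1, pulls the block up from the
// L2, or makes room by moving a victim from the L1 into the L2 (or
// replacing an L2 block when the L2 is also full).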
in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, CacheMsg) {
// Check for data access to blocks in I-cache and ifetches to blocks in D-cache
if (in_msg.Type == CacheRequestType:IFETCH) {
// *** INSTRUCTION ACCESS ***
// Check to see if it is in the OTHER L1
if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
// The block is in the wrong L1, try to write it to the L2
if (L2cacheMemory.cacheAvail(in_msg.Address)) {
trigger(Event:L1_to_L2, in_msg.Address);
} else {
trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
}
}
if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
// The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
} else {
if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
// L1 doesn't have the line, but we have space for it in the L1
if (L2cacheMemory.isTagPresent(in_msg.Address)) {
// L2 has it (maybe not with the right permissions)
trigger(Event:L2_to_L1I, in_msg.Address);
} else {
// We have room, the L2 doesn't have it, so the L1 fetches the line
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
}
} else {
// No room in the L1, so we need to make room
if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.Address))) {
// The L2 has room, so we move the line from the L1 to the L2
trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.Address));
} else {
// The L2 does not have room, so we replace a line from the L2
trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.Address)));
}
}
}
} else {
// *** DATA ACCESS ***
// Check to see if it is in the OTHER L1
if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
// The block is in the wrong L1, try to write it to the L2
if (L2cacheMemory.cacheAvail(in_msg.Address)) {
trigger(Event:L1_to_L2, in_msg.Address);
} else {
trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
}
}
if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
// The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
} else {
if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
// L1 doesn't have the line, but we have space for it in the L1
if (L2cacheMemory.isTagPresent(in_msg.Address)) {
// L2 has it (maybe not with the right permissions)
trigger(Event:L2_to_L1D, in_msg.Address);
} else {
// We have room, the L2 doesn't have it, so the L1 fetches the line
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
}
} else {
// No room in the L1, so we need to make room
if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.Address))) {
// The L2 has room, so we move the line from the L1 to the L2
trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.Address));
} else {
// The L2 does not have room, so we replace a line from the L2
trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.Address)));
}
}
}
}
}
}
}
// ACTIONS
action(a_issueGETS, "a", desc="Issue GETS") {
enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Request_Control;
// TBEs[address].NumPendingMsgs := numberOfNodes(); // One from each other processor (n-1) plus the memory (+1)
}
}
action(b_issueGETX, "b", desc="Issue GETX") {
enqueue(requestNetwork_out, RequestMsg, latency="(ISSUE_LATENCY-1)") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Request_Control;
// TBEs[address].NumPendingMsgs := numberOfNodes(); // One from each other processor (n-1) plus the memory (+1)
}
}
action(d_issuePUTX, "d", desc="Issue PUTX") {
enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
action(dd_issuePUTO, "\d", desc="Issue PUTO") {
enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTO;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
action(e_sendData, "e", desc="Send data from cache to requestor") {
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.Dirty := getCacheEntry(address).Dirty;
out_msg.Acks := in_msg.Acks;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE_DIRTY;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.Dirty := getCacheEntry(address).Dirty;
out_msg.Acks := in_msg.Acks;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(f_sendAck, "f", desc="Send ack from cache to requestor") {
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.Acks := 0 - 1; // -1
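// NOTE: Acks are counted by summing: the data response carries the
// positive sharer count and each invalidation ack carries -1, so the
// requester's pending count (see m_decrementNumberOfMessages and
// o_checkForCompletion) reaches zero exactly when the data and all
// expected acks have arrived, regardless of arrival order.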
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(g_sendUnblock, "g", desc="Send unblock to memory") {
enqueue(unblockNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Sender := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Unblock_Control;
}
}
action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
enqueue(unblockNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Unblock_Control;
}
}
action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
DEBUG_EXPR(getCacheEntry(address).DataBlk);
sequencer.readCallback(address, getCacheEntry(address).DataBlk);
}
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
DEBUG_EXPR(getCacheEntry(address).DataBlk);
sequencer.writeCallback(address, getCacheEntry(address).DataBlk);
getCacheEntry(address).Dirty := true;
}
action(i_allocateTBE, "i", desc="Allocate TBE") {
check_allocate(TBEs);
TBEs.allocate(address);
TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
TBEs[address].Dirty := getCacheEntry(address).Dirty;
}
action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
triggerQueue_in.dequeue();
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
mandatoryQueue_in.dequeue();
}
action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
forwardToCache_in.dequeue();
}
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
peek(responseToCache_in, ResponseMsg) {
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
}
}
action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
peek(forwardToCache_in, RequestMsg) {
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
}
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
responseToCache_in.dequeue();
}
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
if (TBEs[address].NumPendingMsgs == 0) {
enqueue(triggerQueue_out, TriggerMsg) {
out_msg.Address := address;
out_msg.Type := TriggerType:ALL_ACKS;
}
}
}
action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DataBlk := TBEs[address].DataBlk;
out_msg.Dirty := TBEs[address].Dirty;
out_msg.Acks := in_msg.Acks;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
enqueue(unblockNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Sender := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.Dirty := TBEs[address].Dirty;
if (TBEs[address].Dirty) {
out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY;
out_msg.DataBlk := TBEs[address].DataBlk;
out_msg.MessageSize := MessageSizeType:Writeback_Data;
} else {
out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN;
// NOTE: in a real system this would not send data. We send
// data here only so we can check it at the memory
out_msg.DataBlk := TBEs[address].DataBlk;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
TBEs.deallocate(address);
}
action(u_writeDataToCache, "u", desc="Write data to cache") {
peek(responseToCache_in, ResponseMsg) {
getCacheEntry(address).DataBlk := in_msg.DataBlk;
getCacheEntry(address).Dirty := in_msg.Dirty;
}
}
action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
peek(responseToCache_in, ResponseMsg) {
assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
getCacheEntry(address).DataBlk := in_msg.DataBlk;
getCacheEntry(address).Dirty := in_msg.Dirty;
}
}
action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory.deallocate(address);
} else {
L1IcacheMemory.deallocate(address);
}
}
action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
if (L1DcacheMemory.isTagPresent(address) == false) {
L1DcacheMemory.allocate(address);
}
}
action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
if (L1IcacheMemory.isTagPresent(address) == false) {
L1IcacheMemory.allocate(address);
}
}
action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
L2cacheMemory.allocate(address);
}
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
L2cacheMemory.deallocate(address);
}
action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
if (L1DcacheMemory.isTagPresent(address)) {
L2cacheMemory[address] := L1DcacheMemory[address];
} else {
L2cacheMemory[address] := L1IcacheMemory[address];
}
}
action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory[address] := L2cacheMemory[address];
} else {
L1IcacheMemory[address] := L2cacheMemory[address];
}
}
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
peek(mandatoryQueue_in, CacheMsg) {
profile_miss(in_msg, id);
}
}
action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
mandatoryQueue_in.recycle();
}
//*****************************************************
// TRANSITIONS
//*****************************************************
// Transitions for Load/Store/L2_Replacement from transient states
transition({IM, SM, OM, IS, OI, MI, II}, {Store, L2_Replacement}) {
zz_recycleMandatoryQueue;
}
transition({IM, IS, OI, MI, II}, {Load, Ifetch}) {
zz_recycleMandatoryQueue;
}
transition({IM, SM, OM, IS, OI, MI, II}, L1_to_L2) {
zz_recycleMandatoryQueue;
}
// Transitions moving data between the L1 and L2 caches
transition({I, S, O, E, M, MM}, L1_to_L2) {
vv_allocateL2CacheBlock;
ss_copyFromL1toL2; // Not really needed for state I
kk_deallocateL1CacheBlock;
}
transition({I, S, O, E, M, MM}, L2_to_L1D) {
ii_allocateL1DCacheBlock;
tt_copyFromL2toL1; // Not really needed for state I
rr_deallocateL2CacheBlock;
}
transition({I, S, O, E, M, MM}, L2_to_L1I) {
jj_allocateL1ICacheBlock;
tt_copyFromL2toL1; // Not really needed for state I
rr_deallocateL2CacheBlock;
}
// Transitions from Idle
transition({NP, I}, Load, IS) {
ii_allocateL1DCacheBlock;
i_allocateTBE;
a_issueGETS;
uu_profileMiss;
k_popMandatoryQueue;
}
transition({NP, I}, Ifetch, IS) {
jj_allocateL1ICacheBlock;
i_allocateTBE;
a_issueGETS;
uu_profileMiss;
k_popMandatoryQueue;
}
transition({NP, I}, Store, IM) {
ii_allocateL1DCacheBlock;
i_allocateTBE;
b_issueGETX;
uu_profileMiss;
k_popMandatoryQueue;
}
transition(I, L2_Replacement) {
rr_deallocateL2CacheBlock;
}
transition({NP, I}, Inv) {
f_sendAck;
l_popForwardQueue;
}
// Transitions from Shared
transition({S, SM}, {Load, Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(S, Store, SM) {
i_allocateTBE;
b_issueGETX;
uu_profileMiss;
k_popMandatoryQueue;
}
transition(S, L2_Replacement, I) {
rr_deallocateL2CacheBlock;
}
transition(S, Inv, I) {
f_sendAck;
l_popForwardQueue;
}
// Transitions from Owned
transition({O, OM}, {Load, Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(O, Store, OM) {
i_allocateTBE;
b_issueGETX;
// p_decrementNumberOfMessagesByOne;
uu_profileMiss;
k_popMandatoryQueue;
}
transition(O, L2_Replacement, OI) {
i_allocateTBE;
dd_issuePUTO;
rr_deallocateL2CacheBlock;
}
transition(O, Fwd_GETX, I) {
e_sendData;
l_popForwardQueue;
}
transition(O, Fwd_GETS) {
e_sendData;
l_popForwardQueue;
}
// Transitions from MM
transition(MM, {Load, Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(MM, Store) {
hh_store_hit;
k_popMandatoryQueue;
}
transition(MM, L2_Replacement, MI) {
i_allocateTBE;
d_issuePUTX;
rr_deallocateL2CacheBlock;
}
transition(MM, Fwd_GETX, I) {
e_sendData;
l_popForwardQueue;
}
transition(MM, Fwd_GETS, I) {
ee_sendDataExclusive;
l_popForwardQueue;
}
// Transitions from M
transition({E, M}, {Load, Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition({E, M}, Store, MM) {
hh_store_hit;
k_popMandatoryQueue;
}
transition({E, M}, L2_Replacement, MI) {
i_allocateTBE;
d_issuePUTX;
rr_deallocateL2CacheBlock;
}
transition({E, M}, Fwd_GETX, I) {
e_sendData;
l_popForwardQueue;
}
transition({E, M}, Fwd_GETS, O) {
e_sendData;
l_popForwardQueue;
}
// Transitions from IM
transition(IM, Inv) {
f_sendAck;
l_popForwardQueue;
}
transition(IM, Ack) {
m_decrementNumberOfMessages;
o_checkForCompletion;
n_popResponseQueue;
}
transition(IM, Data, OM) {
u_writeDataToCache;
m_decrementNumberOfMessages;
o_checkForCompletion;
n_popResponseQueue;
}
// Transitions from SM
transition(SM, Inv, IM) {
f_sendAck;
l_popForwardQueue;
}
transition(SM, Ack) {
m_decrementNumberOfMessages;
o_checkForCompletion;
n_popResponseQueue;
}
transition(SM, Data, OM) {
v_writeDataToCacheVerify;
m_decrementNumberOfMessages;
o_checkForCompletion;
n_popResponseQueue;
}
// Transitions from OM
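// NOTE: OM means the GETX data has already arrived but invalidation
// acks are still outstanding; the store only retires (hh_store_hit)
// when the locally generated All_acks trigger fires.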
transition(OM, Own_GETX) {
mm_decrementNumberOfMessages;
o_checkForCompletion;
l_popForwardQueue;
}
transition(OM, Fwd_GETX, IM) {
e_sendData;
l_popForwardQueue;
}
transition(OM, Fwd_GETS, OM) {
e_sendData;
l_popForwardQueue;
}
transition(OM, Ack) {
m_decrementNumberOfMessages;
o_checkForCompletion;
n_popResponseQueue;
}
transition(OM, All_acks, MM) {
hh_store_hit;
gg_sendUnblockExclusive;
s_deallocateTBE;
j_popTriggerQueue;
}
// Transitions from IS
transition(IS, Inv) {
f_sendAck;
l_popForwardQueue;
}
transition(IS, Data, S) {
u_writeDataToCache;
m_decrementNumberOfMessages;
h_load_hit;
g_sendUnblock;
s_deallocateTBE;
n_popResponseQueue;
}
transition(IS, Exclusive_Data_Clean, E) {
u_writeDataToCache;
m_decrementNumberOfMessages;
h_load_hit;
gg_sendUnblockExclusive;
s_deallocateTBE;
n_popResponseQueue;
}
transition(IS, Exclusive_Data_Dirty, M) {
u_writeDataToCache;
m_decrementNumberOfMessages;
h_load_hit;
gg_sendUnblockExclusive;
s_deallocateTBE;
n_popResponseQueue;
}
// Transitions from OI/MI
transition(MI, Fwd_GETS) {
q_sendDataFromTBEToCache;
l_popForwardQueue;
}
transition(MI, Fwd_GETX, II) {
q_sendDataFromTBEToCache;
l_popForwardQueue;
}
transition(OI, Fwd_GETS) {
q_sendDataFromTBEToCache;
l_popForwardQueue;
}
transition(OI, Fwd_GETX, II) {
q_sendDataFromTBEToCache;
l_popForwardQueue;
}
transition({OI, MI}, Writeback_Ack, I) {
qq_sendDataFromTBEToMemory;
s_deallocateTBE;
l_popForwardQueue;
}
transition(MI, Writeback_Nack, OI) {
// FIXME: This might cause deadlock by re-using the writeback
// channel, we should handle this case differently.
dd_issuePUTO;
l_popForwardQueue;
}
// Transitions from II
transition(II, Writeback_Ack, I) {
g_sendUnblock;
s_deallocateTBE;
l_popForwardQueue;
}
transition(II, Writeback_Nack, I) {
s_deallocateTBE;
l_popForwardQueue;
}
transition(II, Inv) {
f_sendAck;
l_popForwardQueue;
}
}

View file

@ -1,495 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
machine(Directory, "Directory protocol") {
MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";
MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false";
MessageBuffer unblockToDir, network="From", virtual_network="3", ordered="false";
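// NOTE: Requests, forwards, responses, and unblocks travel on separate
// virtual networks (0, 1, 2, and 3 respectively) so that dependent
// message classes cannot block each other and deadlock the protocol.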
// STATES
enumeration(State, desc="Directory states", default="Directory_State_I") {
// Base states
I, desc="Invalid";
S, desc="Shared";
O, desc="Owner";
M, desc="Modified";
IS, desc="Blocked, was in idle";
SS, desc="Blocked, was in shared";
OO, desc="Blocked, was in owned";
MO, desc="Blocked, going to owner or maybe modified";
MM, desc="Blocked, going to modified";
MI, desc="Blocked on a writeback";
OS, desc="Blocked on a writeback";
}
// Events
enumeration(Event, desc="Directory events") {
GETX, desc="A GETX arrives";
GETS, desc="A GETS arrives";
PUTX, desc="A PUTX arrives";
PUTO, desc="A PUTO arrives";
Unblock, desc="An unblock message arrives";
Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
Exclusive_Unblock, desc="The processor becomes the exclusive owner (E or M) of the line";
Clean_Writeback, desc="The final message as part of a PutX/PutO, no data";
Dirty_Writeback, desc="The final message as part of a PutX/PutO, contains data";
}
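// NOTE: SS and OO allow several GETS requests for a line to be in
// flight at once; WaitingUnblocks counts how many unblocks are still
// expected, and Last_Unblock fires when the final one arrives (see the
// unblock in_port below).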
// TYPES
// DirectoryEntry
structure(Entry, desc="...") {
State DirectoryState, desc="Directory state";
DataBlock DataBlk, desc="data for the block";
NetDest Sharers, desc="Sharers for this block";
NetDest Owner, desc="Owner of this block";
int WaitingUnblocks, desc="Number of acks we're waiting for";
}
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
// External function
void profile_sharing(Address addr, AccessType type, NodeID requestor, Set sharers, Set owner);
// ** OBJECTS **
DirectoryMemory directory, constructor_hack="i";
State getState(Address addr) {
return directory[addr].DirectoryState;
}
void setState(Address addr, State state) {
if (directory.isPresent(addr)) {
if ((state == State:I) || (state == State:IS)) {
assert(directory[addr].Owner.count() == 0);
assert(directory[addr].Sharers.count() == 0);
}
if ((state == State:S) || (state == State:SS)) {
assert(directory[addr].Owner.count() == 0);
assert(directory[addr].Sharers.count() != 0);
}
if ((state == State:O) || (state == State:OO)) {
assert(directory[addr].Owner.count() == 1);
assert(directory[addr].Sharers.isSuperset(directory[addr].Owner) == false);
}
if (state == State:M) {
assert(directory[addr].Owner.count() == 1);
assert(directory[addr].Sharers.count() == 0);
}
if ((state != State:SS) && (state != State:OO)) {
assert(directory[addr].WaitingUnblocks == 0);
}
directory[addr].DirectoryState := state;
}
}
// ** OUT_PORTS **
out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
// ** IN_PORTS **
in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
if (unblockNetwork_in.isReady()) {
peek(unblockNetwork_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
if (directory[in_msg.Address].WaitingUnblocks == 1) {
trigger(Event:Last_Unblock, in_msg.Address);
} else {
trigger(Event:Unblock, in_msg.Address);
}
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY) {
trigger(Event:Dirty_Writeback, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN) {
trigger(Event:Clean_Writeback, in_msg.Address);
} else {
error("Invalid message");
}
}
}
}
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:GETX, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
trigger(Event:PUTX, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
trigger(Event:PUTO, in_msg.Address);
} else {
error("Invalid message");
}
}
}
}
// Actions
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(c_clearOwner, "c", desc="Clear the owner field") {
directory[address].Owner.clear();
}
action(cc_clearSharers, "\c", desc="Clear the sharers field") {
directory[address].Sharers.clear();
}
action(d_sendData, "d", desc="Send data to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
out_msg.Address := address;
if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE_CLEAN;
} else {
out_msg.Type := CoherenceResponseType:DATA;
}
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DataBlk := directory[in_msg.Address].DataBlk;
out_msg.Dirty := false; // By definition, the block is now clean
out_msg.Acks := directory[address].Sharers.count();
if (directory[address].Sharers.isElement(in_msg.Requestor)) {
out_msg.Acks := out_msg.Acks - 1;
}
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
peek(unblockNetwork_in, ResponseMsg) {
directory[address].Owner.clear();
directory[address].Owner.add(in_msg.Sender);
}
}
action(f_forwardRequest, "f", desc="Forward request to owner") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination := directory[in_msg.Address].Owner;
out_msg.Acks := directory[address].Sharers.count();
if (directory[address].Sharers.isElement(in_msg.Requestor)) {
out_msg.Acks := out_msg.Acks - 1;
}
out_msg.MessageSize := MessageSizeType:Forwarded_Control;
}
}
}
action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
peek(requestQueue_in, RequestMsg) {
if ((directory[in_msg.Address].Sharers.count() > 1) ||
((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination := directory[in_msg.Address].Sharers;
out_msg.Destination.remove(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Forwarded_Control;
}
}
}
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
// Profile the request
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETX) {
// profile_sharing(address, AccessType:Write, machineIDToNodeID(in_msg.Requestor), directory[address].Sharers, directory[address].Owner);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
// profile_sharing(address, AccessType:Read, machineIDToNodeID(in_msg.Requestor), directory[address].Sharers, directory[address].Owner);
}
}
requestQueue_in.dequeue();
}
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
unblockNetwork_in.dequeue();
}
action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Dirty);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
directory[in_msg.Address].DataBlk := in_msg.DataBlk;
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(in_msg.DataBlk);
}
}
action(ll_checkDataInMemory, "\l", desc="Check that PUTX/PUTO data matches what is in memory") {
peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Dirty == false);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
// NOTE: The following check would not be valid in a real
// implementation. We include the data in the "dataless"
// message so we can assert the clean data matches the datablock
// in memory
assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
}
}
action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
peek(unblockNetwork_in, ResponseMsg) {
directory[address].Sharers.add(in_msg.Sender);
}
}
action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
directory[address].WaitingUnblocks := directory[address].WaitingUnblocks + 1;
}
action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
directory[address].WaitingUnblocks := directory[address].WaitingUnblocks - 1;
assert(directory[address].WaitingUnblocks >= 0);
}
// action(z_stall, "z", desc="Cannot be handled right now.") {
// Special name recognized as do nothing case
// }
action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
requestQueue_in.recycle();
}
// TRANSITIONS
transition(I, GETX, MM) {
d_sendData;
i_popIncomingRequestQueue;
}
transition(S, GETX, MM) {
d_sendData;
g_sendInvalidations;
i_popIncomingRequestQueue;
}
transition(I, GETS, IS) {
d_sendData;
i_popIncomingRequestQueue;
}
transition({S, SS}, GETS, SS) {
d_sendData;
n_incrementOutstanding;
i_popIncomingRequestQueue;
}
transition({I, S, M}, PUTO) {
b_sendWriteBackNack;
i_popIncomingRequestQueue;
}
transition({I, S, O}, PUTX) {
b_sendWriteBackNack;
i_popIncomingRequestQueue;
}
transition(O, GETX, MM) {
f_forwardRequest;
g_sendInvalidations;
i_popIncomingRequestQueue;
}
transition({O, OO}, GETS, OO) {
f_forwardRequest;
n_incrementOutstanding;
i_popIncomingRequestQueue;
}
transition(M, GETX, MM) {
f_forwardRequest;
i_popIncomingRequestQueue;
}
transition(M, GETS, MO) {
f_forwardRequest;
i_popIncomingRequestQueue;
}
transition(M, PUTX, MI) {
a_sendWriteBackAck;
i_popIncomingRequestQueue;
}
transition(O, PUTO, OS) {
a_sendWriteBackAck;
i_popIncomingRequestQueue;
}
transition({MM, MO, MI, OS}, {GETS, GETX, PUTO, PUTX}) {
zz_recycleRequest;
}
transition({MM, MO}, Exclusive_Unblock, M) {
cc_clearSharers;
e_ownerIsUnblocker;
j_popIncomingUnblockQueue;
}
transition(MO, Unblock, O) {
m_addUnlockerToSharers;
j_popIncomingUnblockQueue;
}
transition({IS, SS, OO}, {GETX, PUTO, PUTX}) {
zz_recycleRequest;
}
transition(IS, GETS) {
zz_recycleRequest;
}
transition(IS, Unblock, S) {
m_addUnlockerToSharers;
j_popIncomingUnblockQueue;
}
transition(IS, Exclusive_Unblock, M) {
cc_clearSharers;
e_ownerIsUnblocker;
j_popIncomingUnblockQueue;
}
transition(SS, Unblock) {
m_addUnlockerToSharers;
o_decrementOutstanding;
j_popIncomingUnblockQueue;
}
transition(SS, Last_Unblock, S) {
m_addUnlockerToSharers;
o_decrementOutstanding;
j_popIncomingUnblockQueue;
}
transition(OO, Unblock) {
m_addUnlockerToSharers;
o_decrementOutstanding;
j_popIncomingUnblockQueue;
}
transition(OO, Last_Unblock, O) {
m_addUnlockerToSharers;
o_decrementOutstanding;
j_popIncomingUnblockQueue;
}
transition(MI, Dirty_Writeback, I) {
c_clearOwner;
cc_clearSharers;
l_writeDataToMemory;
j_popIncomingUnblockQueue;
}
transition(OS, Dirty_Writeback, S) {
c_clearOwner;
l_writeDataToMemory;
j_popIncomingUnblockQueue;
}
transition(MI, Clean_Writeback, I) {
c_clearOwner;
cc_clearSharers;
ll_checkDataInMemory;
j_popIncomingUnblockQueue;
}
transition(OS, Clean_Writeback, S) {
c_clearOwner;
ll_checkDataInMemory;
j_popIncomingUnblockQueue;
}
transition(MI, Unblock, M) {
j_popIncomingUnblockQueue;
}
transition(OS, Unblock, O) {
j_popIncomingUnblockQueue;
}
}

View file

@ -1,89 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
// CoherenceRequestType
enumeration(CoherenceRequestType, desc="...") {
GETX, desc="Get eXclusive";
GETS, desc="Get Shared";
PUTX, desc="Put eXclusive";
PUTO, desc="Put Owned";
WB_ACK, desc="Writeback ack";
WB_NACK, desc="Writeback neg. ack";
INV, desc="Invalidation";
}
// CoherenceResponseType
enumeration(CoherenceResponseType, desc="...") {
ACK, desc="ACKnowledgment, responder doesn't have a copy";
DATA, desc="Data";
DATA_EXCLUSIVE_CLEAN, desc="Data, no other processor has a copy, data is clean";
DATA_EXCLUSIVE_DIRTY, desc="Data, no other processor has a copy, data is dirty";
UNBLOCK, desc="Unblock";
UNBLOCK_EXCLUSIVE, desc="Unblock, we're in E/M";
WRITEBACK_CLEAN, desc="Clean writeback (no data)";
WRITEBACK_DIRTY, desc="Dirty writeback (contains data)";
}
// TriggerType
enumeration(TriggerType, desc="...") {
ALL_ACKS, desc="See corresponding event";
}
// TriggerMsg
structure(TriggerMsg, desc="...", interface="Message") {
Address Address, desc="Physical address for this request";
TriggerType Type, desc="Type of trigger";
}
// RequestMsg (and also forwarded requests)
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Multicast destination mask";
int Acks, desc="How many acks to expect";
MessageSizeType MessageSize, desc="size category of the message";
}
// ResponseMsg (and also unblock requests)
structure(ResponseMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID Sender, desc="Node who sent the data";
NetDest Destination, desc="Node to whom the data is sent";
DataBlock DataBlk, desc="data for the cache line";
bool Dirty, desc="Is the data dirty (different than memory)?";
int Acks, desc="How many acks to expect";
MessageSizeType MessageSize, desc="size category of the message";
}

View file

@ -1,4 +0,0 @@
MOESI_SMP_directory-msg.sm
MOESI_SMP_directory-cache.sm
MOESI_SMP_directory-dir.sm
standard_SMP-protocol.sm

File diff suppressed because it is too large

View file

@ -1,405 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MOESI_token-dir.sm 1.5 04/11/17 14:07:50-06:00 mikem@emperor15.cs.wisc.edu $
*/
machine(Directory, "Token protocol") {
MessageBuffer responseFromDir, network="To", virtual_network="0", ordered="false";
MessageBuffer responseToDir, network="From", virtual_network="0", ordered="false";
MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";
MessageBuffer persistentToDir, network="From", virtual_network="2", ordered="true";
// STATES
enumeration(State, desc="Directory states", default="Directory_State_O") {
// Base states
O, desc="Owner";
NO, desc="Not Owner";
L, desc="Locked";
}
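// NOTE: This directory only tracks how many tokens it holds for each
// line (the Tokens field below). O means it still holds tokens,
// including the owner token; NO means its tokens have all been handed
// out to caches; L means a persistent (starvation-avoidance) request
// has locked the line, so any tokens or data the directory holds or
// receives are forwarded to the starving node chosen by
// persistentTable.findSmallest().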
// Events
enumeration(Event, desc="Directory events") {
GETX, desc="A GETX arrives";
GETS, desc="A GETS arrives";
Lockdown, desc="A lockdown request arrives";
Unlockdown, desc="An un-lockdown request arrives";
Data_Owner, desc="Data arrive, includes the owner token";
Data_Shared, desc="Data arrive, does not include the owner token";
Ack, desc="Tokens arrive";
Ack_Owner, desc="Tokens arrive, including the owner token";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="...") {
State DirectoryState, desc="Directory state";
DataBlock DataBlk, desc="data for the block";
int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";
}
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
// ** OBJECTS **
DirectoryMemory directory, constructor_hack="i";
PersistentTable persistentTable, constructor_hack="i";
State getState(Address addr) {
return directory[addr].DirectoryState;
}
void setState(Address addr, State state) {
directory[addr].DirectoryState := state;
if (state == State:L) {
assert(directory[addr].Tokens == 0);
}
// Make sure the token count is in range
assert(directory[addr].Tokens >= 0);
assert(directory[addr].Tokens <= max_tokens());
if (state == State:O) {
assert(directory[addr].Tokens >= 1); // Must have at least one token
assert(directory[addr].Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
}
}
// ** OUT_PORTS **
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
// ** IN_PORTS **
in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
if (persistentNetwork_in.isReady()) {
peek(persistentNetwork_in, PersistentMsg) {
// Apply the lockdown or unlockdown message to the table
if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
} else {
error("Invalid message");
}
// React to the message based on the current state of the table
if (persistentTable.isLocked(in_msg.Address)) {
trigger(Event:Lockdown, in_msg.Address); // locked
} else {
trigger(Event:Unlockdown, in_msg.Address); // unlocked
}
}
}
}
in_port(requestNetwork_in, RequestMsg, requestToDir) {
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:GETX, in_msg.Address);
} else {
error("Invalid message");
}
}
}
}
in_port(responseNetwork_in, ResponseMsg, responseToDir) {
if (responseNetwork_in.isReady()) {
peek(responseNetwork_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
trigger(Event:Data_Owner, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
trigger(Event:Data_Shared, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner, in_msg.Address);
} else {
error("Invalid message");
}
}
}
}
// Actions
action(a_sendTokens, "a", desc="Send tokens to requestor") {
// Only send a message if we have tokens to send
if (directory[address].Tokens > 0) {
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DestMachine := MachineType:L1Cache;
out_msg.Tokens := directory[in_msg.Address].Tokens;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
directory[address].Tokens := 0;
}
}
action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
// Only send a message if we have tokens to send
if (directory[address].Tokens > 0) {
enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(persistentTable.findSmallest(address));
out_msg.DestMachine := MachineType:L1Cache;
out_msg.Tokens := directory[address].Tokens;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
directory[address].Tokens := 0;
}
}
action(d_sendDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DestMachine := MachineType:L1Cache;
assert(directory[address].Tokens > 0);
out_msg.Tokens := directory[in_msg.Address].Tokens;
out_msg.DataBlk := directory[in_msg.Address].DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
directory[address].Tokens := 0;
}
action(dd_sendDataWithAllTokensToStarver, "\d", desc="Send data and tokens to starver") {
enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(persistentTable.findSmallest(address));
out_msg.DestMachine := MachineType:L1Cache;
assert(directory[address].Tokens > 0);
out_msg.Tokens := directory[address].Tokens;
out_msg.DataBlk := directory[address].DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
directory[address].Tokens := 0;
}
action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Tokens >= 1);
directory[address].Tokens := directory[address].Tokens + in_msg.Tokens;
}
}
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
requestNetwork_in.dequeue();
}
action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
responseNetwork_in.dequeue();
}
action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
persistentNetwork_in.dequeue();
}
action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
peek(responseNetwork_in, ResponseMsg) {
directory[in_msg.Address].DataBlk := in_msg.DataBlk;
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(in_msg.DataBlk);
}
}
action(n_checkIncomingMsg, "n", desc="Check incoming token message") {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
assert(in_msg.Dirty == false);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
}
}
action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
peek(responseNetwork_in, ResponseMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(persistentTable.findSmallest(address));
out_msg.DestMachine := MachineType:L1Cache;
out_msg.Tokens := in_msg.Tokens;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Dirty := in_msg.Dirty;
out_msg.MessageSize := in_msg.MessageSize;
}
}
}
action(s_bounceDatalessOwnerToken, "s", desc="Bounce clean owner token to starving processor") {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
assert(in_msg.Dirty == false);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
// NOTE: The following check would not be valid in a real
// implementation. We include the data in the "dataless"
// message so we can assert the clean data matches the datablock
// in memory
assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
// Bounce the message, but "re-associate" the data and the owner
// token. In essence we're converting an ACK_OWNER message to a
// DATA_OWNER message, keeping the number of tokens the same.
enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(persistentTable.findSmallest(address));
out_msg.DestMachine := MachineType:L1Cache;
out_msg.Tokens := in_msg.Tokens;
out_msg.DataBlk := directory[in_msg.Address].DataBlk;
out_msg.Dirty := in_msg.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
// TRANSITIONS
// Trans. from O
transition(O, GETX, NO) {
d_sendDataWithAllTokens;
j_popIncomingRequestQueue;
}
transition(O, GETS, NO) {
d_sendDataWithAllTokens;
// Since we found the owner, no need to forward
j_popIncomingRequestQueue;
}
transition(O, Lockdown, L) {
dd_sendDataWithAllTokensToStarver;
l_popIncomingPersistentQueue;
}
transition(O, {Data_Shared, Ack}) {
f_incrementTokens;
k_popIncomingResponseQueue;
}
// Trans. from NO
transition(NO, GETX) {
a_sendTokens;
j_popIncomingRequestQueue;
}
transition(NO, GETS) {
j_popIncomingRequestQueue;
}
transition(NO, Lockdown, L) {
aa_sendTokensToStarver;
l_popIncomingPersistentQueue;
}
transition(NO, Data_Owner, O) {
m_writeDataToMemory;
f_incrementTokens;
k_popIncomingResponseQueue;
}
transition(NO, Ack_Owner, O) {
n_checkIncomingMsg;
f_incrementTokens;
k_popIncomingResponseQueue;
}
transition(NO, {Data_Shared, Ack}) {
f_incrementTokens;
k_popIncomingResponseQueue;
}
// Trans. from L
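// While a persistent request is active the directory stays in L and does not
// accumulate tokens: every response it receives is bounced straight to the
// current starver, chosen as the smallest requestor in the persistent table.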
transition(L, {GETX, GETS}) {
j_popIncomingRequestQueue;
}
transition(L, Lockdown) {
l_popIncomingPersistentQueue;
}
transition(L, {Data_Owner, Data_Shared, Ack}) {
r_bounceResponse;
k_popIncomingResponseQueue;
}
transition(L, Ack_Owner) {
s_bounceDatalessOwnerToken;
k_popIncomingResponseQueue;
}
transition(L, Unlockdown, NO) {
l_popIncomingPersistentQueue;
}
}

View file

@ -1,61 +0,0 @@
/*
* $Id: MOESI_token-msg.sm 1.3 04/06/05 22:43:20-00:00 kmoore@cottons.cs.wisc.edu $
*
*/
//int max_tokens();
// CoherenceRequestType
enumeration(CoherenceRequestType, desc="...") {
GETX, desc="Get eXclusive";
GETS, desc="Get Shared";
}
// PersistentRequestType (starvation requests)
enumeration(PersistentRequestType, desc="...") {
GETX_PERSISTENT, desc="...";
GETS_PERSISTENT, desc="...";
DEACTIVATE_PERSISTENT, desc="...";
}
// CoherenceResponseType
enumeration(CoherenceResponseType, desc="...") {
DATA_OWNER, desc="Data, with the owner token";
DATA_SHARED, desc="Data, without the owner token";
ACK, desc="ACKnowledgment";
ACK_OWNER, desc="ACKnowledgment, includes the clean owner token";
}
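// A response either carries data (DATA_OWNER / DATA_SHARED) or only tokens
// (ACK); ACK_OWNER transfers the clean owner token without data, which is
// safe because a clean owner's copy matches memory.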
// PersistentMsg (starvation message)
structure(PersistentMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
PersistentRequestType Type, desc="Type of starvation request";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Destination set";
MessageSizeType MessageSize, desc="size category of the message";
}
// RequestMsg
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Multicast destination mask";
MachineType DestMachine, desc="What component receives the data";
MessageSizeType MessageSize, desc="size category of the message";
}
// ResponseMsg
structure(ResponseMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID Sender, desc="Node who sent the data";
MachineType SenderMachine, desc="What component sent the data";
NetDest Destination, desc="Node to whom the data is sent";
MachineType DestMachine, desc="What component receives the data";
int Tokens, desc="Number of tokens being transferred for this line";
DataBlock DataBlk, desc="data for the cache line";
bool Dirty, desc="Is the data dirty (different than memory)?";
MessageSizeType MessageSize, desc="size category of the message";
}

View file

@ -1,4 +0,0 @@
MOESI_SMP_token-msg.sm
MOESI_SMP_token-cache.sm
MOESI_SMP_token-dir.sm
standard_SMP-protocol.sm

File diff suppressed because it is too large

View file

@ -1,267 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
machine(Directory, "MOSI Broadcast Optimized") {
MessageBuffer addressFromDir, network="To", virtual_network="0", ordered="true";
MessageBuffer dataFromDir, network="To", virtual_network="1", ordered="false";
MessageBuffer addressToDir, network="From", virtual_network="0", ordered="true";
MessageBuffer dataToDir, network="From", virtual_network="1", ordered="false";
enumeration(State, desc="Directory states", default="Directory_State_C") {
C, desc="Cold - no processor has requested this line";
I, desc="Idle";
S, desc="Shared";
SS, desc="Shared, 2 or more sharers";
OS, desc="Owned by a cache";
OSS, desc="Owned by a cache, present in at least 3 caches";
M, desc="Modified", format="!b";
}
// ** EVENTS **
enumeration(Event, desc="Directory events") {
// From Address network
OtherAddress, desc="We saw an address msg to someone else";
GETS, desc="A GETS arrives";
GET_INSTR, desc="A GETInstr arrives";
GETX, desc="A GETX arrives", format="!r";
PUTX_Owner, desc="A PUTX arrives, requestor is owner";
PUTX_NotOwner, desc="A PUTX arrives, requestor is not owner", format="!r";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="...") {
State DirectoryState, desc="Directory state";
bool DirOwner, default="true", desc="Is dir owner?";
MachineID ProcOwner, desc="Processor Owner";
DataBlock DataBlk, desc="data for the block";
}
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
// ** OBJECTS **
DirectoryMemory directory, constructor_hack="i";
void profile_request(int cache_state, State directory_state, GenericRequestType request_type);
State getState(Address addr) {
if (directory.isPresent(addr)) {
return directory[addr].DirectoryState;
}
return State:C;
}
void setState(Address addr, State state) {
if (directory.isPresent(addr)) {
directory[addr].DirectoryState := state;
}
}
// ** OUT_PORTS **
out_port(dataNetwork_out, DataMsg, dataFromDir);
out_port(addressNetwork_out, AddressMsg, addressFromDir);
// ** IN_PORTS **
// Address Network
in_port(addressNetwork_in, AddressMsg, addressToDir) {
if (addressNetwork_in.isReady()) {
peek(addressNetwork_in, AddressMsg) {
if(map_Address_to_Directory(in_msg.Address) != machineID) {
trigger(Event:OtherAddress, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
trigger(Event:GET_INSTR, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:GETX, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
if (in_msg.Requestor == directory[in_msg.Address].ProcOwner && directory[in_msg.Address].DirOwner == false) {
trigger(Event:PUTX_Owner, in_msg.Address);
} else {
trigger(Event:PUTX_NotOwner, in_msg.Address);
}
} else {
error("unexpected message");
}
}
}
}
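// PUTX requests are classified by comparing the requestor against the
// recorded ProcOwner; a PUTX from a non-owner is stale (ownership has already
// moved on) and the transitions below simply profile and discard it.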
// *** ACTIONS ***
action(d_sendDataMsg, "d", desc="Send data message to requestor") {
peek(addressNetwork_in, AddressMsg) {
enqueue(dataNetwork_out, DataMsg, latency="MEMORY_LATENCY") {
out_msg.Address := in_msg.Address;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DestMachine := MachineType:L1Cache;
out_msg.DataBlk := directory[in_msg.Address].DataBlk;
out_msg.MessageSize := MessageSizeType:Data;
DEBUG_EXPR(in_msg.Requestor);
DEBUG_EXPR(out_msg.DataBlk);
}
}
}
action(j_popAddressQueue, "j", desc="Pop address queue.") {
addressNetwork_in.dequeue();
}
action(p_profile, "p", desc="Profile this transition.") {
peek(addressNetwork_in, AddressMsg) {
profile_request(in_msg.CacheState, getState(address), convertToGenericType(in_msg.Type));
}
}
action(m_setOwnerRequestor, "m", desc="Set owner = requestor") {
peek(addressNetwork_in, AddressMsg) {
directory[in_msg.Address].ProcOwner := in_msg.Requestor;
directory[in_msg.Address].DirOwner := false;
}
}
action(r_writeDataFromRequest, "r", desc="Write request data to memory") {
peek(addressNetwork_in, AddressMsg) {
directory[in_msg.Address].DataBlk := in_msg.DataBlk;
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(in_msg.DataBlk);
}
}
action(x_setOwnerToDirectory, "x", desc="Set owner equal to the directory"){
peek(addressNetwork_in, AddressMsg) {
directory[in_msg.Address].DirOwner := true;
}
}
// TRANSITIONS
// Ignore all address and data messages not bound for us
transition(C, OtherAddress) {
j_popAddressQueue;
}
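// The ordered address network is a broadcast, so the directory observes every
// request; it supplies data from memory only while it still owns the line
// (C, I, S, SS), otherwise the owning cache is responsible for the data.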
// PUTX_NotOwner Transitions
transition({I, S, SS, OS, OSS, M}, PUTX_NotOwner) {
p_profile;
j_popAddressQueue;
}
// Transitions from Idle
transition({C, I}, {GETS,GET_INSTR}, S) {
d_sendDataMsg;
p_profile;
j_popAddressQueue;
}
transition({C, I}, GETX, M) {
d_sendDataMsg;
m_setOwnerRequestor;
p_profile;
j_popAddressQueue;
}
// Transitions from Shared
transition({S, SS}, {GETS,GET_INSTR}, SS) {
d_sendDataMsg;
p_profile;
j_popAddressQueue;
}
transition({S, SS}, GETX, M) {
d_sendDataMsg;
m_setOwnerRequestor;
p_profile;
j_popAddressQueue;
}
// Transitions from Owned
transition({OS, OSS}, {GETS,GET_INSTR}, OSS) {
p_profile;
j_popAddressQueue;
}
transition({OS, OSS}, GETX, M) {
m_setOwnerRequestor;
p_profile;
j_popAddressQueue;
}
transition(OS, PUTX_Owner, S) {
x_setOwnerToDirectory;
r_writeDataFromRequest;
p_profile;
j_popAddressQueue;
}
transition(OSS, PUTX_Owner, SS) {
x_setOwnerToDirectory;
r_writeDataFromRequest;
p_profile;
j_popAddressQueue;
}
// Transitions from Modified
transition(M, {GETS,GET_INSTR}, OS) {
p_profile;
j_popAddressQueue;
}
transition(M, GETX) {
m_setOwnerRequestor;
p_profile;
j_popAddressQueue;
}
transition(M, PUTX_Owner, I) {
x_setOwnerToDirectory;
r_writeDataFromRequest;
p_profile;
j_popAddressQueue;
}
}

View file

@ -1,79 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
// CoherenceRequestType
enumeration(CoherenceRequestType, desc="...") {
GET_INSTR, desc="Get Instruction";
GETS, desc="Get Shared";
GETX, desc="Get eXclusive";
PUTX, desc="Put eXclusive";
}
// AddressMsg
structure(AddressMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
int CacheState, default="1000", desc="Hack to transfer the cache's state for profiling"; // The default of 1000 will generate an error if we forget to set this
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Multicast destination mask";
DataBlock DataBlk, desc="data for the cache line"; // This is used for PutX and Downgrades only
MessageSizeType MessageSize, desc="size category of the message";
}
// DataMsg
structure(DataMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
MachineID Sender, desc="Node who sent the data";
// MachineType SenderType, desc="Component who sent data";
NetDest Destination, desc="Node to whom the data is sent";
MachineType DestMachine, desc="What component receives the data";
DataBlock DataBlk, desc="data for the cache line";
MessageSizeType MessageSize, desc="size category of the message";
}
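// Requests travel on the ordered address network (virtual network 0) and are
// seen by every cache and the directory; data replies travel point-to-point
// on the unordered data network (virtual network 1).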
GenericRequestType convertToGenericType(CoherenceRequestType type) {
if(type == CoherenceRequestType:PUTX) {
return GenericRequestType:PUTX;
} else if(type == CoherenceRequestType:GETS) {
return GenericRequestType:GETS;
} else if(type == CoherenceRequestType:GET_INSTR) {
return GenericRequestType:GET_INSTR;
} else if(type == CoherenceRequestType:GETX) {
return GenericRequestType:GETX;
} else {
DEBUG_EXPR(type);
error("invalid CoherenceRequestType");
}
}

View file

@ -1,4 +0,0 @@
MOSI_SMP_bcast-msg.sm
MOSI_SMP_bcast-cache.sm
MOSI_SMP_bcast-dir.sm
standard_SMP-protocol.sm

View file

@ -1,921 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
machine(L1Cache, "MOSI Broadcast Optimized") {
MessageBuffer addressFromCache, network="To", virtual_network="0", ordered="true";
MessageBuffer dataFromCache, network="To", virtual_network="1", ordered="false";
MessageBuffer addressToCache, network="From", virtual_network="0", ordered="true";
MessageBuffer dataToCache, network="From", virtual_network="1", ordered="false";
// STATES
enumeration(State, desc="Cache states", default="L1Cache_State_I") {
NP, desc="Not Present";
I, desc="Idle";
S, desc="Shared";
O, desc="Owned";
M, desc="Modified", format="!b";
IS_AD, "IS^AD", desc="idle, issued GETS, have not seen GETS or data yet";
IM_AD, "IM^AD", desc="idle, issued GETX, have not seen GETX or data yet";
SM_AD, "SM^AD", desc="shared, issued GETX, have not seen GETX or data yet";
OM_A, "OM^A", desc="owned, issued GETX, have not seen GETX yet", format="!b";
IS_A, "IS^A", desc="idle, issued GETS, have not seen GETS, have seen data";
IM_A, "IM^A", desc="idle, issued GETX, have not seen GETX, have seen data";
SM_A, "SM^A", desc="shared, issued GETX, have not seen GETX, have seen data", format="!b";
MI_A, "MI^A", desc="modified, issued PUTX, have not seen PUTX yet";
OI_A, "OI^A", desc="owned, issued PUTX, have not seen PUTX yet";
II_A, "II^A", desc="modified or owned, issued PUTX, have not seen own PUTX, then saw other GETX", format="!b";
IS_D, "IS^D", desc="idle, issued GETS, have seen GETS, have not seen data yet";
IS_D_I, "IS^D^I", desc="idle, issued GETS, have seen GETS, have not seen data, then saw other GETX";
IM_D, "IM^D", desc="idle, issued GETX, have seen GETX, have not seen data yet";
IM_D_O, "IM^D^O", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETS";
IM_D_I, "IM^D^I", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETX";
IM_D_OI, "IM^D^OI", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETS, then saw other GETX";
SM_D, "SM^D", desc="shared, issued GETX, have seen GETX, have not seen data yet";
SM_D_O, "SM^D^O", desc="shared, issued GETX, have seen GETX, have not seen data yet, then saw other GETS";
}
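// Transient state naming: the ^A suffix means our own request has not yet
// been observed on the ordered address network, ^D means the data reply has
// not yet arrived, and ^AD means both are outstanding; the trailing _O / _I /
// _OI variants record requests from other processors observed while waiting,
// which determine the final state (and any forwarding) once the data arrives.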
// ** EVENTS **
enumeration(Event, desc="Cache events") {
// From processor
Load, desc="Load request from the processor";
Ifetch, desc="I-fetch request from the processor";
Store, desc="Store request from the processor";
Replacement, desc="Replacement";
Load_prefetch, desc="Read only prefetch";
Store_prefetch, desc="Read write prefetch", format="!r";
// From Address network
Own_GETS, desc="Occurs when we observe our own GETS request in the global order";
Own_GET_INSTR, desc="Occurs when we observe our own GETInstr request in the global order";
Own_GETX, desc="Occurs when we observe our own GETX request in the global order";
Own_PUTX, desc="Occurs when we observe our own PUTX request in the global order", format="!r";
Other_GETS, desc="Occurs when we observe a GETS request from another processor";
Other_GET_INSTR, desc="Occurs when we observe a GETInstr request from another processor";
Other_GETX, desc="Occurs when we observe a GETX request from another processor";
Other_PUTX, desc="Occurs when we observe a PUTX request from another processor", format="!r";
// From Data network
Data, desc="Data for this block from the data network";
}
// TYPES
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry") {
State CacheState, desc="cache state";
DataBlock DataBlk, desc="data for the block";
}
// TBE fields
structure(TBE, desc="...") {
Address Address, desc="Physical address for this TBE";
State TBEState, desc="Transient state";
DataBlock DataBlk, desc="Buffer for the data block";
NetDest ForwardIDs, desc="IDs of the processors to forward the block";
Address ForwardAddress, desc="Address of request for forwarding";
bool isPrefetch, desc="Set if this request is a prefetch";
}
external_type(CacheMemory) {
bool cacheAvail(Address);
Address cacheProbe(Address);
void allocate(Address);
void deallocate(Address);
Entry lookup(Address);
void changePermission(Address, AccessPermission);
bool isTagPresent(Address);
}
external_type(TBETable) {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
MessageBuffer optionalQueue, ordered="true", abstract_chip_ptr="true";
Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";
TBETable TBEs, template_hack="<L1Cache_TBE>";
CacheMemory cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_unified"', abstract_chip_ptr="true";
int cache_state_to_int(State state);
State getState(Address addr) {
if(TBEs.isPresent(addr)) {
return TBEs[addr].TBEState;
} else if (cacheMemory.isTagPresent(addr)) {
return cacheMemory[addr].CacheState;
}
return State:NP;
}
void setState(Address addr, State state) {
if (TBEs.isPresent(addr)) {
TBEs[addr].TBEState := state;
}
if (cacheMemory.isTagPresent(addr)) {
cacheMemory[addr].CacheState := state;
// Set permission
if ((state == State:I) || (state == State:MI_A) || (state == State:II_A)) {
cacheMemory.changePermission(addr, AccessPermission:Invalid);
} else if (state == State:S || state == State:O) {
cacheMemory.changePermission(addr, AccessPermission:Read_Only);
} else if (state == State:M) {
cacheMemory.changePermission(addr, AccessPermission:Read_Write);
} else {
cacheMemory.changePermission(addr, AccessPermission:Busy);
}
}
}
// ** OUT_PORTS **
out_port(dataNetwork_out, DataMsg, dataFromCache);
out_port(addressNetwork_out, AddressMsg, addressFromCache);
// ** IN_PORTS **
// Data Network
in_port(dataNetwork_in, DataMsg, dataToCache) {
if (dataNetwork_in.isReady()) {
peek(dataNetwork_in, DataMsg) {
trigger(Event:Data, in_msg.Address);
}
}
}
// Address Network
in_port(addressNetwork_in, AddressMsg, addressToCache) {
if (addressNetwork_in.isReady()) {
peek(addressNetwork_in, AddressMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
if (in_msg.Requestor == machineID) {
trigger(Event:Own_GETS, in_msg.Address);
} else {
trigger(Event:Other_GETS, in_msg.Address);
}
} else if (in_msg.Type == CoherenceRequestType:GETX) {
if (in_msg.Requestor == machineID) {
trigger(Event:Own_GETX, in_msg.Address);
} else {
trigger(Event:Other_GETX, in_msg.Address);
}
} else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
if (in_msg.Requestor == machineID) {
trigger(Event:Own_GET_INSTR, in_msg.Address);
} else {
trigger(Event:Other_GET_INSTR, in_msg.Address);
}
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
if (in_msg.Requestor == machineID) {
trigger(Event:Own_PUTX, in_msg.Address);
} else {
trigger(Event:Other_PUTX, in_msg.Address);
}
} else {
error("Unexpected message");
}
}
}
}
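// Every cache observes all requests, including its own, in the global order
// imposed by the address network; seeing our own GETS/GETX/PUTX (the Own_*
// events) is what advances the transient states, while requests from other
// processors (Other_* events) may downgrade or invalidate the line.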
// Mandatory Queue
in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, CacheMsg) {
if (cacheMemory.cacheAvail(in_msg.Address) == false) {
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
} else {
if (in_msg.Type == CacheRequestType:LD) {
trigger(Event:Load, in_msg.Address);
} else if (in_msg.Type == CacheRequestType:IFETCH) {
trigger(Event:Ifetch, in_msg.Address);
} else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
trigger(Event:Store, in_msg.Address);
} else {
error("Invalid CacheRequestType");
}
}
}
}
}
// Optional Queue
in_port(optionalQueue_in, CacheMsg, optionalQueue, desc="...") {
if (optionalQueue_in.isReady()) {
peek(optionalQueue_in, CacheMsg) {
if (cacheMemory.cacheAvail(in_msg.Address) == false) {
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
} else {
if ((in_msg.Type == CacheRequestType:LD) || (in_msg.Type == CacheRequestType:IFETCH)) {
trigger(Event:Load_prefetch, in_msg.Address);
} else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
trigger(Event:Store_prefetch, in_msg.Address);
} else {
error("Invalid CacheRequestType");
}
}
}
}
}
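// The mandatory queue carries demand requests from the processor, which stall
// (z_stall) rather than being dropped when the line is in a transient state;
// the optional queue carries prefetches, which many transitions below simply
// pop and ignore.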
// ACTIONS
action(a_allocateTBE, "a", desc="Allocate TBE with Address=B, ForwardID=null, RetryCount=zero, ForwardIDRetryCount=zero, ForwardProgressBit=unset.") {
check_allocate(TBEs);
TBEs.allocate(address);
TBEs[address].isPrefetch := false;
TBEs[address].ForwardIDs.clear();
// Keep the TBE state consistent with the cache state
if (cacheMemory.isTagPresent(address)) {
TBEs[address].TBEState := cacheMemory[address].CacheState;
}
}
action(b_setPrefetchBit, "b", desc="Set prefetch bit in TBE.") {
TBEs[address].isPrefetch := true;
}
action(c_allocateCacheBlock, "c", desc="Set cache tag equal to tag of block B.") {
if (cacheMemory.isTagPresent(address) == false) {
cacheMemory.allocate(address);
}
}
action(d_deallocateTBE, "d", desc="Deallocate TBE.") {
TBEs.deallocate(address);
}
action(e_recordForwardingInfo, "e", desc="Record ID of other processor in ForwardID.") {
peek(addressNetwork_in, AddressMsg){
TBEs[address].ForwardIDs.add(in_msg.Requestor);
TBEs[address].ForwardAddress := in_msg.Address;
}
}
action(f_issueGETS, "f", desc="Issue GETS.") {
enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.CacheState := cache_state_to_int(getState(address));
out_msg.Requestor := machineID;
out_msg.Destination.broadcast(MachineType:L1Cache);
out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
out_msg.MessageSize := MessageSizeType:Control;
}
}
action(g_issueGETX, "g", desc="Issue GETX.") {
enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.CacheState := cache_state_to_int(getState(address));
out_msg.Requestor := machineID;
out_msg.Destination.broadcast(MachineType:L1Cache);
out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
out_msg.MessageSize := MessageSizeType:Control;
}
}
action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
DEBUG_EXPR(cacheMemory[address].DataBlk);
if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
// Non-prefetch
sequencer.readCallback(address, cacheMemory[address].DataBlk);
} else {
// Prefetch - don't call back
}
}
action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
DEBUG_EXPR(cacheMemory[address].DataBlk);
if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
// Non-prefetch
sequencer.writeCallback(address, cacheMemory[address].DataBlk);
} else {
// Prefetch - don't call back
}
}
action(i_popAddressQueue, "i", desc="Pop incoming address queue.") {
addressNetwork_in.dequeue();
}
action(j_popDataQueue, "j", desc="Pop incoming data queue.") {
dataNetwork_in.dequeue();
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
mandatoryQueue_in.dequeue();
}
action(l_popOptionalQueue, "l", desc="Pop optional queue.") {
optionalQueue_in.dequeue();
}
action(o_cacheToForward, "o", desc="Send data from the cache to the processor indicated by ForwardIDs.") {
peek(dataNetwork_in, DataMsg){
// This is charged CACHE_RESPONSE_LATENCY to avoid the timing strangeness
// that can occur if requests that source the data from the TBE complete
// faster than requests that source the data from the cache.
enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY"){
out_msg.Address := TBEs[address].ForwardAddress;
out_msg.Sender := machineID;
out_msg.DataBlk := cacheMemory[address].DataBlk;
out_msg.Destination := TBEs[address].ForwardIDs;
out_msg.DestMachine := MachineType:L1Cache;
out_msg.MessageSize := MessageSizeType:Data;
}
}
}
action(p_issuePUTX, "p", desc="Issue PUTX.") {
enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.CacheState := cache_state_to_int(getState(address));
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
out_msg.Destination.add(machineID); // Back to us
out_msg.DataBlk := cacheMemory[address].DataBlk;
out_msg.MessageSize := MessageSizeType:Data;
}
}
action(q_writeDataFromCacheToTBE, "q", desc="Write data from the cache into the TBE.") {
TBEs[address].DataBlk := cacheMemory[address].DataBlk;
DEBUG_EXPR(TBEs[address].DataBlk);
}
action(r_cacheToRequestor, "r", desc="Send data from the cache to the requestor") {
peek(addressNetwork_in, AddressMsg) {
enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DestMachine := MachineType:L1Cache;
out_msg.DataBlk := cacheMemory[address].DataBlk;
out_msg.MessageSize := MessageSizeType:Data;
}
DEBUG_EXPR(cacheMemory[address].DataBlk);
}
}
action(s_saveDataInTBE, "s", desc="Save data in data field of TBE.") {
peek(dataNetwork_in, DataMsg) {
TBEs[address].DataBlk := in_msg.DataBlk;
DEBUG_EXPR(TBEs[address].DataBlk);
}
}
action(t_issueGET_INSTR, "t", desc="Issue GETInstr.") {
enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GET_INSTR;
out_msg.CacheState := cache_state_to_int(getState(address));
out_msg.Requestor := machineID;
out_msg.Destination.broadcast(MachineType:L1Cache);
out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
out_msg.MessageSize := MessageSizeType:Control;
}
}
action(w_writeDataFromTBEToCache, "w", desc="Write data from the TBE into the cache.") {
cacheMemory[address].DataBlk := TBEs[address].DataBlk;
DEBUG_EXPR(cacheMemory[address].DataBlk);
}
action(y_tbeToReq, "y", desc="Send data from the TBE to the requestor.") {
peek(addressNetwork_in, AddressMsg) {
enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY") { // Either this or the PutX should have a real latency
out_msg.Address := address;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DestMachine := MachineType:L1Cache;
out_msg.DataBlk := TBEs[address].DataBlk;
out_msg.MessageSize := MessageSizeType:Data;
}
}
}
action(ff_deallocateCacheBlock, "\f", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
cacheMemory.deallocate(address);
}
action(z_stall, "z", desc="Cannot be handled right now.") {
// Special name recognized as do nothing case
}
// TRANSITIONS
// Transitions from Idle
transition({NP, I}, Load, IS_AD) {
f_issueGETS;
c_allocateCacheBlock;
a_allocateTBE;
k_popMandatoryQueue;
}
transition({NP, I}, Ifetch, IS_AD) {
t_issueGET_INSTR;
c_allocateCacheBlock;
a_allocateTBE;
k_popMandatoryQueue;
}
transition({NP, I}, Load_prefetch, IS_AD) {
f_issueGETS;
c_allocateCacheBlock;
a_allocateTBE;
b_setPrefetchBit;
l_popOptionalQueue;
}
transition({NP, I}, Store, IM_AD) {
g_issueGETX;
c_allocateCacheBlock;
a_allocateTBE;
k_popMandatoryQueue;
}
transition({NP, I}, Store_prefetch, IM_AD) {
g_issueGETX;
c_allocateCacheBlock;
a_allocateTBE;
b_setPrefetchBit;
l_popOptionalQueue;
}
transition(I, Replacement) {
ff_deallocateCacheBlock; // the cache line is now in NotPresent
}
transition({NP, I}, { Other_GETS, Other_GET_INSTR, Other_GETX } ) {
i_popAddressQueue;
}
// Transitions from Shared
transition(S, {Load,Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(S, Load_prefetch) {
l_popOptionalQueue;
}
transition(S, Store, SM_AD) {
g_issueGETX;
a_allocateTBE;
k_popMandatoryQueue;
}
transition(S, Store_prefetch, IM_AD) {
g_issueGETX;
a_allocateTBE;
b_setPrefetchBit; // Must be after allocate TBE
l_popOptionalQueue;
}
transition(S, Replacement, I) {
ff_deallocateCacheBlock; // the cache line is now in NotPresent
}
transition(S, {Other_GETS, Other_GET_INSTR}) {
i_popAddressQueue;
}
transition(S, Other_GETX, I) {
i_popAddressQueue;
}
// Transitions from Owned
transition(O, {Load,Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(O, Store, OM_A){
g_issueGETX;
a_allocateTBE;
k_popMandatoryQueue;
}
transition(O, Load_prefetch) {
l_popOptionalQueue;
}
transition(O, Store_prefetch, OM_A) {
g_issueGETX;
a_allocateTBE;
b_setPrefetchBit;
l_popOptionalQueue;
}
transition(O, Replacement, OI_A) {
p_issuePUTX;
a_allocateTBE;
q_writeDataFromCacheToTBE; // the cache line is now empty
ff_deallocateCacheBlock; // the cache line is now in NotPresent
}
transition(O, {Other_GETS,Other_GET_INSTR}) {
r_cacheToRequestor;
i_popAddressQueue;
}
transition(O, Other_GETX, I) {
r_cacheToRequestor;
i_popAddressQueue;
}
// Transitions from Modified
transition(M, {Load,Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(M, Store) {
hh_store_hit;
k_popMandatoryQueue;
}
transition(M, {Load_prefetch,Store_prefetch}) {
l_popOptionalQueue;
}
transition(M, Replacement, MI_A) {
p_issuePUTX;
a_allocateTBE;
q_writeDataFromCacheToTBE; // the cache line is now empty
ff_deallocateCacheBlock; // the cache line is now in NotPresent
}
transition(M, {Other_GETS,Other_GET_INSTR}, O) {
r_cacheToRequestor;
i_popAddressQueue;
}
transition(M, Other_GETX, I) {
r_cacheToRequestor;
i_popAddressQueue;
}
// Transitions for Load/Store/Replacement from transient states
transition({IS_AD, IM_AD, IS_A, IM_A, SM_AD, OM_A, SM_A, IS_D, IS_D_I, IM_D, IM_D_O, IM_D_I, IM_D_OI, SM_D, SM_D_O}, {Load, Ifetch, Store, Replacement}) {
z_stall;
}
transition({IS_AD, IM_AD, IS_A, IM_A, SM_AD, OM_A, SM_A, IS_D, IM_D, IM_D_O, SM_D, SM_D_O}, Load_prefetch) {
l_popOptionalQueue;
}
transition({IS_D_I, IM_D_I, IM_D_OI}, Load_prefetch) {
z_stall;
}
transition({IM_AD, SM_AD, OM_A, IM_A, SM_A, IM_D, SM_D}, Store_prefetch) {
l_popOptionalQueue;
}
transition({IS_AD, IS_A, IS_D, IS_D_I, IM_D_O, IM_D_I, IM_D_OI, SM_D_O}, Store_prefetch) {
z_stall;
}
transition({MI_A, OI_A, II_A}, {Load, Ifetch, Store, Load_prefetch, Store_prefetch, Replacement}) {
z_stall;
}
// Always ignore PUTXs which we are not the owner of
transition({NP, I, S, O, M, IS_AD, IM_AD, SM_AD, OM_A, IS_A, IM_A, SM_A, MI_A, OI_A, II_A, IS_D, IS_D_I, IM_D, IM_D_O, IM_D_I, IM_D_OI, SM_D, SM_D_O }, Other_PUTX) {
i_popAddressQueue;
}
// transitions from IS_AD
transition(IS_AD, {Own_GETS,Own_GET_INSTR}, IS_D) {
i_popAddressQueue;
}
transition(IS_AD, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
i_popAddressQueue;
}
transition(IS_AD, Data, IS_A) {
s_saveDataInTBE;
j_popDataQueue;
}
// Transitions from IM_AD
transition(IM_AD, Own_GETX, IM_D) {
i_popAddressQueue;
}
transition(IM_AD, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
i_popAddressQueue;
}
transition(IM_AD, Data, IM_A) {
s_saveDataInTBE;
j_popDataQueue;
}
// Transitions from OM_A
transition(OM_A, Own_GETX, M){
hh_store_hit;
d_deallocateTBE;
i_popAddressQueue;
}
transition(OM_A, {Other_GETS, Other_GET_INSTR}){
r_cacheToRequestor;
i_popAddressQueue;
}
transition(OM_A, Other_GETX, IM_AD){
r_cacheToRequestor;
i_popAddressQueue;
}
transition(OM_A, Data, IM_A) { // if we get data, we know we're going to lose the block before we see our own GETX
s_saveDataInTBE;
j_popDataQueue;
}
// Transitions from SM_AD
transition(SM_AD, Own_GETX, SM_D) {
i_popAddressQueue;
}
transition(SM_AD, {Other_GETS,Other_GET_INSTR}) {
i_popAddressQueue;
}
transition(SM_AD, Other_GETX, IM_AD) {
i_popAddressQueue;
}
transition(SM_AD, Data, SM_A) {
s_saveDataInTBE;
j_popDataQueue;
}
// Transitions from IS_A
transition(IS_A, {Own_GETS,Own_GET_INSTR}, S) {
w_writeDataFromTBEToCache;
h_load_hit;
d_deallocateTBE;
i_popAddressQueue;
}
transition(IS_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
i_popAddressQueue;
}
// Transitions from IM_A
transition(IM_A, Own_GETX, M) {
w_writeDataFromTBEToCache;
hh_store_hit;
d_deallocateTBE;
i_popAddressQueue;
}
transition(IM_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
i_popAddressQueue;
}
// Transitions from SM_A
transition(SM_A, Own_GETX, M) {
w_writeDataFromTBEToCache;
hh_store_hit;
d_deallocateTBE;
i_popAddressQueue;
}
transition(SM_A, {Other_GETS,Other_GET_INSTR}) {
i_popAddressQueue;
}
transition(SM_A, Other_GETX, IM_A) {
i_popAddressQueue;
}
// Transitions from MI_A
transition(MI_A, Own_PUTX, I) {
d_deallocateTBE;
i_popAddressQueue;
}
transition(MI_A, {Other_GETS, Other_GET_INSTR}) {
y_tbeToReq;
i_popAddressQueue;
}
transition(MI_A, Other_GETX, II_A) {
y_tbeToReq;
i_popAddressQueue;
}
// Transitions from OI_A
transition(OI_A, Own_PUTX, I) {
d_deallocateTBE;
i_popAddressQueue;
}
transition(OI_A, {Other_GETS, Other_GET_INSTR}) {
y_tbeToReq;
i_popAddressQueue;
}
transition(OI_A, Other_GETX, II_A) {
y_tbeToReq;
i_popAddressQueue;
}
// Transitions from II_A
transition(II_A, Own_PUTX, I) {
d_deallocateTBE;
i_popAddressQueue;
}
transition(II_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
i_popAddressQueue;
}
// Transitions from IS_D, IS_D_I
transition({IS_D, IS_D_I}, {Other_GETS,Other_GET_INSTR}) {
i_popAddressQueue;
}
transition(IS_D, Other_GETX, IS_D_I) {
i_popAddressQueue;
}
transition(IS_D_I, Other_GETX) {
i_popAddressQueue;
}
transition(IS_D, Data, S) {
s_saveDataInTBE;
w_writeDataFromTBEToCache;
h_load_hit;
d_deallocateTBE;
j_popDataQueue;
}
transition(IS_D_I, Data, I) {
s_saveDataInTBE;
w_writeDataFromTBEToCache;
h_load_hit;
d_deallocateTBE;
j_popDataQueue;
}
// Transitions from IM_D, IM_D_O, IM_D_I, IM_D_OI
transition( IM_D, {Other_GETS,Other_GET_INSTR}, IM_D_O ) {
e_recordForwardingInfo;
i_popAddressQueue;
}
transition( IM_D, Other_GETX, IM_D_I ) {
e_recordForwardingInfo;
i_popAddressQueue;
}
transition(IM_D_O, {Other_GETS,Other_GET_INSTR} ) {
e_recordForwardingInfo;
i_popAddressQueue;
}
transition(IM_D_O, Other_GETX, IM_D_OI) {
e_recordForwardingInfo;
i_popAddressQueue;
}
transition( {IM_D_I, IM_D_OI}, {Other_GETS, Other_GET_INSTR, Other_GETX} ) {
i_popAddressQueue;
}
transition(IM_D, Data, M) {
s_saveDataInTBE;
w_writeDataFromTBEToCache;
hh_store_hit;
d_deallocateTBE;
j_popDataQueue;
}
transition(IM_D_O, Data, O) {
s_saveDataInTBE;
w_writeDataFromTBEToCache;
hh_store_hit;
o_cacheToForward;
d_deallocateTBE;
j_popDataQueue;
}
transition(IM_D_I, Data, I) {
s_saveDataInTBE;
w_writeDataFromTBEToCache;
hh_store_hit;
o_cacheToForward;
d_deallocateTBE;
j_popDataQueue;
}
transition(IM_D_OI, Data, I) {
s_saveDataInTBE;
w_writeDataFromTBEToCache;
hh_store_hit;
o_cacheToForward;
d_deallocateTBE;
j_popDataQueue;
}
// Transitions for SM_D, SM_D_O
transition(SM_D, {Other_GETS,Other_GET_INSTR}, SM_D_O) {
e_recordForwardingInfo;
i_popAddressQueue;
}
transition(SM_D, Other_GETX, IM_D_I) {
e_recordForwardingInfo;
i_popAddressQueue;
}
transition(SM_D_O, {Other_GETS,Other_GET_INSTR}) {
e_recordForwardingInfo;
i_popAddressQueue;
}
transition(SM_D_O, Other_GETX, IM_D_OI) {
e_recordForwardingInfo;
i_popAddressQueue;
}
transition(SM_D, Data, M) {
s_saveDataInTBE;
w_writeDataFromTBEToCache;
hh_store_hit;
d_deallocateTBE;
j_popDataQueue;
}
transition(SM_D_O, Data, O) {
s_saveDataInTBE;
w_writeDataFromTBEToCache;
hh_store_hit;
o_cacheToForward;
d_deallocateTBE;
j_popDataQueue;
}
}

View file

@ -1,4 +0,0 @@
MOSI_SMP_bcast-msg.sm
MOSI_SMP_bcast_1level-cache.sm
MOSI_SMP_bcast-dir.sm
standard_1level_SMP-protocol.sm

View file

@ -1,345 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
machine(Directory, "MOSI Broadcast Optimized") {
MessageBuffer addressFromDir, network="To", virtual_network="0", ordered="true";
MessageBuffer dataFromDir, network="To", virtual_network="1", ordered="false";
MessageBuffer addressToDir, network="From", virtual_network="0", ordered="true";
MessageBuffer dataToDir, network="From", virtual_network="1", ordered="false";
enumeration(State, desc="Directory states", default="Directory_State_C") {
C, desc="Cold - no processor has requested this line";
I, desc="Idle";
S, desc="Shared";
SS, desc="Shared, 2 or more sharers";
OS, desc="Owned by a cache";
OSS, desc="Owned by a cache, present in at least 3 caches";
M, desc="Modified", format="!b";
}
// ** EVENTS **
enumeration(Event, desc="Directory events") {
// From Address network
OtherAddress, desc="We saw an address msg to someone else";
GETS, desc="A GETS arrives";
GET_INSTR, desc="A GETInstr arrives";
GETX, desc="A GETX arrives", format="!r";
PUTX_Owner, desc="A PUTX arrives, requestor is owner";
PUTX_NotOwner, desc="A PUTX arrives, requestor is not owner", format="!r";
Memory_Data, desc="Fetched data from memory arrives";
Memory_Ack, desc="Writeback Ack from memory arrives";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="...") {
State DirectoryState, desc="Directory state";
bool DirOwner, default="true", desc="Is dir owner?";
MachineID ProcOwner, desc="Processor Owner";
DataBlock DataBlk, desc="data for the block";
}
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
// to simulate detailed DRAM
external_type(MemoryControl, inport="yes", outport="yes") {
}
// ** OBJECTS **
DirectoryMemory directory, constructor_hack="i";
MemoryControl memBuffer, constructor_hack="i";
void profile_request(int cache_state, State directory_state, GenericRequestType request_type);
State getState(Address addr) {
if (directory.isPresent(addr)) {
return directory[addr].DirectoryState;
}
return State:C;
}
void setState(Address addr, State state) {
if (directory.isPresent(addr)) {
directory[addr].DirectoryState := state;
}
}
// ** OUT_PORTS **
out_port(dataNetwork_out, DataMsg, dataFromDir);
out_port(addressNetwork_out, AddressMsg, addressFromDir);
out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
// Address Network
in_port(addressNetwork_in, AddressMsg, addressToDir) {
if (addressNetwork_in.isReady()) {
peek(addressNetwork_in, AddressMsg) {
if(map_Address_to_Directory(in_msg.Address) != machineID) {
trigger(Event:OtherAddress, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
trigger(Event:GET_INSTR, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:GETX, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
if (in_msg.Requestor == directory[in_msg.Address].ProcOwner && directory[in_msg.Address].DirOwner == false) {
trigger(Event:PUTX_Owner, in_msg.Address);
} else {
trigger(Event:PUTX_NotOwner, in_msg.Address);
}
} else {
error("unexpected message");
}
}
}
}
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, memBuffer) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.Address);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Address);
} else {
DEBUG_EXPR(in_msg.Type);
error("Invalid message");
}
}
}
}
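// Unlike the broadcast directory earlier in this commit, this variant routes
// fetches and writebacks through a simulated memory controller (memBuffer):
// GETS/GETX enqueue a MEMORY_READ and the data reply is sent only when the
// corresponding Memory_Data event returns, while PUTX_Owner writebacks also
// enqueue a MEMORY_WB whose Memory_Ack is consumed without further action.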
// *** ACTIONS ***
action(d_sendDataMsg, "d", desc="Send data message to requestor") {
peek(memQueue_in, MemoryMsg) {
enqueue(dataNetwork_out, DataMsg, latency="1") {
out_msg.Address := in_msg.Address;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
out_msg.DestMachine := MachineType:L1Cache;
//out_msg.DataBlk := directory[in_msg.Address].DataBlk;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.MessageSize := MessageSizeType:Data;
DEBUG_EXPR(in_msg.OriginalRequestorMachId);
DEBUG_EXPR(out_msg.DataBlk);
}
}
}
action(j_popAddressQueue, "j", desc="Pop address queue.") {
addressNetwork_in.dequeue();
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
memQueue_in.dequeue();
}
action(p_profile, "p", desc="Profile this transition.") {
peek(addressNetwork_in, AddressMsg) {
profile_request(in_msg.CacheState, getState(address), convertToGenericType(in_msg.Type));
}
}
action(m_setOwnerRequestor, "m", desc="Set owner = requestor") {
peek(addressNetwork_in, AddressMsg) {
directory[in_msg.Address].ProcOwner := in_msg.Requestor;
directory[in_msg.Address].DirOwner := false;
}
}
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(addressNetwork_in, AddressMsg) {
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_READ;
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.DataBlk := directory[in_msg.Address].DataBlk;
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
DEBUG_EXPR(out_msg);
}
}
}
action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
peek(addressNetwork_in, AddressMsg) {
enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
DEBUG_EXPR(out_msg);
}
}
}
action(r_writeDataFromRequest, "r", desc="Write request data to memory") {
peek(addressNetwork_in, AddressMsg) {
directory[in_msg.Address].DataBlk := in_msg.DataBlk;
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(in_msg.DataBlk);
}
}
action(x_setOwnerToDirectory, "x", desc="Set owner equal to the directory"){
peek(addressNetwork_in, AddressMsg) {
directory[in_msg.Address].DirOwner := true;
}
}
// TRANSITIONS
// Ignore all address and data messages not bound for us
transition(C, OtherAddress) {
j_popAddressQueue;
}
// PUTX_NotOwner Transitions
transition({I, S, SS, OS, OSS, M}, PUTX_NotOwner) {
p_profile;
j_popAddressQueue;
}
// Transitions from Idle
transition({C, I}, {GETS,GET_INSTR}, S) {
//d_sendDataMsg;
qf_queueMemoryFetchRequest;
p_profile;
j_popAddressQueue;
}
transition({C, I}, GETX, M) {
//d_sendDataMsg;
qf_queueMemoryFetchRequest;
m_setOwnerRequestor;
p_profile;
j_popAddressQueue;
}
// Transitions from Shared
transition({S, SS}, {GETS,GET_INSTR}, SS) {
//d_sendDataMsg;
qf_queueMemoryFetchRequest;
p_profile;
j_popAddressQueue;
}
transition({S, SS}, GETX, M) {
//d_sendDataMsg;
qf_queueMemoryFetchRequest;
m_setOwnerRequestor;
p_profile;
j_popAddressQueue;
}
// Transitions from Owned
transition({OS, OSS}, {GETS,GET_INSTR}, OSS) {
p_profile;
j_popAddressQueue;
}
transition({OS, OSS}, GETX, M) {
m_setOwnerRequestor;
p_profile;
j_popAddressQueue;
}
transition(OS, PUTX_Owner, S) {
x_setOwnerToDirectory;
r_writeDataFromRequest;
qw_queueMemoryWBRequest;
p_profile;
j_popAddressQueue;
}
transition(OSS, PUTX_Owner, SS) {
x_setOwnerToDirectory;
r_writeDataFromRequest;
qw_queueMemoryWBRequest;
p_profile;
j_popAddressQueue;
}
// Transitions from Modified
transition(M, {GETS,GET_INSTR}, OS) {
p_profile;
j_popAddressQueue;
}
transition(M, GETX) {
m_setOwnerRequestor;
p_profile;
j_popAddressQueue;
}
transition(M, PUTX_Owner, I) {
x_setOwnerToDirectory;
r_writeDataFromRequest;
qw_queueMemoryWBRequest;
p_profile;
j_popAddressQueue;
}
transition({C, I, S, SS, OS, OSS, M}, Memory_Data) {
d_sendDataMsg;
l_popMemQueue;
}
transition({C, I, S, SS, OS, OSS, M}, Memory_Ack) {
//a_sendAck;
l_popMemQueue;
}
}

View file

@ -1,4 +0,0 @@
MOSI_SMP_bcast-msg.sm
MOSI_SMP_bcast-cache.sm
MOSI_SMP_bcast_m-dir.sm
standard_SMP-protocol.sm

View file

@ -1,838 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MOSI_directory_1level-cache.sm 1.18 04/09/07 13:52:52-05:00 mikem@maya.cs.wisc.edu $
*
*/
machine(L1Cache, "MOSI Directory Optimized") {
MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false";
MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false";
MessageBuffer forwardedRequestToCache, network="From", virtual_network="1", ordered="true";
MessageBuffer responseToCache, network="From", virtual_network="2", ordered="false";
// STATES
enumeration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
I, desc="Idle";
S, desc="Shared";
O, desc="Owned";
M, desc="Modified", format="!b";
// Transient States
MI, desc="modified, issued PUTX, have not seen response yet";
OI, desc="owned, issued PUTX, have not seen response yet";
IS, desc="idle, issued GETS, have not seen response yet";
ISI, desc="idle, issued GETS, saw INV, have not seen data for GETS yet", format="!b";
IM, desc="idle, issued GETX, have not seen response yet";
IMI, desc="idle, issued GETX, saw forwarded GETX";
IMO, desc="idle, issued GETX, saw forwarded GETS";
IMOI, desc="idle, issued GETX, saw forwarded GETS, saw forwarded GETX";
// Note: OM is a strange state, because it is waiting for the line
// to be stolen away, or to look like it has been stolen away. The
// common case is that we see a forward from the directory that is
// really from us, we forward the data to our own data queue, and
// everything works fine.
OM, desc="owned, issued GETX, have not seen response yet";
}
// EVENTS
enumeration(Event, desc="Cache events") {
Load, desc="Load request from the processor";
Load_prefetch, desc="Load prefetch request from the processor";
Ifetch, desc="I-fetch request from the processor";
Store_prefetch, desc="Store prefetch request from the processor";
Store, desc="Store request from the processor";
Replacement, desc="Replacement", format="!r";
Forwarded_GETS, "Forwarded GETS", desc="Directory forwards GETS to us";
Forwarded_GETX, "Forwarded GETX", desc="Directory forwards GETX to us";
INV, "INV", desc="Invalidation", format="!r";
Proc_ack, "Proc ack", desc="Ack from proc";
Proc_last_ack, "Proc last ack", desc="Last ack", format="!r";
Data_ack_0, "Data ack 0", desc="Data with ack count = 0";
Data_ack_not_0, "Data ack not 0", desc="Data with ack count != 0 (but haven't seen all acks first)";
Data_ack_not_0_last, "Data ack not 0 last", desc="Data with ack count != 0 after having received all acks";
Dir_WB_ack, "WB ack", desc="Writeback ack from dir";
}
// TYPES
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry") {
State CacheState, desc="cache state";
DataBlock DataBlk, desc="data for the block";
}
// TBE fields
structure(TBE, desc="...") {
Address Address, desc="Physical address for this TBE";
State TBEState, desc="Transient state";
DataBlock DataBlk, desc="Buffer for the data block";
int NumPendingAcks, desc="Number of acks that this processor is waiting for";
NetDest ForwardGetS_IDs, desc="Set of the processors to forward the block";
MachineID ForwardGetX_ID, desc="ID of the processor to forward the block";
int ForwardGetX_AckCount, desc="Number of acks the GetX we are forwarded needs";
bool isPrefetch, desc="Set if this was caused by a prefetch";
}
external_type(CacheMemory) {
bool cacheAvail(Address);
Address cacheProbe(Address);
void allocate(Address);
void deallocate(Address);
Entry lookup(Address);
void changePermission(Address, AccessPermission);
bool isTagPresent(Address);
}
external_type(TBETable) {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
MessageBuffer optionalQueue, ordered="false", abstract_chip_ptr="true";
Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";
TBETable TBEs, template_hack="<L1Cache_TBE>";
CacheMemory cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_unified L1"', abstract_chip_ptr="true";
State getState(Address addr) {
if(TBEs.isPresent(addr)) {
return TBEs[addr].TBEState;
} else if (cacheMemory.isTagPresent(addr)) {
return cacheMemory[addr].CacheState;
}
return State:I;
}
void setState(Address addr, State state) {
if (TBEs.isPresent(addr)) {
TBEs[addr].TBEState := state;
}
if (cacheMemory.isTagPresent(addr)) {
cacheMemory[addr].CacheState := state;
// Set permission
if ((state == State:I) || (state == State:MI) || (state == State:OI)) {
cacheMemory.changePermission(addr, AccessPermission:Invalid);
} else if (state == State:S || state == State:O) {
cacheMemory.changePermission(addr, AccessPermission:Read_Only);
} else if (state == State:M) {
cacheMemory.changePermission(addr, AccessPermission:Read_Write);
} else {
cacheMemory.changePermission(addr, AccessPermission:Busy);
}
}
}
// ** OUT_PORTS **
out_port(requestNetwork_out, RequestMsg, requestFromCache);
out_port(responseNetwork_out, ResponseMsg, responseFromCache);
// ** IN_PORTS **
// Response Network
in_port(responseNetwork_in, ResponseMsg, responseToCache) {
if (responseNetwork_in.isReady()) {
peek(responseNetwork_in, ResponseMsg) {
if(in_msg.Type == CoherenceResponseType:DATA) {
if(in_msg.NumPendingAcks == 0) {
trigger(Event:Data_ack_0, in_msg.Address);
} else {
if(in_msg.NumPendingAcks + TBEs[in_msg.Address].NumPendingAcks != 0) {
trigger(Event:Data_ack_not_0, in_msg.Address);
} else {
trigger(Event:Data_ack_not_0_last, in_msg.Address);
}
}
} else if(in_msg.Type == CoherenceResponseType:ACK) {
if(TBEs[in_msg.Address].NumPendingAcks != 1){
trigger(Event:Proc_ack, in_msg.Address);
} else {
trigger(Event:Proc_last_ack, in_msg.Address);
}
}
}
}
}
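
A note on how the port above resolves a GETX transaction: the data message from the directory (or from the owner) carries the number of sharer acks the requestor must still collect, while each individual ack simply decrements the TBE counter, so the counter can go negative when acks race ahead of the data. The request completes once the data has arrived and the running sum returns to zero. The following is a minimal Python sketch of that bookkeeping, written purely for illustration; the class and helper names are invented and do not appear in the protocol sources.

class AckTracker:
    """Models the TBEs[addr].NumPendingAcks bookkeeping driven by the port above."""
    def __init__(self):
        self.pending = 0        # mirrors TBEs[addr].NumPendingAcks
        self.data_seen = False

    def on_ack(self):
        # Proc_ack / Proc_last_ack: every sharer ack decrements the counter.
        self.pending -= 1
        return self.data_seen and self.pending == 0   # True => this was the last ack

    def on_data(self, acks_in_msg):
        # Classification used for Data_ack_0 / Data_ack_not_0 / Data_ack_not_0_last.
        self.data_seen = True
        if acks_in_msg == 0:
            return "Data_ack_0"                        # no sharers to invalidate
        if acks_in_msg + self.pending != 0:
            self.pending += acks_in_msg                # still waiting for more acks
            return "Data_ack_not_0"
        return "Data_ack_not_0_last"                   # all acks already arrived

# Example: two sharer acks overtake the data message.
t = AckTracker()
t.on_ack()
t.on_ack()                     # pending == -2
print(t.on_data(2))            # -> Data_ack_not_0_last, request is complete
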
// Forwarded Request network
in_port(forwardedRequestNetwork_in, RequestMsg, forwardedRequestToCache) {
if(forwardedRequestNetwork_in.isReady()) {
peek(forwardedRequestNetwork_in, RequestMsg) {
if(in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Forwarded_GETS, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:Forwarded_GETX, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:INV, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Dir_WB_ack, in_msg.Address);
} else {
error("Invalid forwarded request type");
}
}
}
}
// Mandatory Queue
in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, CacheMsg) {
if (cacheMemory.cacheAvail(in_msg.Address) == false) {
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
} else {
if (in_msg.Type == CacheRequestType:LD) {
trigger(Event:Load, in_msg.Address);
} else if (in_msg.Type == CacheRequestType:IFETCH) {
trigger(Event:Ifetch, in_msg.Address);
} else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
trigger(Event:Store, in_msg.Address);
} else {
error("Invalid CacheRequestType");
}
}
}
}
}
// Optional Queue
in_port(optionalQueue_in, CacheMsg, optionalQueue, desc="...") {
if (optionalQueue_in.isReady()) {
peek(optionalQueue_in, CacheMsg) {
if (cacheMemory.cacheAvail(in_msg.Address) == false) {
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
} else {
if (in_msg.Type == CacheRequestType:LD) {
trigger(Event:Load_prefetch, in_msg.Address);
} else if (in_msg.Type == CacheRequestType:IFETCH) {
trigger(Event:Load_prefetch, in_msg.Address);
} else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
trigger(Event:Store_prefetch, in_msg.Address);
} else {
error("Invalid CacheRequestType");
}
}
}
}
}
// ACTIONS
action(a_issueGETS, "a", desc="Issue GETS") {
enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Control;
}
}
action(b_issueGETX, "b", desc="Issue GETX") {
enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.MessageSize := MessageSizeType:Control;
}
}
action(d_issuePUTX, "d", desc="Issue PUTX") {
enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.DataBlk := cacheMemory[address].DataBlk;
out_msg.MessageSize := MessageSizeType:Data;
}
}
action(e_dataFromCacheToRequestor, "e", desc="Send data from cache to requestor") {
peek(forwardedRequestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;
out_msg.NumPendingAcks := in_msg.NumPendingAcks; // Needed when in state O and we see a GetX
out_msg.Destination.add(in_msg.Requestor);
out_msg.DestMachine := MachineType:L1Cache;
DEBUG_EXPR(out_msg.Destination);
out_msg.DataBlk := cacheMemory[address].DataBlk;
out_msg.MessageSize := MessageSizeType:Data;
}
}
}
action(g_allocateCacheBlock, "g", desc="Allocate cache block") {
if (cacheMemory.isTagPresent(address) == false) {
cacheMemory.allocate(address);
}
}
action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
DEBUG_EXPR(cacheMemory[address].DataBlk);
if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
// Non-prefetch
sequencer.readCallback(address, cacheMemory[address].DataBlk);
} else {
// Prefetch - don't call back
}
}
action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
DEBUG_EXPR(cacheMemory[address].DataBlk);
if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
// Non-prefetch
sequencer.writeCallback(address, cacheMemory[address].DataBlk);
} else {
// Prefetch - don't call back
}
}
action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
check_allocate(TBEs);
TBEs.allocate(address);
TBEs[address].NumPendingAcks := 0; // default value
TBEs[address].isPrefetch := false;
TBEs[address].ForwardGetS_IDs.clear();
}
action(j_setPrefetchBit, "j", desc="Set prefetch bit") {
TBEs[address].isPrefetch := true;
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
mandatoryQueue_in.dequeue();
}
action(l_popForwardedRequestQueue, "l", desc="Pop incoming forwarded request queue") {
forwardedRequestNetwork_in.dequeue();
}
action(m_popOptionalQueue, "m", desc="Pop optional queue") {
optionalQueue_in.dequeue();
}
action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
responseNetwork_in.dequeue();
}
action(p_addNumberOfPendingAcks, "p", desc="Add number of pending acks to TBE") {
peek(responseNetwork_in, ResponseMsg) {
DEBUG_EXPR(TBEs[address].NumPendingAcks);
TBEs[address].NumPendingAcks := TBEs[address].NumPendingAcks + in_msg.NumPendingAcks;
DEBUG_EXPR(in_msg.NumPendingAcks);
DEBUG_EXPR(TBEs[address].NumPendingAcks);
}
}
action(q_decrementNumberOfPendingAcks, "q", desc="Decrement number of pending invalidations by one") {
DEBUG_EXPR(TBEs[address].NumPendingAcks);
TBEs[address].NumPendingAcks := TBEs[address].NumPendingAcks - 1;
DEBUG_EXPR(TBEs[address].NumPendingAcks);
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
TBEs.deallocate(address);
}
action(t_sendAckToInvalidator, "t", desc="Send ack to invalidator") {
peek(forwardedRequestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(in_msg.Requestor);
out_msg.DestMachine := MachineType:L1Cache;
DEBUG_EXPR(out_msg.Destination);
out_msg.NumPendingAcks := 0;
out_msg.MessageSize := MessageSizeType:Control;
}
}
}
action(u_writeDataToCache, "u", desc="Write data to cache") {
peek(responseNetwork_in, ResponseMsg) {
cacheMemory[address].DataBlk := in_msg.DataBlk;
}
}
action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
TBEs[address].DataBlk := cacheMemory[address].DataBlk;
}
action(y_dataFromTBEToRequestor, "y", desc="Send data from TBE to requestor") {
peek(forwardedRequestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;
out_msg.NumPendingAcks := in_msg.NumPendingAcks; // Needed when in state MS and we see a GetX
out_msg.Destination.add(in_msg.Requestor);
out_msg.DestMachine := MachineType:L1Cache;
DEBUG_EXPR(out_msg.Destination);
out_msg.DataBlk := TBEs[address].DataBlk;
out_msg.MessageSize := MessageSizeType:Data;
}
}
}
action(z_stall, "z", desc="Stall") {
}
action(dd_recordGetSForwardID, "\d", desc="Record forwarded GetS for future forwarding") {
peek(forwardedRequestNetwork_in, RequestMsg) {
TBEs[address].ForwardGetS_IDs.add(in_msg.Requestor);
}
}
action(ee_dataFromCacheToGetSForwardIDs, "\e", desc="Send data from cache to GetS ForwardIDs") {
// FIXME - In some cases this should be from the TBE, not the cache.
enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination := TBEs[address].ForwardGetS_IDs;
out_msg.DestMachine := MachineType:L1Cache;
DEBUG_EXPR(out_msg.Destination);
out_msg.DataBlk := cacheMemory[address].DataBlk;
out_msg.NumPendingAcks := 0;
out_msg.MessageSize := MessageSizeType:Data;
}
}
action(ff_deallocateCacheBlock, "\f", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
cacheMemory.deallocate(address);
}
action(gg_dataFromCacheToGetXForwardID, "\g", desc="Send data from cache to GetX ForwardID") {
// FIXME - In some cases this should be from the TBE, not the cache.
enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(TBEs[address].ForwardGetX_ID);
out_msg.DestMachine := MachineType:L1Cache;
DEBUG_EXPR(out_msg.Destination);
out_msg.DataBlk := cacheMemory[address].DataBlk;
out_msg.NumPendingAcks := TBEs[address].ForwardGetX_AckCount;
out_msg.MessageSize := MessageSizeType:Data;
}
}
action(ii_recordGetXForwardID, "\i", desc="Record forwarded GetX and ack count for future forwarding") {
peek(forwardedRequestNetwork_in, RequestMsg) {
TBEs[address].ForwardGetX_ID := in_msg.Requestor;
TBEs[address].ForwardGetX_AckCount := in_msg.NumPendingAcks;
}
}
//*****************************************************
// TRANSITIONS
//*****************************************************
// Transitions for Load/Store/Prefetch/Replacement from transient states
transition({OM, OI, IS, ISI, IM, IMO, IMOI, IMI, MI}, {Load, Load_prefetch, Ifetch, Store, Store_prefetch, Replacement}) {
z_stall;
}
// Transitions from Idle
transition(I, {Load,Ifetch}, IS) {
g_allocateCacheBlock;
i_allocateTBE;
a_issueGETS;
k_popMandatoryQueue;
}
transition(I, {Load_prefetch}, IS) {
g_allocateCacheBlock;
i_allocateTBE;
j_setPrefetchBit;
a_issueGETS;
m_popOptionalQueue;
}
transition(I, Store, IM) {
g_allocateCacheBlock;
i_allocateTBE;
b_issueGETX;
k_popMandatoryQueue;
}
transition(I, Store_prefetch, IM) {
g_allocateCacheBlock;
i_allocateTBE;
j_setPrefetchBit;
b_issueGETX;
m_popOptionalQueue;
}
transition(I, Replacement) {
ff_deallocateCacheBlock;
}
transition(I, INV) {
t_sendAckToInvalidator;
l_popForwardedRequestQueue;
}
// Transitions from Shared
transition({S, O}, {Load,Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition({S, O, M}, Load_prefetch) {
m_popOptionalQueue;
}
transition(S, Store, IM) {
i_allocateTBE;
b_issueGETX;
k_popMandatoryQueue;
}
transition(S, Store_prefetch, IM) {
i_allocateTBE;
j_setPrefetchBit;
b_issueGETX;
m_popOptionalQueue;
}
transition(S, Replacement, I) {
ff_deallocateCacheBlock;
}
transition(S, INV, I) {
t_sendAckToInvalidator;
l_popForwardedRequestQueue;
}
// Transitions from Modified
transition(M, {Load, Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(M, Store) {
hh_store_hit;
k_popMandatoryQueue;
}
transition(M, Store_prefetch) {
m_popOptionalQueue;
}
transition(M, Replacement, MI) {
i_allocateTBE;
d_issuePUTX;
x_copyDataFromCacheToTBE;
ff_deallocateCacheBlock;
}
transition(M, Forwarded_GETS, O) {
e_dataFromCacheToRequestor;
l_popForwardedRequestQueue;
}
transition(M, Forwarded_GETX, I) {
e_dataFromCacheToRequestor;
l_popForwardedRequestQueue;
}
// Transitions from O
transition(O, Store, OM) {
i_allocateTBE;
b_issueGETX;
k_popMandatoryQueue;
}
transition(O, Store_prefetch, OM) {
i_allocateTBE;
j_setPrefetchBit;
b_issueGETX;
m_popOptionalQueue;
}
transition(O, Replacement, OI){
i_allocateTBE;
d_issuePUTX;
x_copyDataFromCacheToTBE;
ff_deallocateCacheBlock;
}
transition(O, Forwarded_GETS) {
e_dataFromCacheToRequestor;
l_popForwardedRequestQueue;
}
transition(O, Forwarded_GETX, I) {
e_dataFromCacheToRequestor;
l_popForwardedRequestQueue;
}
// transitions from OI
transition(OI, Forwarded_GETS) {
y_dataFromTBEToRequestor;
l_popForwardedRequestQueue;
}
transition(OI, Forwarded_GETX) {
y_dataFromTBEToRequestor;
l_popForwardedRequestQueue;
}
transition(OI, Dir_WB_ack, I) {
s_deallocateTBE;
l_popForwardedRequestQueue;
}
// Transitions from IS
transition(IS, INV, ISI) {
t_sendAckToInvalidator;
l_popForwardedRequestQueue;
}
transition(IS, Data_ack_0, S) {
u_writeDataToCache;
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
// transitions from ISI
// in ISI, could get data from the Proc whose GETX caused INV to go from IS to ISI
// or, could get data from Dir if Dir's data lost race to Dir's INV
// or, could get data from Dir, if my GETS took forever to get to Dir, and the GETX
// processor already wrote it back
transition(ISI, Data_ack_0, I) {
u_writeDataToCache;
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
transition(ISI, INV) {
t_sendAckToInvalidator;
l_popForwardedRequestQueue;
}
// Transitions from IM
transition(IM, INV) { // do not need to go to IMI, since INV is for earlier epoch
t_sendAckToInvalidator;
l_popForwardedRequestQueue;
}
transition({IM, IMO}, Forwarded_GETS, IMO) {
dd_recordGetSForwardID;
l_popForwardedRequestQueue;
}
transition(IM, Forwarded_GETX, IMI) {
ii_recordGetXForwardID;
l_popForwardedRequestQueue;
}
transition(IM, {Data_ack_0, Data_ack_not_0_last}, M) {
u_writeDataToCache;
hh_store_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
transition(IM, Data_ack_not_0) {
u_writeDataToCache;
p_addNumberOfPendingAcks;
o_popIncomingResponseQueue;
}
transition(IM, Proc_ack) {
q_decrementNumberOfPendingAcks;
o_popIncomingResponseQueue;
}
transition(IM, Proc_last_ack, M) {
hh_store_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
// transitions from IMO
transition(IMO, Forwarded_GETX, IMOI) {
ii_recordGetXForwardID;
l_popForwardedRequestQueue;
}
transition(IMO, {Data_ack_0, Data_ack_not_0_last}, O) {
u_writeDataToCache;
hh_store_hit;
ee_dataFromCacheToGetSForwardIDs;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
transition(IMO, Data_ack_not_0) {
u_writeDataToCache;
p_addNumberOfPendingAcks;
o_popIncomingResponseQueue;
}
transition(IMO, Proc_ack) {
q_decrementNumberOfPendingAcks;
o_popIncomingResponseQueue;
}
transition(IMO, Proc_last_ack, O) {
hh_store_hit;
ee_dataFromCacheToGetSForwardIDs;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
// transitions from IMI
transition(IMI, {Data_ack_0, Data_ack_not_0_last}, I) {
u_writeDataToCache;
hh_store_hit;
gg_dataFromCacheToGetXForwardID;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
transition(IMI, Data_ack_not_0) {
u_writeDataToCache;
p_addNumberOfPendingAcks;
o_popIncomingResponseQueue;
}
transition(IMI, Proc_ack) {
q_decrementNumberOfPendingAcks;
o_popIncomingResponseQueue;
}
transition(IMI, Proc_last_ack, I) {
hh_store_hit;
gg_dataFromCacheToGetXForwardID;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
// transitions from IMOI
transition(IMOI, {Data_ack_0, Data_ack_not_0_last}, I) {
u_writeDataToCache;
hh_store_hit;
ee_dataFromCacheToGetSForwardIDs;
gg_dataFromCacheToGetXForwardID;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
transition(IMOI, Data_ack_not_0) {
u_writeDataToCache;
p_addNumberOfPendingAcks;
o_popIncomingResponseQueue;
}
transition(IMOI, Proc_ack) {
q_decrementNumberOfPendingAcks;
o_popIncomingResponseQueue;
}
transition(IMOI, Proc_last_ack, I) {
hh_store_hit;
ee_dataFromCacheToGetSForwardIDs;
gg_dataFromCacheToGetXForwardID;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
// Transitions from OM
transition(OM, Proc_ack) {
q_decrementNumberOfPendingAcks;
o_popIncomingResponseQueue;
}
transition(OM, Forwarded_GETS) {
e_dataFromCacheToRequestor;
l_popForwardedRequestQueue;
}
transition(OM, Forwarded_GETX, IM) {
e_dataFromCacheToRequestor;
l_popForwardedRequestQueue;
}
// Transitions from MI
transition(MI, Forwarded_GETS) {
y_dataFromTBEToRequestor;
l_popForwardedRequestQueue;
}
transition(MI, Forwarded_GETX) {
y_dataFromTBEToRequestor;
l_popForwardedRequestQueue;
}
transition(MI, Dir_WB_ack, I) {
s_deallocateTBE;
l_popForwardedRequestQueue;
}
}


@ -1,333 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MOSI_directory-dir.sm 1.14 04/09/07 13:52:52-05:00 mikem@maya.cs.wisc.edu $
*/
machine(Directory, "MOSI Directory Optimized") {
MessageBuffer forwardedRequestFromDir, network="To", virtual_network="1", ordered="true";
MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";
MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false";
// STATES
enumeration(State, desc="Directory states", default="Directory_State_I") {
// Base states
I, desc="Idle";
S, desc="Shared";
O, desc="Owned";
M, desc="Modified", format="!b";
}
// Events
enumeration(Event, desc="Directory events") {
GETS, desc="A GETS arrives";
GETX_Owner, desc="A GETX arrives, requestor is owner";
GETX_NotOwner, desc="A GETX arrives, requestor is not owner";
PUTX_Owner, "PUTX (requestor is owner)", desc="A PUTX arrives, requestor is owner";
PUTX_NotOwner, "PUTX (requestor not owner)",desc="A PUTX arrives, requestor is not owner";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="...") {
State DirectoryState, desc="Directory state";
NetDest Sharers, desc="Set of sharers";
bool DirOwner, default="true", desc="Is dir owner?";
MachineID ProcOwner, desc="Processor owner";
DataBlock DataBlk, desc="data for the block";
}
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
// ** OBJECTS **
DirectoryMemory directory, constructor_hack="i";
State getState(Address addr) {
if (directory.isPresent(addr)) {
return directory[addr].DirectoryState;
}
return State:I;
}
void setState(Address addr, State state) {
if (directory.isPresent(addr)) {
directory[addr].DirectoryState := state;
}
}
// ** OUT_PORTS **
out_port(forwardedRequestNetwork_out, RequestMsg, forwardedRequestFromDir);
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
// ** IN_PORTS **
in_port(requestNetwork_in, RequestMsg, requestToDir) {
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
if(directory[in_msg.Address].DirOwner == false && in_msg.Requestor == directory[in_msg.Address].ProcOwner) {
trigger(Event:GETX_Owner, in_msg.Address);
} else {
trigger(Event:GETX_NotOwner, in_msg.Address);
}
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
if (directory[in_msg.Address].DirOwner == false && in_msg.Requestor == directory[in_msg.Address].ProcOwner) {
trigger(Event:PUTX_Owner, in_msg.Address);
} else {
trigger(Event:PUTX_NotOwner, in_msg.Address);
}
} else {
error("Invalid message");
}
}
}
}
// Actions
// a_addRequestorToSharers
action(a_addRequestorToSharers, "a", desc="Add requestor to list of sharers") {
peek(requestNetwork_in, RequestMsg) {
directory[address].Sharers.add(in_msg.Requestor);
DEBUG_EXPR(directory[address].Sharers);
}
}
// b_dataToRequestor
action(b_dataToRequestor, "b", desc="Send data to requestor") {
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
if(in_msg.Type == CoherenceRequestType:GETX) {
out_msg.NumPendingAcks := directory[address].Sharers.count();
} else {
out_msg.NumPendingAcks := 0; // don't need to send pending ack count to GETS requestor
}
out_msg.Destination.add(in_msg.Requestor);
out_msg.DestMachine := MachineType:L1Cache;
out_msg.DataBlk := directory[address].DataBlk;
DEBUG_EXPR(out_msg.NumPendingAcks);
DEBUG_EXPR(out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Data;
}
}
}
// d_forwardRequestToOwner
action(d_forwardRequestToOwner, "d", desc="Forward request to owner") {
peek(requestNetwork_in, RequestMsg) {
enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(directory[address].ProcOwner);
DEBUG_EXPR(out_msg.Destination);
if(in_msg.Type == CoherenceRequestType:GETX) {
out_msg.NumPendingAcks := directory[address].Sharers.count();
} else {
out_msg.NumPendingAcks := 0; // don't need to send pending ack count to GETS requestor
}
out_msg.MessageSize := MessageSizeType:Control;
}
}
}
action(f_setOwnerToRequestor, "f", desc="Set owner equal to requestor") {
peek(requestNetwork_in, RequestMsg) {
directory[address].ProcOwner := in_msg.Requestor;
directory[address].DirOwner := false;
}
DEBUG_EXPR(directory[address].ProcOwner);
}
action(g_clearSharers, "g", desc="Clear list of sharers") {
directory[address].Sharers.clear();
}
// currently done via multicast message
action(h_invToSharers, "h", desc="Send INVs to all sharers") {
peek(requestNetwork_in, RequestMsg) {
if(directory[address].Sharers.count() != 0){
enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination := directory[address].Sharers;
out_msg.MessageSize := MessageSizeType:Control;
}
}
}
DEBUG_EXPR(directory[address].Sharers);
}
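
Taken together, the directory actions above implement a simple fan-out for a GETX from a non-owner while the line is Shared: the requestor is dropped from the sharer set, the data reply carries NumPendingAcks equal to the remaining sharer count, and one multicast INV is sent to exactly that set, so the requestor ends up waiting for one ack per invalidated sharer. The following Python sketch of that sequence is for illustration only; the function and field names are invented, and the actual composition is defined by the transitions further below.

def handle_getx_from_nonowner(entry, requestor):
    """Mirrors u_remove / b_data / f_setOwner / h_inv / g_clear for the S -> M case."""
    entry["sharers"].discard(requestor)       # u_removeRequestorFromSharers
    acks = len(entry["sharers"])              # b_dataToRequestor: NumPendingAcks in the data reply
    inv_targets = set(entry["sharers"])       # h_invToSharers: one multicast INV to all sharers
    entry["owner"] = requestor                # f_setOwnerToRequestor
    entry["sharers"].clear()                  # g_clearSharers
    return acks, inv_targets

entry = {"sharers": {"L1_0", "L1_1", "L1_2"}, "owner": None}
acks, targets = handle_getx_from_nonowner(entry, "L1_0")
print(acks, sorted(targets))                  # 2 ['L1_1', 'L1_2'] -> requestor waits for 2 acks
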
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
requestNetwork_in.dequeue();
}
action(l_writeRequestDataToMemory, "l", desc="Write PUTX/DWN data to memory") {
peek(requestNetwork_in, RequestMsg) {
directory[in_msg.Address].DataBlk := in_msg.DataBlk;
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(in_msg.DataBlk);
}
}
action(n_writebackAckToRequestor, "n", desc="Send WB_ack to requestor") {
peek(requestNetwork_in, RequestMsg) {
// This needs to be DIRECTORY_LATENCY to keep the queue fifo
enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Control;
}
}
}
action(p_clearOwner, "p", desc="Clear owner") {
directory[address].DirOwner := true; // set owner equal to dir
}
action(r_addOwnerToSharers, "r", desc="Add owner to list of sharers") {
directory[address].Sharers.add(directory[address].ProcOwner);
}
action(t_removeOwnerFromSharers, "t", desc="Remove owner from list of sharers") {
directory[address].Sharers.remove(directory[address].ProcOwner);
}
action(u_removeRequestorFromSharers, "u", desc="Remove requestor from list of sharers") {
peek(requestNetwork_in, RequestMsg) {
directory[address].Sharers.remove(in_msg.Requestor);
}
}
// TRANSITIONS
transition({I, S, M, O}, PUTX_NotOwner) {
n_writebackAckToRequestor;
j_popIncomingRequestQueue;
}
// Transitions from Idle
transition(I, GETS, S) {
a_addRequestorToSharers;
b_dataToRequestor;
j_popIncomingRequestQueue;
}
transition(I, GETX_NotOwner, M) {
f_setOwnerToRequestor;
b_dataToRequestor;
j_popIncomingRequestQueue;
}
// Transitions from Shared
transition(S, GETS) {
a_addRequestorToSharers;
b_dataToRequestor;
j_popIncomingRequestQueue;
}
transition(S, GETX_NotOwner, M) {
u_removeRequestorFromSharers;
b_dataToRequestor;
f_setOwnerToRequestor;
h_invToSharers;
g_clearSharers;
j_popIncomingRequestQueue;
}
// Transitions from Owned
transition(O, GETS) {
a_addRequestorToSharers;
d_forwardRequestToOwner;
j_popIncomingRequestQueue;
}
transition(O, {GETX_NotOwner, GETX_Owner}, M) {
u_removeRequestorFromSharers;
t_removeOwnerFromSharers;
d_forwardRequestToOwner;
f_setOwnerToRequestor;
h_invToSharers;
g_clearSharers;
j_popIncomingRequestQueue;
}
transition(O, PUTX_Owner, S) {
u_removeRequestorFromSharers;
l_writeRequestDataToMemory;
n_writebackAckToRequestor;
p_clearOwner;
j_popIncomingRequestQueue;
}
// Transitions from Modified
transition(M, GETS, O) {
a_addRequestorToSharers;
r_addOwnerToSharers;
d_forwardRequestToOwner;
j_popIncomingRequestQueue;
}
transition(M, GETX_NotOwner) {
d_forwardRequestToOwner;
f_setOwnerToRequestor;
j_popIncomingRequestQueue;
}
transition(M, PUTX_Owner, I) {
l_writeRequestDataToMemory;
n_writebackAckToRequestor;
p_clearOwner;
j_popIncomingRequestQueue;
}
}


@ -1,74 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MOSI_directory-msg.sm 1.9 04/08/09 16:11:38-05:00 mikem@maya.cs.wisc.edu $
*
*/
// CoherenceRequestType
enumeration(CoherenceRequestType, desc="...") {
GETX, desc="Get eXclusive";
GETS, desc="Get Shared";
GET_INSTR, desc="Get Instruction";
PUTX, desc="Put eXclusive";
INV, desc="INValidate";
WB_ACK, desc="Write Back ACKnowledgment";
}
// CoherenceResponseType
enumeration(CoherenceResponseType, desc="...") {
ACK, desc="ACKnowledgment";
NACK, desc="Negative ACKnowledgment";
DATA, desc="Data";
}
// RequestMsg
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Multicast destination mask";
DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
int NumPendingAcks, desc="Number of acks to wait for"; // Needed for forwarded responses only
MessageSizeType MessageSize, desc="size category of the message";
}
// ResponseMsg
structure(ResponseMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID Sender, desc="Node who sent the data";
MachineType SenderMachine, desc="What component sent the data";
NetDest Destination, desc="Node to whom the data is sent";
MachineType DestMachine, desc="What component receives the data";
DataBlock DataBlk, desc="data for the cache line";
int NumPendingAcks, desc="Number of acks to wait for";
MessageSizeType MessageSize, desc="size category of the message";
}


@ -1,4 +0,0 @@
MOSI_SMP_directory_1level-msg.sm
MOSI_SMP_directory_1level-cache.sm
MOSI_SMP_directory_1level-dir.sm
standard_1level_SMP-protocol.sm


@ -1,799 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
machine(L1Cache, "MSI Directory L1 Cache CMP") {
// NODE L1 CACHE
// From this node's L1 cache TO the network
// a local L1 -> this L2 bank, currently ordered with directory forwarded requests
MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="true";
MessageBuffer dummyFrom1, network="To", virtual_network="1", ordered="false"; // dummy buffer that shouldn't be used
MessageBuffer dummyFrom2, network="To", virtual_network="2", ordered="false"; // dummy buffer that shouldn't be used
// a local L1 -> this L2 bank
MessageBuffer responseFromL1Cache, network="To", virtual_network="3", ordered="false";
MessageBuffer dummyFrom4, network="To", virtual_network="4", ordered="false"; // dummy buffer that shouldn't be used
// To this node's L1 cache FROM the network
MessageBuffer dummyTo0, network="From", virtual_network="0", ordered="false"; // dummy buffer that shouldn't be used
MessageBuffer dummyTo1, network="From", virtual_network="1", ordered="false"; // dummy buffer that shouldn't be used
// a L2 bank -> this L1
MessageBuffer requestToL1Cache, network="From", virtual_network="2", ordered="true";
// a L2 bank -> this L1
MessageBuffer responseToL1Cache, network="From", virtual_network="3", ordered="false";
MessageBuffer dummyTo4, network="From", virtual_network="4", ordered="false"; // dummy buffer that shouldn't be used
// STATES
enumeration(State, desc="Cache states", default="L1Cache_State_L1_I") {
// Base states
NP, desc="Not present in either cache";
L1_I, desc="a L1 cache entry Idle";
L1_S, desc="a L1 cache entry Shared";
L1_M, desc="a L1 cache entry Modified", format="!b";
// Transient States
L1_IS, desc="L1 idle, issued GETS, have not seen response yet";
L1_ISI, desc="L1 idle, issued GETS, saw INV, still waiting for data";
L1_IM, desc="L1 idle, issued GETX, have not seen response yet";
L1_IMI, desc="L1 idle, issued GETX, saw INV, still waiting for data";
L1_IMS, desc="L1 idle, issued GETX, saw DownGrade, still waiting for data";
L1_IMSI, desc="L1 idle, issued GETX, saw DownGrade, saw INV, still waiting for data";
L1_SI, desc="issued PUTS, waiting for response";
L1_MI, desc="issued PUTX, waiting for response";
}
// EVENTS
enumeration(Event, desc="Cache events") {
// L1 events
Load, desc="Load request from the home processor";
Ifetch, desc="I-fetch request from the home processor";
Store, desc="Store request from the home processor";
// L1 is required to send response to the L2 immediately
L1_INV, "INV", desc="L1 Invalidation of M data", format="!r";
L1_INV_S, "INV", desc="L1 Invalidation of S data", format="!r";
L1_DownGrade, "Force DownGrade", desc="L2 cache forces an L1 cache in M to downgrade to S and writeback result";
// receiving of data
L1_Data, "Data", desc="Data in response to an L1 request, transistion to M or S depending on request";
L1_Data_S, "Data S", desc="Data in response to an L1 request, write data then transistion to S";
L1_Data_I, "Data I", desc="Data in response to an L1 request, write data then transistion to I";
// receiving of acks
L1_PutAck, "Put Ack", desc="PutS or PutX ack from L2";
// internally generated request
// L1 request to replace block, results in either a PUTS or PUTX request
L1_Replacement, desc="L1 Replacement", format="!r";
// Currently same as replacement, request initiated when block is in the wrong L1 cache
L1_WriteBack, desc="on-chip L1 cache must write back to shared L2";
}
// TYPES
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
State CacheState, desc="cache state";
DataBlock DataBlk, desc="data for the block";
}
// TBE fields
structure(TBE, desc="...") {
Address Address, desc="Physical address for this TBE";
State TBEState, desc="Transient state";
DataBlock DataBlk, desc="Buffer for the data block";
bool isPrefetch, desc="Set if this was caused by a prefetch";
}
external_type(CacheMemory) {
bool cacheAvail(Address);
Address cacheProbe(Address);
void allocate(Address);
void deallocate(Address);
Entry lookup(Address);
void changePermission(Address, AccessPermission);
bool isTagPresent(Address);
}
external_type(TBETable) {
TBE lookup(Address);
void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
}
TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
MessageBuffer mandatoryQueue, ordered="false", rank="100", abstract_chip_ptr="true";
// the optionalQueue doesn't have to be ordered for correctness
// however, enforcing order ensures the prefetches reach the L2 in order
MessageBuffer optionalQueue, ordered="true", rank="101", abstract_chip_ptr="true";
Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
int cache_state_to_int(State state);
// inclusive cache returns L1 entries only
Entry getL1CacheEntry(Address addr), return_by_ref="yes" {
if (L1DcacheMemory.isTagPresent(addr)) {
return L1DcacheMemory[addr];
} else {
return L1IcacheMemory[addr];
}
}
void changeL1Permission(Address addr, AccessPermission permission) {
if (L1DcacheMemory.isTagPresent(addr)) {
return L1DcacheMemory.changePermission(addr, permission);
} else if(L1IcacheMemory.isTagPresent(addr)) {
return L1IcacheMemory.changePermission(addr, permission);
} else {
error("cannot change permission, L1 block not present");
}
}
bool isL1CacheTagPresent(Address addr) {
return (L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
}
State getState(Address addr) {
if((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == true){
DEBUG_EXPR(id);
DEBUG_EXPR(addr);
}
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
if(L1_TBEs.isPresent(addr)) {
return L1_TBEs[addr].TBEState;
} else if (isL1CacheTagPresent(addr)) {
return getL1CacheEntry(addr).CacheState;
}
return State:NP;
}
std::string getStateStr(Address addr) {
return L1Cache_State_to_string(getState(addr));
}
// when is this called?
void setState(Address addr, State state) {
assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
// MUST CHANGE
if(L1_TBEs.isPresent(addr)) {
L1_TBEs[addr].TBEState := state;
}
if (isL1CacheTagPresent(addr)) {
getL1CacheEntry(addr).CacheState := state;
// Set permission
if (state == State:L1_I || state == State:L1_SI || state == State:L1_MI) {
changeL1Permission(addr, AccessPermission:Invalid);
} else if (state == State:L1_S) {
changeL1Permission(addr, AccessPermission:Read_Only);
} else if (state == State:L1_M) {
changeL1Permission(addr, AccessPermission:Read_Write);
} else {
changeL1Permission(addr, AccessPermission:Busy);
}
}
}
Event mandatory_request_type_to_event(CacheRequestType type) {
if (type == CacheRequestType:LD) {
return Event:Load;
} else if (type == CacheRequestType:IFETCH) {
return Event:Ifetch;
} else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
return Event:Store;
} else {
error("Invalid CacheRequestType");
}
}
// ** OUT_PORTS **
// All ports are to the same CMP network, queue id numbers determine IntraChip Switch location
out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
// ** IN_PORTS **
in_port(dummyTo0_in, RequestMsg, dummyTo0) {
if (dummyTo0_in.isReady()) {
peek(dummyTo0_in, RequestMsg) {
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(machineID);
DEBUG_EXPR(in_msg.Type);
DEBUG_EXPR(getState(in_msg.Address));
DEBUG_EXPR(in_msg.RequestorMachId);
}
error("dummyTo0 port should not be used");
}
}
in_port(dummyTo1_in, RequestMsg, dummyTo1) {
if (dummyTo1_in.isReady()) {
peek(dummyTo1_in, RequestMsg) {
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(machineID);
DEBUG_EXPR(in_msg.Type);
DEBUG_EXPR(getState(in_msg.Address));
DEBUG_EXPR(in_msg.RequestorMachId);
}
error("dummyTo1 port should not be used");
}
}
in_port(dummyTo4_in, ResponseMsg, dummyTo4) {
if (dummyTo4_in.isReady()) {
peek(dummyTo4_in, ResponseMsg) {
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(machineID);
DEBUG_EXPR(in_msg.Type);
DEBUG_EXPR(getState(in_msg.Address));
DEBUG_EXPR(in_msg.SenderMachId);
}
error("dummyTo4 port should not be used");
}
}
// Response IntraChip L1 Network - response msg to this L1 cache
in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache) {
if (responseIntraChipL1Network_in.isReady()) {
peek(responseIntraChipL1Network_in, ResponseMsg) {
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(in_msg.Destination);
DEBUG_EXPR(in_msg.SenderMachId);
DEBUG_EXPR(machineID);
assert(in_msg.Destination.isElement(machineID));
if(machineIDToMachineType(in_msg.SenderMachId) == MachineType:L2Cache) {
if(in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:L1_Data, in_msg.Address); // L1 now has data in its desired state
} else if(in_msg.Type == CoherenceResponseType:DATA_S) {
trigger(Event:L1_Data_S, in_msg.Address); // L1 now has data but must immediately move to S state
} else if(in_msg.Type == CoherenceResponseType:DATA_I) {
trigger(Event:L1_Data_I, in_msg.Address); // L1 now has data but must immediately move to INV state
} else if(in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:L1_PutAck, in_msg.Address);
} else {
error("Invalid L1 response type");
}
} else {
error("A non-L2 cache sent a response to a L1 cache");
}
}
}
}
// Request InterChip network - request from this L1 cache to the shared L2
in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache) {
if(requestIntraChipL1Network_in.isReady()) {
peek(requestIntraChipL1Network_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if(machineIDToMachineType(in_msg.RequestorMachId) == MachineType:L2Cache) {
if(in_msg.Type == CoherenceRequestType:L1_DG) {
trigger(Event:L1_DownGrade, in_msg.Address); // Force L1 to downgrade to S state
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:L1_INV, in_msg.Address); // L1 must invalidate its modified version
} else if (in_msg.Type == CoherenceRequestType:INV_S) {
trigger(Event:L1_INV_S, in_msg.Address); // L1 must invalidate its shared version
} else {
error("Invalid forwarded request type");
}
} else {
error("A non-L2 cache sent a request to a L1 cache");
}
}
}
}
// Mandatory Queue betweens Node's CPU and it's L1 caches
in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, CacheMsg) {
// Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
if (in_msg.Type == CacheRequestType:IFETCH) {
// ** INSTRUCTION ACCESS ***
// Check to see if it is in the OTHER L1
if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_WriteBack, in_msg.Address);
}
if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
// The tag matches for the L1, so the L1 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
} else {
if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
// L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
}
}
} else {
// *** DATA ACCESS ***
// Check to see if it is in the OTHER L1
if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_WriteBack, in_msg.Address);
}
if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
// The tag matches for the L1, so the L1 asks the L2 for it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
} else {
if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
// L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
}
}
}
}
}
}
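
The cross-check between the two L1s above reduces to a small priority decision per mandatory request: a hit in the wrong L1 forces a writeback to the shared L2, a hit or a free slot in the right L1 lets the request go to the L2, and anything else evicts a victim first. The sketch below is a simplified Python model for illustration only; TinyCache, classify_l1_request and victim_for are invented stand-ins (victim_for plays the role of cacheProbe), and the real port may raise the writeback and the fetch as separate triggers.

class TinyCache:
    def __init__(self, capacity):
        self.capacity = capacity
        self.tags = []                         # resident block addresses

    def has_space_for(self, addr):
        return len(self.tags) < self.capacity

    def victim_for(self, addr):
        return self.tags[0]                    # stand-in for cacheProbe(addr)

def classify_l1_request(req_type, addr, icache, dcache):
    """Pick the event the mandatory-queue port above would trigger."""
    mine, other = (icache, dcache) if req_type == "IFETCH" else (dcache, icache)
    if addr in other.tags:
        return "L1_WriteBack", addr            # block sits in the wrong L1
    if addr in mine.tags or mine.has_space_for(addr):
        return ("Ifetch" if req_type == "IFETCH" else "Load/Store"), addr
    return "L1_Replacement", mine.victim_for(addr)   # make room before fetching

icache, dcache = TinyCache(2), TinyCache(2)
dcache.tags = ["A", "B"]
print(classify_l1_request("IFETCH", "A", icache, dcache))   # ('L1_WriteBack', 'A')
print(classify_l1_request("LD", "C", icache, dcache))       # ('L1_Replacement', 'A')
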
// ACTIONS
action(a_issueGETS, "a", desc="Issue GETS") {
peek(mandatoryQueue_in, CacheMsg) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.RequestorMachId := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.L1CacheStateStr := getStateStr(address);
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(b_issueGETX, "b", desc="Issue GETX") {
peek(mandatoryQueue_in, CacheMsg) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.RequestorMachId := machineID;
DEBUG_EXPR(machineID);
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.L1CacheStateStr := getStateStr(address);
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
peek(mandatoryQueue_in, CacheMsg) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:UPGRADE;
out_msg.RequestorMachId := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.L1CacheStateStr := getStateStr(address);
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(f_issueGETINSTR, "g", desc="Issue GETINSTR") {
peek(mandatoryQueue_in, CacheMsg) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GET_INSTR;
out_msg.RequestorMachId := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.L1CacheStateStr := getStateStr(address);
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(d_issuePUTX, "d", desc="Issue PUTX") {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.RequestorMachId := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
DEBUG_EXPR(out_msg.DataBlk);
out_msg.MessageSize := MessageSizeType:Data;
out_msg.L1CacheStateStr := getStateStr(address);
}
}
action(q_issuePUTS, "q", desc="Issue PUTS") {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTS;
out_msg.RequestorMachId := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
out_msg.MessageSize := MessageSizeType:Data;
out_msg.L1CacheStateStr := getStateStr(address);
}
}
// L1 responding to a L2 request with data
action(e_dataFromL1CacheToL2Cache, "e", desc="Send data from L1 cache to L2 Cache") {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.SenderMachId := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
out_msg.DataBlk := getL1CacheEntry(address).DataBlk;
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
DEBUG_EXPR(out_msg.DataBlk);
out_msg.MessageSize := MessageSizeType:Data;
}
}
action(f_dataFromTBEToL2Cache, "f", desc="Send data from L1_TBE to L2 Cache") {
peek(requestIntraChipL1Network_in, RequestMsg) {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.SenderMachId := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
out_msg.DataBlk := L1_TBEs[in_msg.Address].DataBlk;
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
DEBUG_EXPR(out_msg.DataBlk);
out_msg.MessageSize := MessageSizeType:Data;
}
}
}
// L1 responding to a L2 request with an invalidation ack
action(t_sendInvAckToL2Cache, "t", desc="Send Invalidation ack to L2 Cache") {
enqueue(responseIntraChipL1Network_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:INV_ACK;
out_msg.SenderMachId := machineID;
out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
DEBUG_EXPR(address);
DEBUG_EXPR(out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
}
}
action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
sequencer.readCallback(address, getL1CacheEntry(address).DataBlk);
}
action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
DEBUG_EXPR(getL1CacheEntry(address).DataBlk);
sequencer.writeCallback(address, getL1CacheEntry(address).DataBlk);
}
action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
check_allocate(L1_TBEs);
L1_TBEs.allocate(address);
L1_TBEs[address].isPrefetch := false;
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
mandatoryQueue_in.dequeue();
}
action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
}
action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
profileMsgDelay(3, responseIntraChipL1Network_in.dequeue_getDelayCycles());
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
L1_TBEs.deallocate(address);
}
action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
peek(responseIntraChipL1Network_in, ResponseMsg) {
getL1CacheEntry(address).DataBlk := in_msg.DataBlk;
}
}
action(x_copyDataFromL1CacheToTBE, "x", desc="Copy data from cache to TBE") {
L1_TBEs[address].DataBlk := getL1CacheEntry(address).DataBlk;
}
action(z_stall, "z", desc="Stall") {
}
action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory.deallocate(address);
} else {
L1IcacheMemory.deallocate(address);
}
}
action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
if (L1DcacheMemory.isTagPresent(address) == false) {
L1DcacheMemory.allocate(address);
}
}
action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
if (L1IcacheMemory.isTagPresent(address) == false) {
L1IcacheMemory.allocate(address);
}
}
//*****************************************************
// TRANSITIONS
//*****************************************************
// Transitions for Load/Store/Replacement/WriteBack from transient states
transition({L1_IS, L1_IM, L1_ISI, L1_IMI, L1_IMS, L1_IMSI, L1_SI, L1_MI}, {Load, Ifetch, Store, L1_Replacement, L1_WriteBack}) {
z_stall;
}
// Transitions from Idle
transition({NP,L1_I}, {L1_Replacement, L1_WriteBack}) {
ff_deallocateL1CacheBlock;
}
transition({NP,L1_I}, Load, L1_IS) {
oo_allocateL1DCacheBlock;
i_allocateTBE;
a_issueGETS;
k_popMandatoryQueue;
}
transition({NP,L1_I}, Ifetch, L1_IS) {
pp_allocateL1ICacheBlock;
i_allocateTBE;
f_issueGETINSTR;
k_popMandatoryQueue;
}
transition({NP,L1_I}, Store, L1_IM) {
oo_allocateL1DCacheBlock;
i_allocateTBE;
b_issueGETX;
k_popMandatoryQueue;
}
// Transitions from Shared
transition({L1_S}, {Load,Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(L1_S, Store, L1_IM) {
i_allocateTBE;
c_issueUPGRADE;
k_popMandatoryQueue;
}
transition(L1_S, {L1_Replacement,L1_WriteBack}, L1_SI) {
i_allocateTBE;
q_issuePUTS;
x_copyDataFromL1CacheToTBE;
ff_deallocateL1CacheBlock;
}
transition(L1_S, L1_INV_S, L1_I) {
t_sendInvAckToL2Cache;
l_popRequestQueue;
}
// Transitions from Modified
transition(L1_M, {Load, Ifetch}) {
h_load_hit;
k_popMandatoryQueue;
}
transition(L1_M, Store) {
hh_store_hit;
k_popMandatoryQueue;
}
transition(L1_M, {L1_Replacement, L1_WriteBack}, L1_MI) {
i_allocateTBE;
d_issuePUTX;
x_copyDataFromL1CacheToTBE;
ff_deallocateL1CacheBlock;
}
transition(L1_M, L1_INV, L1_I) {
e_dataFromL1CacheToL2Cache;
l_popRequestQueue;
}
transition(L1_M, L1_DownGrade, L1_S) {
e_dataFromL1CacheToL2Cache;
l_popRequestQueue;
}
// Transitions from L1_IS
transition(L1_IS, L1_INV_S, L1_ISI) {
t_sendInvAckToL2Cache;
l_popRequestQueue;
}
transition(L1_IS, L1_Data, L1_S) {
u_writeDataToL1Cache;
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
transition(L1_IS, L1_Data_I, L1_I) {
u_writeDataToL1Cache;
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
// Transitions from L1_ISI
transition(L1_ISI, L1_Data, L1_I) {
u_writeDataToL1Cache;
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
// Transitions from L1_IM
transition(L1_IM, L1_INV, L1_IMI) { // we don't have to respond immediately because we know the data is coming
l_popRequestQueue;
}
transition(L1_IM, L1_INV_S) {
t_sendInvAckToL2Cache;
l_popRequestQueue;
}
transition(L1_IM, L1_DownGrade, L1_IMS) {
l_popRequestQueue;
}
transition(L1_IM, L1_Data, L1_M) {
u_writeDataToL1Cache;
hh_store_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
}
transition(L1_IM, L1_Data_S, L1_S) {
u_writeDataToL1Cache;
hh_store_hit;
s_deallocateTBE;
e_dataFromL1CacheToL2Cache;
o_popIncomingResponseQueue;
}
transition(L1_IM, L1_Data_I, L1_I) {
u_writeDataToL1Cache;
hh_store_hit;
s_deallocateTBE;
e_dataFromL1CacheToL2Cache;
o_popIncomingResponseQueue;
}
// Transitions from L1_IMI - data should arrive and no requests are possible
transition(L1_IMI, L1_Data, L1_I) {
u_writeDataToL1Cache;
hh_store_hit;
s_deallocateTBE;
e_dataFromL1CacheToL2Cache;
o_popIncomingResponseQueue;
}
// Transitions from L1_IMS
transition(L1_IMS, L1_Data, L1_S) {
u_writeDataToL1Cache;
hh_store_hit;
s_deallocateTBE;
e_dataFromL1CacheToL2Cache;
o_popIncomingResponseQueue;
}
transition(L1_IMS, L1_INV_S, L1_IMSI) {
l_popRequestQueue;
}
// Transitions from L1_IMSI
transition(L1_IMSI, L1_Data, L1_I) {
u_writeDataToL1Cache;
hh_store_hit;
s_deallocateTBE;
e_dataFromL1CacheToL2Cache;
o_popIncomingResponseQueue;
}
// Transitions from L1_SI
transition(L1_SI, L1_INV_S) {
t_sendInvAckToL2Cache;
l_popRequestQueue;
}
transition(L1_SI, L1_PutAck, L1_I) {
s_deallocateTBE;
o_popIncomingResponseQueue;
}
// Transitions from L1_MI
transition(L1_MI, L1_INV) {
f_dataFromTBEToL2Cache;
l_popRequestQueue;
}
transition(L1_MI, L1_DownGrade, L1_SI) {
f_dataFromTBEToL2Cache;
l_popRequestQueue;
}
transition(L1_MI, L1_PutAck, L1_I) {
s_deallocateTBE;
o_popIncomingResponseQueue;
}
}

File diff suppressed because it is too large


@ -1,497 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
machine(Directory, "MOSI Directory Optimized") {
// ** OUT QUEUES **
MessageBuffer dummyFrom0, network="To", virtual_network="0", ordered="false"; // dummy buffer that shouldn't be used
MessageBuffer dummyFrom1, network="To", virtual_network="1", ordered="false"; // dummy buffer that shouldn't be used
// Dir -> mod-L2 bank - Must be true for the 'opt' and 'GS' protocols BE CAREFUL HERE!!!
MessageBuffer forwardedRequestFromDir, network="To", virtual_network="2", ordered="true";
MessageBuffer responseFromDir, network="To", virtual_network="3", ordered="false"; // Dir -> mod-L2 bank
MessageBuffer dummyFrom4, network="To", virtual_network="4", ordered="false"; // dummy buffer that shouldn't be used
// ** IN QUEUES **
MessageBuffer dummyTo0, network="From", virtual_network="0", ordered="false"; // dummy buffer that shouldn't be used
MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false"; // a mod-L2 bank -> this Dir
MessageBuffer dummyTo2, network="From", virtual_network="2", ordered="false"; // dummy buffer that shouldn't be used
MessageBuffer dummyTo3, network="From", virtual_network="3", ordered="false"; // dummy buffer that shouldn't be used
MessageBuffer finalAckToDir, network="From", virtual_network="4", ordered="false"; // a mod-L2 bank -> this Dir
// STATES
enumeration(State, desc="Directory states", default="Directory_State_NP") {
// Base states
NP, desc="Not present";
I, desc="Idle";
S, desc="Shared";
O, desc="Owned";
M, desc="Modified", format="!b";
OO, desc="transient state of O->GetS/GetInstr->O";
OM, desc="transient state of O->GetX->M";
MO, desc="transient state of M->GetS/GetInstr->O";
MM, desc="transient state of M->GetX->M";
}
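// The transient states track a request that has been forwarded to the current
// owner: the directory sits in OO/OM/MO/MM until the requestor's FinalAck comes
// back (see the FinalAck transitions below), recycling any conflicting requests
// onto its own request queue in the meantime.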
// Events
enumeration(Event, desc="Directory events") {
GETS, desc="A GETS arrives";
GET_INSTR, desc="";
GETX_Owner, desc="A GETX arrives, requestor is owner";
GETX_NotOwner, desc="A GETX arrives, requestor is not owner";
PUTX_Owner, "PUTX (requestor is owner)", desc="A PUTX arrives, requestor is owner";
PUTX_NotOwner, "PUTX (requestor not owner)",desc="A PUTX arrives, requestor is not owner";
FinalAck, desc="";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="...") {
State DirectoryState, desc="Directory state";
Set Sharers, desc="Set of sharers - must be L2 caches"; // Note this is a Set and not a NetDest for space concerns
bool DirOwner, default="true", desc="Is dir owner?";
NodeID ProcOwner, default="0", desc="Processor owner"; // Note this is an int for space concerns
DataBlock DataBlk, desc="data for the block";
}
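// DirOwner == true means memory (the directory) itself owns the block; once an
// L2 takes ownership, DirOwner is cleared and ProcOwner records that L2's chip ID
// (see f_setOwnerToRequestor / p_clearOwner below).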
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
// ** OBJECTS **
DirectoryMemory directory, constructor_hack="i";
State getState(Address addr) {
if (directory.isPresent(addr)) {
return directory[addr].DirectoryState;
}
return State:NP;
}
std::string getDirStateStr(Address addr) {
return Directory_State_to_string(getState(addr));
}
std::string getRequestTypeStr(CoherenceRequestType type) {
return CoherenceRequestType_to_string(type);
}
void setState(Address addr, State state) {
if (directory.isPresent(addr)) {
DEBUG_EXPR(addr);
DEBUG_EXPR(directory[addr].DirectoryState);
directory[addr].DirectoryState := state;
DEBUG_EXPR(directory[addr].DirectoryState);
DEBUG_EXPR(state);
}
}
// ** OUT_PORTS **
out_port(forwardedRequestNetwork_out, RequestMsg, forwardedRequestFromDir);
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
out_port(ownRequestQueue_out, RequestMsg, requestToDir);
// ** IN_PORTS **
in_port(dummyTo0_in, RequestMsg, dummyTo0) {
if (dummyTo0_in.isReady()) {
peek(dummyTo0_in, RequestMsg) {
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(id);
DEBUG_EXPR(in_msg.Type);
DEBUG_EXPR(getState(in_msg.Address));
DEBUG_EXPR(in_msg.RequestorMachId);
}
error("dummyTo0 port should not be used");
}
}
in_port(dummyTo2_in, RequestMsg, dummyTo2) {
if (dummyTo2_in.isReady()) {
peek(dummyTo2_in, RequestMsg) {
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(id);
DEBUG_EXPR(in_msg.Type);
DEBUG_EXPR(getState(in_msg.Address));
DEBUG_EXPR(in_msg.RequestorMachId);
}
error("dummyTo2 port should not be used");
}
}
in_port(dummyTo3_in, RequestMsg, dummyTo3) {
if (dummyTo3_in.isReady()) {
peek(dummyTo3_in, RequestMsg) {
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(id);
DEBUG_EXPR(in_msg.Type);
DEBUG_EXPR(getState(in_msg.Address));
DEBUG_EXPR(in_msg.RequestorMachId);
}
error("dummyTo3 port should not be used");
}
}
in_port(finalAckNetwork_in, ResponseMsg, finalAckToDir){
if(finalAckNetwork_in.isReady()){
peek(finalAckNetwork_in, ResponseMsg){
assert(in_msg.Destination.isElement(machineID));
if(in_msg.Type == CoherenceResponseType:FINALACK){
trigger(Event:FinalAck, in_msg.Address);
} else {
error("Invalid message");
}
}
}
}
in_port(requestNetwork_in, RequestMsg, requestToDir) {
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
trigger(Event:GET_INSTR, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
if(directory[in_msg.Address].DirOwner == false &&
L2CacheMachIDToChipID(in_msg.RequestorMachId) == directory[in_msg.Address].ProcOwner) {
trigger(Event:GETX_Owner, in_msg.Address);
} else {
trigger(Event:GETX_NotOwner, in_msg.Address);
}
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
if (directory[in_msg.Address].DirOwner == false &&
L2CacheMachIDToChipID(in_msg.RequestorMachId) == directory[in_msg.Address].ProcOwner) {
trigger(Event:PUTX_Owner, in_msg.Address);
} else {
trigger(Event:PUTX_NotOwner, in_msg.Address);
}
} else {
error("Invalid message");
}
}
}
}
// Actions
// a_addRequestorToSharers
action(a_addRequestorToSharers, "a", desc="Add requestor to list of sharers") {
peek(requestNetwork_in, RequestMsg) {
directory[address].Sharers.add(L2CacheMachIDToChipID(in_msg.RequestorMachId));
DEBUG_EXPR(directory[address].Sharers);
}
}
// b_dataToRequestor
action(b_dataToRequestor, "b", desc="Send data to requestor") {
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.SenderMachId := machineID;
if(in_msg.Type == CoherenceRequestType:GETX) {
DEBUG_EXPR(directory[address].Sharers);
DEBUG_EXPR(directory[address].Sharers.count());
out_msg.NumPendingExtAcks := directory[address].Sharers.count();
} else {
out_msg.NumPendingExtAcks := 0; // don't need to send pending ack count to GETS requestor
}
out_msg.Destination.add(in_msg.RequestorMachId);
out_msg.DataBlk := directory[address].DataBlk;
DEBUG_EXPR(out_msg.Address);
DEBUG_EXPR(out_msg.DataBlk);
DEBUG_EXPR(out_msg.NumPendingExtAcks);
DEBUG_EXPR(out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Data;
}
}
}
// d_forwardRequestToOwner
action(d_forwardRequestToOwner, "d", desc="Forward request to owner") {
peek(requestNetwork_in, RequestMsg) {
enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.RequestorMachId := in_msg.RequestorMachId;
out_msg.Destination.add(map_L2ChipId_to_L2Cache(out_msg.Address, directory[address].ProcOwner));
DEBUG_EXPR(out_msg.Destination);
if(in_msg.Type == CoherenceRequestType:GETX) {
out_msg.NumPendingExtAcks := directory[address].Sharers.count();
} else {
out_msg.NumPendingExtAcks := 0; // don't need to send pending ack count to GETS requestor
}
out_msg.MessageSize := MessageSizeType:Control;
DEBUG_EXPR(out_msg.Address);
DEBUG_EXPR(out_msg.NumPendingExtAcks);
DEBUG_EXPR(out_msg.Destination);
}
}
}
action(f_setOwnerToRequestor, "f", desc="Set owner equal to requestor") {
peek(requestNetwork_in, RequestMsg) {
directory[address].ProcOwner := L2CacheMachIDToChipID(in_msg.RequestorMachId);
directory[address].DirOwner := false;
}
DEBUG_EXPR(directory[address].ProcOwner);
}
action(g_clearSharers, "g", desc="Clear list of sharers") {
directory[address].Sharers.clear();
}
// currently done via multicast message
action(h_invToSharers, "h", desc="Send INVs to all sharers") {
peek(requestNetwork_in, RequestMsg) {
DEBUG_EXPR(directory[address].Sharers.count());
if(directory[address].Sharers.count() != 0){
enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.RequestorMachId := in_msg.RequestorMachId;
DEBUG_EXPR(directory[address].Sharers);
out_msg.Destination := getMultiStaticL2BankNetDest(address, directory[address].Sharers);
out_msg.MessageSize := MessageSizeType:Control;
}
}
}
DEBUG_EXPR(directory[address].Sharers);
}
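// "Multicast" here means a single INV message whose NetDest fans out to every
// sharing L2 bank, built from the Sharers set via getMultiStaticL2BankNetDest.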
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
profileMsgDelay(1, requestNetwork_in.dequeue_getDelayCycles());
}
action(l_writeRequestDataToMemory, "l", desc="Write PUTX/DWN data to memory") {
peek(requestNetwork_in, RequestMsg) {
directory[in_msg.Address].DataBlk := in_msg.DataBlk;
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(in_msg.DataBlk);
}
}
action(n_writebackAckToRequestor, "n", desc="Send WB_ack to requestor") {
peek(requestNetwork_in, RequestMsg) {
// This needs to be DIRECTORY_LATENCY to keep the queue fifo
enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.RequestorMachId := machineID;
out_msg.Destination.add(in_msg.RequestorMachId);
out_msg.MessageSize := MessageSizeType:Control;
}
}
}
action(m_forwardExclusiveRequestToOwner, "m", desc="Send EXE_ack to requestor") {
peek(requestNetwork_in, RequestMsg) {
// This needs to be DIRECTORY_LATENCY to keep the queue fifo
enqueue(forwardedRequestNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:EXE_ACK;
out_msg.RequestorMachId := machineID;
out_msg.Destination.add(in_msg.RequestorMachId);
out_msg.MessageSize := MessageSizeType:Control;
}
}
}
action(uu_profile, "u/", desc="Profile this transition.") {
peek(requestNetwork_in, RequestMsg) {
profile_request(in_msg.L1CacheStateStr, in_msg.L2CacheStateStr, getDirStateStr(address), getRequestTypeStr(in_msg.Type));
}
}
action(p_clearOwner, "p", desc="Clear owner") {
directory[address].DirOwner := true; // set owner equal to dir
}
action(r_addOwnerToSharers, "r", desc="Add owner to list of sharers") {
DEBUG_EXPR(directory[address].ProcOwner);
directory[address].Sharers.add(directory[address].ProcOwner);
DEBUG_EXPR(directory[address].Sharers);
}
action(t_removeOwnerFromSharers, "t", desc="Remove owner from list of sharers") {
DEBUG_EXPR(directory[address].ProcOwner);
directory[address].Sharers.remove(directory[address].ProcOwner);
DEBUG_EXPR(directory[address].Sharers);
}
action(u_removeRequestorFromSharers, "u", desc="Remove requestor from list of sharers") {
peek(requestNetwork_in, RequestMsg) {
DEBUG_EXPR(in_msg.RequestorMachId);
directory[address].Sharers.remove(L2CacheMachIDToChipID(in_msg.RequestorMachId));
DEBUG_EXPR(directory[address].Sharers);
}
}
action(x_recycleRequest, "x", desc=""){
peek(requestNetwork_in, RequestMsg) {
enqueue(ownRequestQueue_out, RequestMsg, latency="RECYCLE_LATENCY"){
out_msg := in_msg;
}
}
}
action(hh_popFinalAckQueue, "\h", desc=""){
profileMsgDelay(4, finalAckNetwork_in.dequeue_getDelayCycles());
}
//action(z_stall, "z", desc=""){
//}
// TRANSITIONS
transition({OM,MM}, FinalAck, M){
hh_popFinalAckQueue;
}
transition({OO,MO}, FinalAck, O){
hh_popFinalAckQueue;
}
transition({OO, OM, MO, MM}, {GETS, GET_INSTR, GETX_Owner, GETX_NotOwner, PUTX_Owner}){
x_recycleRequest;
j_popIncomingRequestQueue;
// z_stall;
}
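// Recycling re-enqueues the conflicting request on the directory's own request
// queue after RECYCLE_LATENCY instead of stalling in place; the commented-out
// z_stall call above is presumably the stall-in-place alternative.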
// ---------------------------
transition({NP, I, S, M, O, OO, OM, MO, MM}, PUTX_NotOwner) {
uu_profile;
n_writebackAckToRequestor;
j_popIncomingRequestQueue;
}
// Transitions from Idle
transition({NP,I}, {GETS,GET_INSTR}, S) {
uu_profile;
a_addRequestorToSharers;
b_dataToRequestor;
j_popIncomingRequestQueue;
}
transition({NP,I}, GETX_NotOwner, M) {
uu_profile;
f_setOwnerToRequestor;
b_dataToRequestor;
j_popIncomingRequestQueue;
}
// Transitions from Shared
transition(S, {GETS,GET_INSTR}) {
uu_profile;
a_addRequestorToSharers;
b_dataToRequestor;
j_popIncomingRequestQueue;
}
transition(S, GETX_NotOwner, M) {
uu_profile;
u_removeRequestorFromSharers;
b_dataToRequestor;
f_setOwnerToRequestor;
h_invToSharers;
g_clearSharers;
j_popIncomingRequestQueue;
}
// Transitions from Owned
transition(O, {GETS,GET_INSTR}, OO) {
uu_profile;
a_addRequestorToSharers;
d_forwardRequestToOwner;
j_popIncomingRequestQueue;
}
transition(O, {GETX_NotOwner, GETX_Owner}, OM) {
uu_profile;
u_removeRequestorFromSharers;
t_removeOwnerFromSharers;
d_forwardRequestToOwner;
f_setOwnerToRequestor;
h_invToSharers;
g_clearSharers;
j_popIncomingRequestQueue;
}
transition(O, PUTX_Owner, S) {
uu_profile;
u_removeRequestorFromSharers;
l_writeRequestDataToMemory;
n_writebackAckToRequestor;
p_clearOwner;
j_popIncomingRequestQueue;
}
// Transitions from Modified
transition(M, {GETS,GET_INSTR}, MO) {
uu_profile;
a_addRequestorToSharers;
r_addOwnerToSharers;
d_forwardRequestToOwner;
j_popIncomingRequestQueue;
}
transition(M, GETX_NotOwner, MM) {
uu_profile;
d_forwardRequestToOwner;
f_setOwnerToRequestor;
j_popIncomingRequestQueue;
}
transition(M, GETX_Owner) {
uu_profile;
m_forwardExclusiveRequestToOwner;
j_popIncomingRequestQueue;
}
transition(M, PUTX_Owner, I) {
uu_profile;
l_writeRequestDataToMemory;
n_writebackAckToRequestor;
p_clearOwner;
j_popIncomingRequestQueue;
}
}

@ -1,115 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*
*/
// CoherenceRequestType
enumeration(CoherenceRequestType, desc="...") {
GETX, desc="Get eXclusive";
UPGRADE, desc="UPGRADE to exclusive";
GETS, desc="Get Shared";
GET_INSTR, desc="Get Instruction";
PUTX, desc="Put eXclusive";
PUTS, desc="Put Shared";
INV, desc="INValidate";
INV_S, desc="INValidate the shared version";
L1_DG, desc="L1 cache DownGrade";
WB_ACK, desc="Write Back ACKnowledgment";
EXE_ACK, desc="EXclusivE ACKnowledgment";
}
// CoherenceResponseType
enumeration(CoherenceResponseType, desc="...") {
ACK, desc="ACKnowledgment";
INV_ACK, desc="INValidation ACKnowledgment";
DG_ACK, desc="DownGrade ACKnowledgment";
NACK, desc="Negative ACKnowledgment";
DATA, desc="Data";
DATA_S, desc="Data to L1 cache, then immediately go to shared state";
DATA_I, desc="Data to L1 cache, then immediately go to inv state";
FINALACK, desc="";
}
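// DATA_S / DATA_I deliver data together with the state the receiving L1 must
// settle into; they presumably correspond to the L1_Data_S / L1_Data_I events
// handled by the L1 cache controller.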
// RequestMsg
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
AccessModeType AccessMode, desc="user/supervisor access type";
MachineID RequestorMachId, desc="Which component made the request";
NetDest Destination, desc="What components receive the request, includes MachineType and num";
DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
int NumPendingExtAcks, desc="Number of acks to wait for"; // Needed for forwarded responses only
MessageSizeType MessageSize, desc="size category of the message";
std::string L1CacheStateStr, desc="describes L1 cache block state";
std::string L2CacheStateStr, desc="describes L2 cache block state";
PrefetchBit Prefetch, desc="Is this a prefetch request";
}
// ResponseMsg
structure(ResponseMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID SenderMachId, desc="What component sent the data";
NetDest Destination, desc="Node to whom the data is sent";
DataBlock DataBlk, desc="data for the cache line";
int NumPendingExtAcks, desc="Number of acks to wait for";
MessageSizeType MessageSize, desc="size category of the message";
}
GenericRequestType convertToGenericType(CoherenceRequestType type) {
if(type == CoherenceRequestType:PUTX) {
return GenericRequestType:PUTX;
} else if(type == CoherenceRequestType:GETS) {
return GenericRequestType:GETS;
} else if(type == CoherenceRequestType:GET_INSTR) {
return GenericRequestType:GET_INSTR;
} else if(type == CoherenceRequestType:GETX) {
return GenericRequestType:GETX;
} else if(type == CoherenceRequestType:UPGRADE) {
return GenericRequestType:UPGRADE;
} else if(type == CoherenceRequestType:PUTS) {
return GenericRequestType:PUTS;
} else if(type == CoherenceRequestType:INV) {
return GenericRequestType:INV;
} else if(type == CoherenceRequestType:INV_S) {
return GenericRequestType:INV_S;
} else if(type == CoherenceRequestType:L1_DG) {
return GenericRequestType:DOWNGRADE;
} else if(type == CoherenceRequestType:WB_ACK) {
return GenericRequestType:WB_ACK;
} else if(type == CoherenceRequestType:EXE_ACK) {
return GenericRequestType:EXE_ACK;
} else {
DEBUG_EXPR(type);
error("invalid CoherenceRequestType");
}
}

@ -1,8 +0,0 @@
# protocol briefly described in
# doc/MSI_MOSI_CMP_directory_2level-protocol-description.txt
MSI_MOSI_CMP_directory-msg.sm
MSI_MOSI_CMP_directory-L1cache.sm
MSI_MOSI_CMP_directory-L2cache.sm
MSI_MOSI_CMP_directory-dir.sm
standard_CMP-protocol.sm

@ -1,39 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
// global protocol features
global(Protocol, desc="Global properties of this protocol",
interface = "AbstractProtocol") {
bool TwoLevelCache := false;
}

@ -1,39 +0,0 @@
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id$
*/
// global protocol features
global(Protocol, desc="Global properties of this protocol",
interface = "AbstractProtocol") {
bool TwoLevelCache := true;
}