ruby: rename variables Addr to addr
Avoid clash between type Addr and variable name Addr.
parent 93c173a95e
commit 9ea5d9cad9
28 changed files with 746 additions and 746 deletions
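The pattern of the change, taking the TBE definition from the L0 cache controller below as a minimal sketch (surrounding fields elided): each field or message member keeps its Address type but its name drops the capital, so the identifier no longer reads like a type name.

// before: variable name Addr mirrors a type name
structure(TBE, desc="...") {
  Address Addr, desc="Physical address for this TBE";
}

// after: lower-case addr, type left untouched
structure(TBE, desc="...") {
  Address addr, desc="Physical address for this TBE";
}

Every in_msg.Addr, out_msg.Addr, and block_on="Addr" reference in the protocol files below changes the same way.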
@@ -119,7 +119,7 @@ machine(L0Cache, "MESI Directory L0 Cache")

  // TBE fields
  structure(TBE, desc="...") {
- Address Addr, desc="Physical address for this TBE";
+ Address addr, desc="Physical address for this TBE";
  State TBEState, desc="Transient state";
  DataBlock DataBlk, desc="Buffer for the data block";
  bool Dirty, default="false", desc="data is dirty";
@@ -256,30 +256,30 @@ machine(L0Cache, "MESI Directory L0 Cache")
  // Messages for this L0 cache from the L1 cache
  in_port(messgeBuffer_in, CoherenceMsg, bufferFromL1, rank = 1) {
  if (messgeBuffer_in.isReady()) {
- peek(messgeBuffer_in, CoherenceMsg, block_on="Addr") {
+ peek(messgeBuffer_in, CoherenceMsg, block_on="addr") {
  assert(in_msg.Dest == machineID);

- Entry cache_entry := getCacheEntry(in_msg.Addr);
- TBE tbe := TBEs[in_msg.Addr];
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];

  if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
- trigger(Event:Data_Exclusive, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
  } else if(in_msg.Class == CoherenceClass:DATA) {
- trigger(Event:Data, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Data, in_msg.addr, cache_entry, tbe);
  } else if (in_msg.Class == CoherenceClass:ACK) {
- trigger(Event:Ack, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
  } else if (in_msg.Class == CoherenceClass:WB_ACK) {
- trigger(Event:WB_Ack, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
  } else if (in_msg.Class == CoherenceClass:INV) {
- trigger(Event:Inv, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
  } else if (in_msg.Class == CoherenceClass:GETX ||
  in_msg.Class == CoherenceClass:UPGRADE) {
  // upgrade transforms to GETX due to race
- trigger(Event:Fwd_GETX, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
  } else if (in_msg.Class == CoherenceClass:GETS) {
- trigger(Event:Fwd_GETS, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
  } else if (in_msg.Class == CoherenceClass:GET_INSTR) {
- trigger(Event:Fwd_GET_INSTR, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
  } else {
  error("Invalid forwarded request type");
  }
@@ -363,7 +363,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
  action(a_issueGETS, "a", desc="Issue GETS") {
  peek(mandatoryQueue_in, RubyRequest) {
  enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Class := CoherenceClass:GETS;
  out_msg.Sender := machineID;
  out_msg.Dest := createMachineID(MachineType:L1Cache, version);
@@ -378,7 +378,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
  action(b_issueGETX, "b", desc="Issue GETX") {
  peek(mandatoryQueue_in, RubyRequest) {
  enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Class := CoherenceClass:GETX;
  out_msg.Sender := machineID;
  DPRINTF(RubySlicc, "%s\n", machineID);
@@ -395,7 +395,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
  action(c_issueUPGRADE, "c", desc="Issue GETX") {
  peek(mandatoryQueue_in, RubyRequest) {
  enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Class := CoherenceClass:UPGRADE;
  out_msg.Sender := machineID;
  out_msg.Dest := createMachineID(MachineType:L1Cache, version);
@@ -411,7 +411,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
  action(f_sendDataToL1, "f", desc="send data to the L2 cache") {
  enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Class := CoherenceClass:INV_DATA;
  out_msg.DataBlk := cache_entry.DataBlk;
  out_msg.Dirty := cache_entry.Dirty;
@@ -425,7 +425,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
  action(fi_sendInvAck, "fi", desc="send data to the L2 cache") {
  peek(messgeBuffer_in, CoherenceMsg) {
  enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Class := CoherenceClass:INV_ACK;
  out_msg.Sender := machineID;
  out_msg.Dest := createMachineID(MachineType:L1Cache, version);
@@ -444,7 +444,7 @@ machine(L0Cache, "MESI Directory L0 Cache")
  action(g_issuePUTX, "g", desc="send data to the L2 cache") {
  enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Class := CoherenceClass:PUTX;
  out_msg.Dirty := cache_entry.Dirty;
  out_msg.Sender:= machineID;

@@ -133,7 +133,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")

  // TBE fields
  structure(TBE, desc="...") {
- Address Addr, desc="Physical address for this TBE";
+ Address addr, desc="Physical address for this TBE";
  State TBEState, desc="Transient state";
  DataBlock DataBlk, desc="Buffer for the data block";
  bool Dirty, default="false", desc="data is dirty";
@@ -270,30 +270,30 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  peek(responseNetwork_in, ResponseMsg) {
  assert(in_msg.Destination.isElement(machineID));

- Entry cache_entry := getCacheEntry(in_msg.Addr);
- TBE tbe := TBEs[in_msg.Addr];
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];

  if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
- trigger(Event:Data_Exclusive, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
  } else if(in_msg.Type == CoherenceResponseType:DATA) {
- if (getState(tbe, cache_entry, in_msg.Addr) == State:IS &&
+ if (getState(tbe, cache_entry, in_msg.addr) == State:IS &&
  machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {

- trigger(Event:DataS_fromL1, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:DataS_fromL1, in_msg.addr, cache_entry, tbe);

  } else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
- trigger(Event:Data_all_Acks, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Data_all_Acks, in_msg.addr, cache_entry, tbe);
  } else {
- trigger(Event:Data, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Data, in_msg.addr, cache_entry, tbe);
  }
  } else if (in_msg.Type == CoherenceResponseType:ACK) {
  if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
- trigger(Event:Ack_all, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
  } else {
- trigger(Event:Ack, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
  }
  } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
- trigger(Event:WB_Ack, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
  } else {
  error("Invalid L1 response type");
  }
@@ -306,30 +306,30 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  if(requestNetwork_in.isReady()) {
  peek(requestNetwork_in, RequestMsg) {
  assert(in_msg.Destination.isElement(machineID));
- Entry cache_entry := getCacheEntry(in_msg.Addr);
- TBE tbe := TBEs[in_msg.Addr];
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];

  if (in_msg.Type == CoherenceRequestType:INV) {
  if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
- trigger(Event:L0_Invalidate_Else, in_msg.Addr,
+ trigger(Event:L0_Invalidate_Else, in_msg.addr,
  cache_entry, tbe);
  } else {
- trigger(Event:Inv, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
  }
  } else if (in_msg.Type == CoherenceRequestType:GETX ||
  in_msg.Type == CoherenceRequestType:UPGRADE) {
  if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
- trigger(Event:L0_Invalidate_Else, in_msg.Addr,
+ trigger(Event:L0_Invalidate_Else, in_msg.addr,
  cache_entry, tbe);
  } else {
- trigger(Event:Fwd_GETX, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
  }
  } else if (in_msg.Type == CoherenceRequestType:GETS) {
  if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
- trigger(Event:L0_Invalidate_Else, in_msg.Addr,
+ trigger(Event:L0_Invalidate_Else, in_msg.addr,
  cache_entry, tbe);
  } else {
- trigger(Event:Fwd_GETS, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
  }
  } else {
  error("Invalid forwarded request type");
@@ -342,36 +342,36 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  in_port(messageBufferFromL0_in, CoherenceMsg, bufferFromL0, rank = 0) {
  if (messageBufferFromL0_in.isReady()) {
  peek(messageBufferFromL0_in, CoherenceMsg) {
- Entry cache_entry := getCacheEntry(in_msg.Addr);
- TBE tbe := TBEs[in_msg.Addr];
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];

  if(in_msg.Class == CoherenceClass:INV_DATA) {
- trigger(Event:L0_DataAck, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:L0_DataAck, in_msg.addr, cache_entry, tbe);
  } else if (in_msg.Class == CoherenceClass:INV_ACK) {
- trigger(Event:L0_Ack, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:L0_Ack, in_msg.addr, cache_entry, tbe);
  } else {
  if (is_valid(cache_entry)) {
  trigger(mandatory_request_type_to_event(in_msg.Class),
- in_msg.Addr, cache_entry, tbe);
+ in_msg.addr, cache_entry, tbe);
  } else {
- if (cache.cacheAvail(in_msg.Addr)) {
+ if (cache.cacheAvail(in_msg.addr)) {
  // L1 does't have the line, but we have space for it
  // in the L1 let's see if the L2 has it
  trigger(mandatory_request_type_to_event(in_msg.Class),
- in_msg.Addr, cache_entry, tbe);
+ in_msg.addr, cache_entry, tbe);
  } else {
  // No room in the L1, so we need to make room in the L1
  Entry victim_entry :=
- getCacheEntry(cache.cacheProbe(in_msg.Addr));
- TBE victim_tbe := TBEs[cache.cacheProbe(in_msg.Addr)];
+ getCacheEntry(cache.cacheProbe(in_msg.addr));
+ TBE victim_tbe := TBEs[cache.cacheProbe(in_msg.addr)];

  if (is_valid(victim_entry) && inL0Cache(victim_entry.CacheState)) {
  trigger(Event:L0_Invalidate_Own,
- cache.cacheProbe(in_msg.Addr),
+ cache.cacheProbe(in_msg.addr),
  victim_entry, victim_tbe);
  } else {
  trigger(Event:L1_Replacement,
- cache.cacheProbe(in_msg.Addr),
+ cache.cacheProbe(in_msg.addr),
  victim_entry, victim_tbe);
  }
  }
@@ -385,7 +385,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(a_issueGETS, "a", desc="Issue GETS") {
  peek(messageBufferFromL0_in, CoherenceMsg) {
  enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:GETS;
  out_msg.Requestor := machineID;
  out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
@@ -401,7 +401,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(b_issueGETX, "b", desc="Issue GETX") {
  peek(messageBufferFromL0_in, CoherenceMsg) {
  enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:GETX;
  out_msg.Requestor := machineID;
  DPRINTF(RubySlicc, "%s\n", machineID);
@@ -418,7 +418,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(c_issueUPGRADE, "c", desc="Issue GETX") {
  peek(messageBufferFromL0_in, CoherenceMsg) {
  enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:UPGRADE;
  out_msg.Requestor := machineID;
  out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
@@ -435,7 +435,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  peek(requestNetwork_in, RequestMsg) {
  enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := cache_entry.DataBlk;
  out_msg.Dirty := cache_entry.Dirty;
@@ -449,7 +449,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
  enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := cache_entry.DataBlk;
  out_msg.Dirty := cache_entry.Dirty;
@@ -464,7 +464,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  peek(requestNetwork_in, RequestMsg) {
  enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
  assert(is_valid(tbe));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := tbe.DataBlk;
  out_msg.Dirty := tbe.Dirty;
@@ -478,7 +478,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
  enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
  assert(is_valid(tbe));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := tbe.DataBlk;
  out_msg.Dirty := tbe.Dirty;
@@ -492,7 +492,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
  peek(requestNetwork_in, RequestMsg) {
  enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:ACK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(in_msg.Requestor);
@@ -504,7 +504,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
  enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := cache_entry.DataBlk;
  out_msg.Dirty := cache_entry.Dirty;
@@ -518,7 +518,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
  enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
  assert(is_valid(tbe));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := tbe.DataBlk;
  out_msg.Dirty := tbe.Dirty;
@@ -532,7 +532,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(fi_sendInvAck, "fi", desc="send data to the L2 cache") {
  peek(requestNetwork_in, RequestMsg) {
  enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:ACK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(in_msg.Requestor);
@@ -544,7 +544,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")

  action(forward_eviction_to_L0, "\cc", desc="sends eviction information to the processor") {
  enqueue(bufferToL0_out, CoherenceMsg, l1_request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Class := CoherenceClass:INV;
  out_msg.Sender := machineID;
  out_msg.Dest := createMachineID(MachineType:L0Cache, version);
@@ -555,7 +555,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(g_issuePUTX, "g", desc="send data to the L2 cache") {
  enqueue(requestNetwork_out, RequestMsg, l1_response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:PUTX;
  out_msg.Dirty := cache_entry.Dirty;
  out_msg.Requestor:= machineID;
@@ -572,7 +572,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")

  action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
  enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:UNBLOCK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
@@ -584,7 +584,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")

  action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
  enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
@@ -599,7 +599,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
  assert(is_valid(cache_entry));

- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Class := CoherenceClass:DATA;
  out_msg.Sender := machineID;
  out_msg.Dest := createMachineID(MachineType:L0Cache, version);
@@ -612,7 +612,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
  assert(is_valid(cache_entry));

- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Class := CoherenceClass:DATA_EXCLUSIVE;
  out_msg.Sender := machineID;
  out_msg.Dest := createMachineID(MachineType:L0Cache, version);

@@ -50,7 +50,7 @@ enumeration(CoherenceClass, desc="...") {

  // Class for messages sent between the L0 and the L1 controllers.
  structure(CoherenceMsg, desc="...", interface="Message") {
- Address Addr, desc="Physical address of the cache block";
+ Address addr, desc="Physical address of the cache block";
  CoherenceClass Class, desc="Type of message (GetS, GetX, PutX, etc)";
  RubyAccessMode AccessMode, desc="user/supervisor access type";
  MachineID Sender, desc="What component sent this message";
@@ -62,7 +62,7 @@ structure(CoherenceMsg, desc="...", interface="Message") {
  bool functionalRead(Packet *pkt) {
  // Only PUTX messages contains the data block
  if (Class == CoherenceClass:PUTX) {
- return testAndRead(Addr, DataBlk, pkt);
+ return testAndRead(addr, DataBlk, pkt);
  }

  return false;
@@ -71,6 +71,6 @@ structure(CoherenceMsg, desc="...", interface="Message") {
  bool functionalWrite(Packet *pkt) {
  // No check on message type required since the protocol should
  // read data from those messages that contain the block
- return testAndWrite(Addr, DataBlk, pkt);
+ return testAndWrite(addr, DataBlk, pkt);
  }
  }

@@ -134,7 +134,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")

  // TBE fields
  structure(TBE, desc="...") {
- Address Addr, desc="Physical address for this TBE";
+ Address addr, desc="Physical address for this TBE";
  State TBEState, desc="Transient state";
  DataBlock DataBlk, desc="Buffer for the data block";
  bool Dirty, default="false", desc="data is dirty";
@@ -373,36 +373,36 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  // Response L1 Network - response msg to this L1 cache
  in_port(responseL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
  if (responseL1Network_in.isReady()) {
- peek(responseL1Network_in, ResponseMsg, block_on="Addr") {
+ peek(responseL1Network_in, ResponseMsg, block_on="addr") {
  assert(in_msg.Destination.isElement(machineID));

- Entry cache_entry := getCacheEntry(in_msg.Addr);
- TBE tbe := TBEs[in_msg.Addr];
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];

  if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
- trigger(Event:Data_Exclusive, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
  } else if(in_msg.Type == CoherenceResponseType:DATA) {
- if ((getState(tbe, cache_entry, in_msg.Addr) == State:IS ||
- getState(tbe, cache_entry, in_msg.Addr) == State:IS_I ||
- getState(tbe, cache_entry, in_msg.Addr) == State:PF_IS ||
- getState(tbe, cache_entry, in_msg.Addr) == State:PF_IS_I) &&
+ if ((getState(tbe, cache_entry, in_msg.addr) == State:IS ||
+ getState(tbe, cache_entry, in_msg.addr) == State:IS_I ||
+ getState(tbe, cache_entry, in_msg.addr) == State:PF_IS ||
+ getState(tbe, cache_entry, in_msg.addr) == State:PF_IS_I) &&
  machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {

- trigger(Event:DataS_fromL1, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:DataS_fromL1, in_msg.addr, cache_entry, tbe);

  } else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
- trigger(Event:Data_all_Acks, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Data_all_Acks, in_msg.addr, cache_entry, tbe);
  } else {
- trigger(Event:Data, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Data, in_msg.addr, cache_entry, tbe);
  }
  } else if (in_msg.Type == CoherenceResponseType:ACK) {
  if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
- trigger(Event:Ack_all, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
  } else {
- trigger(Event:Ack, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
  }
  } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
- trigger(Event:WB_Ack, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
  } else {
  error("Invalid L1 response type");
  }
@@ -413,22 +413,22 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  // Request InterChip network - request from this L1 cache to the shared L2
  in_port(requestL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
  if(requestL1Network_in.isReady()) {
- peek(requestL1Network_in, RequestMsg, block_on="Addr") {
+ peek(requestL1Network_in, RequestMsg, block_on="addr") {
  assert(in_msg.Destination.isElement(machineID));

- Entry cache_entry := getCacheEntry(in_msg.Addr);
- TBE tbe := TBEs[in_msg.Addr];
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];

  if (in_msg.Type == CoherenceRequestType:INV) {
- trigger(Event:Inv, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
  } else if (in_msg.Type == CoherenceRequestType:GETX ||
  in_msg.Type == CoherenceRequestType:UPGRADE) {
  // upgrade transforms to GETX due to race
- trigger(Event:Fwd_GETX, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
  } else if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:Fwd_GETS, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
  } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
- trigger(Event:Fwd_GET_INSTR, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
  } else {
  error("Invalid forwarded request type");
  }
@@ -520,7 +520,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(a_issueGETS, "a", desc="Issue GETS") {
  peek(mandatoryQueue_in, RubyRequest) {
  enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:GETS;
  out_msg.Requestor := machineID;
  out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
@@ -537,7 +537,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(pa_issuePfGETS, "pa", desc="Issue prefetch GETS") {
  peek(optionalQueue_in, RubyRequest) {
  enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:GETS;
  out_msg.Requestor := machineID;
  out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
@@ -554,7 +554,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
  peek(mandatoryQueue_in, RubyRequest) {
  enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:GET_INSTR;
  out_msg.Requestor := machineID;
  out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
@@ -572,7 +572,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  desc="Issue GETINSTR for prefetch request") {
  peek(optionalQueue_in, RubyRequest) {
  enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:GET_INSTR;
  out_msg.Requestor := machineID;
  out_msg.Destination.add(
@@ -591,7 +591,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(b_issueGETX, "b", desc="Issue GETX") {
  peek(mandatoryQueue_in, RubyRequest) {
  enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:GETX;
  out_msg.Requestor := machineID;
  DPRINTF(RubySlicc, "%s\n", machineID);
@@ -609,7 +609,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(pb_issuePfGETX, "pb", desc="Issue prefetch GETX") {
  peek(optionalQueue_in, RubyRequest) {
  enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:GETX;
  out_msg.Requestor := machineID;
  DPRINTF(RubySlicc, "%s\n", machineID);
@@ -629,7 +629,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(c_issueUPGRADE, "c", desc="Issue GETX") {
  peek(mandatoryQueue_in, RubyRequest) {
  enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:UPGRADE;
  out_msg.Requestor := machineID;
  out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
@@ -647,7 +647,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  peek(requestL1Network_in, RequestMsg) {
  enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := cache_entry.DataBlk;
  out_msg.Dirty := cache_entry.Dirty;
@@ -661,7 +661,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
  enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := cache_entry.DataBlk;
  out_msg.Dirty := cache_entry.Dirty;
@@ -676,7 +676,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  peek(requestL1Network_in, RequestMsg) {
  enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
  assert(is_valid(tbe));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := tbe.DataBlk;
  out_msg.Dirty := tbe.Dirty;
@@ -690,7 +690,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
  enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
  assert(is_valid(tbe));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := tbe.DataBlk;
  out_msg.Dirty := tbe.Dirty;
@@ -704,7 +704,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
  peek(requestL1Network_in, RequestMsg) {
  enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:ACK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(in_msg.Requestor);
@@ -716,7 +716,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
  enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := cache_entry.DataBlk;
  out_msg.Dirty := cache_entry.Dirty;
@@ -730,7 +730,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
  enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
  assert(is_valid(tbe));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := tbe.DataBlk;
  out_msg.Dirty := tbe.Dirty;
@@ -744,7 +744,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(fi_sendInvAck, "fi", desc="send data to the L2 cache") {
  peek(requestL1Network_in, RequestMsg) {
  enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:ACK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(in_msg.Requestor);
@@ -764,7 +764,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
  action(g_issuePUTX, "g", desc="send data to the L2 cache") {
  enqueue(requestL1Network_out, RequestMsg, l1_response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:PUTX;
  out_msg.DataBlk := cache_entry.DataBlk;
  out_msg.Dirty := cache_entry.Dirty;
@@ -781,7 +781,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")

  action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
  enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:UNBLOCK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
@@ -793,7 +793,7 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")

  action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
  enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,

@@ -129,7 +129,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")

  // TBE fields
  structure(TBE, desc="...") {
- Address Addr, desc="Physical address for this TBE";
+ Address addr, desc="Physical address for this TBE";
  State TBEState, desc="Transient state";
  DataBlock DataBlk, desc="Buffer for the data block";
  bool Dirty, default="false", desc="Data is Dirty";
@@ -287,17 +287,17 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache, rank = 2) {
  if(L1unblockNetwork_in.isReady()) {
  peek(L1unblockNetwork_in, ResponseMsg) {
- Entry cache_entry := getCacheEntry(in_msg.Addr);
- TBE tbe := TBEs[in_msg.Addr];
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
  DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n",
- in_msg.Addr, getState(tbe, cache_entry, in_msg.Addr),
+ in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
  in_msg.Sender, in_msg.Type, in_msg.Destination);

  assert(in_msg.Destination.isElement(machineID));
  if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
- trigger(Event:Exclusive_Unblock, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Exclusive_Unblock, in_msg.addr, cache_entry, tbe);
  } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
- trigger(Event:Unblock, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Unblock, in_msg.addr, cache_entry, tbe);
  } else {
  error("unknown unblock message");
  }
@@ -311,21 +311,21 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  peek(responseL2Network_in, ResponseMsg) {
  // test wether it's from a local L1 or an off chip source
  assert(in_msg.Destination.isElement(machineID));
- Entry cache_entry := getCacheEntry(in_msg.Addr);
- TBE tbe := TBEs[in_msg.Addr];
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];

  if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
  if(in_msg.Type == CoherenceResponseType:DATA) {
  if (in_msg.Dirty) {
- trigger(Event:WB_Data, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:WB_Data, in_msg.addr, cache_entry, tbe);
  } else {
- trigger(Event:WB_Data_clean, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:WB_Data_clean, in_msg.addr, cache_entry, tbe);
  }
  } else if (in_msg.Type == CoherenceResponseType:ACK) {
  if ((getPendingAcks(tbe) - in_msg.AckCount) == 0) {
- trigger(Event:Ack_all, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
  } else {
- trigger(Event:Ack, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
  }
  } else {
  error("unknown message type");
@@ -333,11 +333,11 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")

  } else { // external message
  if(in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Mem_Data, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Mem_Data, in_msg.addr, cache_entry, tbe);
  } else if(in_msg.Type == CoherenceResponseType:MEMORY_ACK) {
- trigger(Event:Mem_Ack, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:Mem_Ack, in_msg.addr, cache_entry, tbe);
  } else if(in_msg.Type == CoherenceResponseType:INV) {
- trigger(Event:MEM_Inv, in_msg.Addr, cache_entry, tbe);
+ trigger(Event:MEM_Inv, in_msg.addr, cache_entry, tbe);
  } else {
  error("unknown message type");
  }
@@ -350,11 +350,11 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  in_port(L1RequestL2Network_in, RequestMsg, L1RequestToL2Cache, rank = 0) {
  if(L1RequestL2Network_in.isReady()) {
  peek(L1RequestL2Network_in, RequestMsg) {
- Entry cache_entry := getCacheEntry(in_msg.Addr);
- TBE tbe := TBEs[in_msg.Addr];
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];

  DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n",
- in_msg.Addr, getState(tbe, cache_entry, in_msg.Addr),
+ in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
  in_msg.Requestor, in_msg.Type, in_msg.Destination);

  assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
@@ -362,24 +362,24 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")

  if (is_valid(cache_entry)) {
  // The L2 contains the block, so proceeded with handling the request
- trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Addr,
+ trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.addr,
  in_msg.Requestor, cache_entry),
- in_msg.Addr, cache_entry, tbe);
+ in_msg.addr, cache_entry, tbe);
  } else {
- if (L2cache.cacheAvail(in_msg.Addr)) {
+ if (L2cache.cacheAvail(in_msg.addr)) {
  // L2 does't have the line, but we have space for it in the L2
- trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Addr,
+ trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.addr,
  in_msg.Requestor, cache_entry),
- in_msg.Addr, cache_entry, tbe);
+ in_msg.addr, cache_entry, tbe);
  } else {
  // No room in the L2, so we need to make room before handling the request
- Entry L2cache_entry := getCacheEntry(L2cache.cacheProbe(in_msg.Addr));
+ Entry L2cache_entry := getCacheEntry(L2cache.cacheProbe(in_msg.addr));
  if (isDirty(L2cache_entry)) {
- trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.Addr),
- L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.Addr)]);
+ trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
+ L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
  } else {
- trigger(Event:L2_Replacement_clean, L2cache.cacheProbe(in_msg.Addr),
- L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.Addr)]);
+ trigger(Event:L2_Replacement_clean, L2cache.cacheProbe(in_msg.addr),
+ L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
  }
  }
  }
@@ -393,7 +393,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  action(a_issueFetchToMemory, "a", desc="fetch data from memory") {
  peek(L1RequestL2Network_in, RequestMsg) {
  enqueue(DirRequestL2Network_out, RequestMsg, l2_request_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:GETS;
  out_msg.Requestor := machineID;
  out_msg.Destination.add(map_Address_to_Directory(address));
@@ -406,7 +406,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  peek(L1RequestL2Network_in, RequestMsg) {
  enqueue(L1RequestL2Network_out, RequestMsg, to_l1_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := in_msg.Type;
  out_msg.Requestor := in_msg.Requestor;
  out_msg.Destination.add(cache_entry.Exclusive);
@@ -418,7 +418,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  action(c_exclusiveReplacement, "c", desc="Send data to memory") {
  enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:MEMORY_DATA;
  out_msg.Sender := machineID;
  out_msg.Destination.add(map_Address_to_Directory(address));
@@ -430,7 +430,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")

  action(c_exclusiveCleanReplacement, "cc", desc="Send ack to memory for clean replacement") {
  enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:ACK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(map_Address_to_Directory(address));
@@ -441,7 +441,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  action(ct_exclusiveReplacementFromTBE, "ct", desc="Send data to memory") {
  enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
  assert(is_valid(tbe));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:MEMORY_DATA;
  out_msg.Sender := machineID;
  out_msg.Destination.add(map_Address_to_Directory(address));
@@ -455,7 +455,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  peek(L1RequestL2Network_in, RequestMsg) {
  enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.Sender := machineID;
  out_msg.Destination.add(in_msg.Requestor);
@@ -474,7 +474,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  peek(L1RequestL2Network_in, RequestMsg) {
  enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
  out_msg.Sender := machineID;
  out_msg.Destination.add(in_msg.Requestor);
@@ -493,7 +493,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  peek(L1RequestL2Network_in, RequestMsg) {
  enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.Sender := machineID;
  out_msg.Destination.add(in_msg.Requestor);
@@ -509,7 +509,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  assert(tbe.L1_GetS_IDs.count() > 0);
  enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.Sender := machineID;
  out_msg.Destination := tbe.L1_GetS_IDs; // internal nodes
@@ -523,7 +523,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  assert(tbe.L1_GetS_IDs.count() == 1);
  enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
  out_msg.Sender := machineID;
  out_msg.Destination := tbe.L1_GetS_IDs; // internal nodes
@@ -536,14 +536,14 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
  assert(is_valid(tbe));
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.Sender := machineID;
  out_msg.Destination.add(tbe.L1_GetX_ID);
  DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
  out_msg.DataBlk := cache_entry.DataBlk;
  DPRINTF(RubySlicc, "Address: %s, Destination: %s, DataBlock: %s\n",
- out_msg.Addr, out_msg.Destination, out_msg.DataBlk);
+ out_msg.addr, out_msg.Destination, out_msg.DataBlk);
  out_msg.MessageSize := MessageSizeType:Response_Data;
  }
  }
@@ -551,7 +551,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  action(f_sendInvToSharers, "f", desc="invalidate sharers for L2 replacement") {
  enqueue(L1RequestL2Network_out, RequestMsg, to_l1_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:INV;
  out_msg.Requestor := machineID;
  out_msg.Destination := cache_entry.Sharers;
@@ -563,7 +563,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  peek(L1RequestL2Network_in, RequestMsg) {
  enqueue(L1RequestL2Network_out, RequestMsg, to_l1_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:INV;
  out_msg.Requestor := in_msg.Requestor;
  out_msg.Destination := cache_entry.Sharers;
@@ -576,7 +576,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  peek(L1RequestL2Network_in, RequestMsg) {
  enqueue(L1RequestL2Network_out, RequestMsg, to_l1_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceRequestType:INV;
  out_msg.Requestor := in_msg.Requestor;
  out_msg.Destination := cache_entry.Sharers;
@@ -685,7 +685,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  action(t_sendWBAck, "t", desc="Send writeback ACK") {
  peek(L1RequestL2Network_in, RequestMsg) {
  enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:WB_ACK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(in_msg.Requestor);
@@ -698,7 +698,7 @@ machine(L2Cache, "MESI Directory L2 Cache CMP")
  peek(L1RequestL2Network_in, RequestMsg) {
  enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
  assert(is_valid(cache_entry));
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:ACK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(in_msg.Requestor);

@@ -194,13 +194,13 @@ machine(Directory, "MESI Two Level directory protocol")
  peek(requestNetwork_in, RequestMsg) {
  assert(in_msg.Destination.isElement(machineID));
  if (isGETRequest(in_msg.Type)) {
- trigger(Event:Fetch, in_msg.Addr, TBEs[in_msg.Addr]);
+ trigger(Event:Fetch, in_msg.addr, TBEs[in_msg.addr]);
  } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
- trigger(Event:DMA_READ, makeLineAddress(in_msg.Addr),
- TBEs[makeLineAddress(in_msg.Addr)]);
+ trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
+ TBEs[makeLineAddress(in_msg.addr)]);
  } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
- trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Addr),
- TBEs[makeLineAddress(in_msg.Addr)]);
+ trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
+ TBEs[makeLineAddress(in_msg.addr)]);
  } else {
  DPRINTF(RubySlicc, "%s\n", in_msg);
  error("Invalid message");
@@ -214,9 +214,9 @@ machine(Directory, "MESI Two Level directory protocol")
  peek(responseNetwork_in, ResponseMsg) {
  assert(in_msg.Destination.isElement(machineID));
  if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Data, in_msg.Addr, TBEs[in_msg.Addr]);
+ trigger(Event:Data, in_msg.addr, TBEs[in_msg.addr]);
  } else if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:CleanReplacement, in_msg.Addr, TBEs[in_msg.Addr]);
+ trigger(Event:CleanReplacement, in_msg.addr, TBEs[in_msg.addr]);
  } else {
  DPRINTF(RubySlicc, "%s\n", in_msg.Type);
  error("Invalid message");
@@ -230,9 +230,9 @@ machine(Directory, "MESI Two Level directory protocol")
  if (memQueue_in.isReady()) {
  peek(memQueue_in, MemoryMsg) {
  if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.Addr, TBEs[in_msg.Addr]);
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
  } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.Addr, TBEs[in_msg.Addr]);
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
  } else {
  DPRINTF(RubySlicc, "%s\n", in_msg.Type);
  error("Invalid message");
@@ -246,7 +246,7 @@ machine(Directory, "MESI Two Level directory protocol")
  action(a_sendAck, "a", desc="Send ack to L2") {
  peek(responseNetwork_in, ResponseMsg) {
  enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:MEMORY_ACK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(in_msg.Sender);
@@ -258,7 +258,7 @@ machine(Directory, "MESI Two Level directory protocol")
  action(d_sendData, "d", desc="Send data to requestor") {
  peek(memQueue_in, MemoryMsg) {
  enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:MEMORY_DATA;
  out_msg.Sender := machineID;
  out_msg.Destination.add(in_msg.OriginalRequestorMachId);
@@ -266,7 +266,7 @@ machine(Directory, "MESI Two Level directory protocol")
  out_msg.Dirty := false;
  out_msg.MessageSize := MessageSizeType:Response_Data;

- Entry e := getDirectoryEntry(in_msg.Addr);
+ Entry e := getDirectoryEntry(in_msg.addr);
  e.Owner := in_msg.OriginalRequestorMachId;
  }
  }
@@ -276,7 +276,7 @@ machine(Directory, "MESI Two Level directory protocol")
  action(aa_sendAck, "aa", desc="Send ack to L2") {
  peek(memQueue_in, MemoryMsg) {
  enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:MEMORY_ACK;
  out_msg.Sender := machineID;
  out_msg.Destination.add(in_msg.OriginalRequestorMachId);
@@ -328,7 +328,7 @@ machine(Directory, "MESI Two Level directory protocol")
  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
  peek(memQueue_in, MemoryMsg) {
  enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
  out_msg.Destination.add(map_Address_to_DMA(address));
@@ -347,7 +347,7 @@ machine(Directory, "MESI Two Level directory protocol")

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
  enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:ACK;
  out_msg.Destination.add(map_Address_to_DMA(address));
  out_msg.MessageSize := MessageSizeType:Writeback_Control;
@@ -365,7 +365,7 @@ machine(Directory, "MESI Two Level directory protocol")
  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
  peek(requestNetwork_in, RequestMsg) {
  enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:INV;
  out_msg.Sender := machineID;
  out_msg.Destination.add(getDirectoryEntry(address).Owner);
@@ -378,7 +378,7 @@ machine(Directory, "MESI Two Level directory protocol")
  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
  peek(responseNetwork_in, ResponseMsg) {
  enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
+ out_msg.addr := address;
  out_msg.Type := CoherenceResponseType:DATA;
  out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
  out_msg.Destination.add(map_Address_to_DMA(address));
@@ -392,7 +392,7 @@ machine(Directory, "MESI Two Level directory protocol")
  TBEs.allocate(address);
  set_tbe(TBEs[address]);
  tbe.DataBlk := in_msg.DataBlk;
- tbe.PhysicalAddress := in_msg.Addr;
+ tbe.PhysicalAddress := in_msg.addr;
  tbe.Len := in_msg.Len;
  }
  }

@@ -95,9 +95,9 @@ machine(DMA, "DMA Controller")
  if (dmaResponseQueue_in.isReady()) {
  peek( dmaResponseQueue_in, ResponseMsg) {
  if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Ack, makeLineAddress(in_msg.Addr));
+ trigger(Event:Ack, makeLineAddress(in_msg.addr));
  } else if (in_msg.Type == CoherenceResponseType:DATA) {
- trigger(Event:Data, makeLineAddress(in_msg.Addr));
+ trigger(Event:Data, makeLineAddress(in_msg.addr));
  } else {
  error("Invalid response type");
  }
@@ -108,7 +108,7 @@ machine(DMA, "DMA Controller")
  action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
  peek(dmaRequestQueue_in, SequencerMsg) {
  enqueue(requestToDir_out, RequestMsg, request_latency) {
- out_msg.Addr := in_msg.PhysicalAddress;
+ out_msg.addr := in_msg.PhysicalAddress;
  out_msg.Type := CoherenceRequestType:DMA_READ;
  out_msg.DataBlk := in_msg.DataBlk;
  out_msg.Len := in_msg.Len;
@@ -121,7 +121,7 @@ machine(DMA, "DMA Controller")
  action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
  peek(dmaRequestQueue_in, SequencerMsg) {
  enqueue(requestToDir_out, RequestMsg, request_latency) {
- out_msg.Addr := in_msg.PhysicalAddress;
+ out_msg.addr := in_msg.PhysicalAddress;
  out_msg.Type := CoherenceRequestType:DMA_WRITE;
  out_msg.DataBlk := in_msg.DataBlk;
  out_msg.Len := in_msg.Len;

@@ -58,7 +58,7 @@ enumeration(CoherenceResponseType, desc="...") {

  // RequestMsg
  structure(RequestMsg, desc="...", interface="Message") {
- Address Addr, desc="Physical address for this request";
+ Address addr, desc="Physical address for this request";
  CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
  RubyAccessMode AccessMode, desc="user/supervisor access type";
  MachineID Requestor , desc="What component request";
@@ -72,7 +72,7 @@ structure(RequestMsg, desc="...", interface="Message") {
  bool functionalRead(Packet *pkt) {
  // Only PUTX messages contains the data block
  if (Type == CoherenceRequestType:PUTX) {
- return testAndRead(Addr, DataBlk, pkt);
+ return testAndRead(addr, DataBlk, pkt);
  }

  return false;
@@ -81,13 +81,13 @@ structure(RequestMsg, desc="...", interface="Message") {
  bool functionalWrite(Packet *pkt) {
  // No check on message type required since the protocol should
  // read data from those messages that contain the block
- return testAndWrite(Addr, DataBlk, pkt);
+ return testAndWrite(addr, DataBlk, pkt);
  }
  }

  // ResponseMsg
  structure(ResponseMsg, desc="...", interface="Message") {
- Address Addr, desc="Physical address for this request";
+ Address addr, desc="Physical address for this request";
  CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
  MachineID Sender, desc="What component sent the data";
  NetDest Destination, desc="Node to whom the data is sent";
@@ -102,7 +102,7 @@ structure(ResponseMsg, desc="...", interface="Message") {
  Type == CoherenceResponseType:DATA_EXCLUSIVE ||
  Type == CoherenceResponseType:MEMORY_DATA) {

- return testAndRead(Addr, DataBlk, pkt);
+ return testAndRead(addr, DataBlk, pkt);
  }

  return false;
@@ -111,6 +111,6 @@ structure(ResponseMsg, desc="...", interface="Message") {
  bool functionalWrite(Packet *pkt) {
  // No check on message type required since the protocol should
  // read data from those messages that contain the block
- return testAndWrite(Addr, DataBlk, pkt);
+ return testAndWrite(addr, DataBlk, pkt);
  }
  }

@ -202,22 +202,22 @@ machine(L1Cache, "MI Example L1 Cache")
in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
if (forwardRequestNetwork_in.isReady()) {
peek(forwardRequestNetwork_in, RequestMsg, block_on="Addr") {
peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.Addr);
TBE tbe := TBEs[in_msg.Addr];
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:Fwd_GETX, in_msg.Addr, cache_entry, tbe);
trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
}
else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.Addr, cache_entry, tbe);
trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
}
else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.Addr, cache_entry, tbe);
trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
}
else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.Addr, cache_entry, tbe);
trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
}
else {
error("Unexpected message");

@ -228,13 +228,13 @@ machine(L1Cache, "MI Example L1 Cache")
in_port(responseNetwork_in, ResponseMsg, responseToCache) {
if (responseNetwork_in.isReady()) {
peek(responseNetwork_in, ResponseMsg, block_on="Addr") {
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.Addr);
TBE tbe := TBEs[in_msg.Addr];
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.Addr, cache_entry, tbe);
trigger(Event:Data, in_msg.addr, cache_entry, tbe);
}
else {
error("Unexpected message");

@ -268,7 +268,7 @@ machine(L1Cache, "MI Example L1 Cache")
action(a_issueRequest, "a", desc="Issue a request") {
enqueue(requestNetwork_out, RequestMsg, issue_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@ -279,7 +279,7 @@ machine(L1Cache, "MI Example L1 Cache")
action(b_issuePUT, "b", desc="Issue a PUT request") {
enqueue(requestNetwork_out, RequestMsg, issue_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@ -292,7 +292,7 @@ machine(L1Cache, "MI Example L1 Cache")
peek(forwardRequestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);

@ -306,7 +306,7 @@ machine(L1Cache, "MI Example L1 Cache")
peek(forwardRequestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);

@ -222,16 +222,16 @@ machine(Directory, "Directory protocol")
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
TBE tbe := TBEs[in_msg.Addr];
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Addr, tbe);
trigger(Event:GETS, in_msg.addr, tbe);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:GETX, in_msg.Addr, tbe);
trigger(Event:GETX, in_msg.addr, tbe);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
if (getDirectoryEntry(in_msg.Addr).Owner.isElement(in_msg.Requestor)) {
trigger(Event:PUTX, in_msg.Addr, tbe);
if (getDirectoryEntry(in_msg.addr).Owner.isElement(in_msg.Requestor)) {
trigger(Event:PUTX, in_msg.addr, tbe);
} else {
trigger(Event:PUTX_NotOwner, in_msg.Addr, tbe);
trigger(Event:PUTX_NotOwner, in_msg.addr, tbe);
}
} else {
error("Invalid message");

@ -245,11 +245,11 @@ machine(Directory, "Directory protocol")
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
TBE tbe := TBEs[in_msg.Addr];
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.Addr, tbe);
trigger(Event:Memory_Data, in_msg.addr, tbe);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Addr, tbe);
trigger(Event:Memory_Ack, in_msg.addr, tbe);
} else {
DPRINTF(RubySlicc,"%s\n", in_msg.Type);
error("Invalid message");

@ -263,7 +263,7 @@ machine(Directory, "Directory protocol")
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(in_msg.Requestor);

@ -275,7 +275,7 @@ machine(Directory, "Directory protocol")
action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
peek(memQueue_in, MemoryMsg) {
enqueue(forwardNetwork_out, RequestMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.OriginalRequestorMachId;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);

@ -287,7 +287,7 @@ machine(Directory, "Directory protocol")
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(in_msg.Requestor);

@ -303,7 +303,7 @@ machine(Directory, "Directory protocol")
action(d_sendData, "d", desc="Send data to requestor") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);

@ -367,14 +367,14 @@ machine(Directory, "Directory protocol")
action(f_forwardRequest, "f", desc="Forward request to owner") {
peek(requestQueue_in, RequestMsg) {
APPEND_TRANSITION_COMMENT("Own: ");
APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.Addr).Owner);
APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.addr).Owner);
APPEND_TRANSITION_COMMENT("Req: ");
APPEND_TRANSITION_COMMENT(in_msg.Requestor);
enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination := getDirectoryEntry(in_msg.Addr).Owner;
out_msg.Destination := getDirectoryEntry(in_msg.addr).Owner;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}

@ -383,7 +383,7 @@ machine(Directory, "Directory protocol")
action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
peek(dmaRequestQueue_in, DMARequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;

@ -51,7 +51,7 @@ enumeration(CoherenceResponseType, desc="...") {
// RequestMsg (and also forwarded requests)
structure(RequestMsg, desc="...", interface="Message") {
Address Addr, desc="Physical address for this request";
Address addr, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Multicast destination mask";

@ -61,7 +61,7 @@ structure(RequestMsg, desc="...", interface="Message") {
bool functionalRead(Packet *pkt) {
// Valid data block is only present in PUTX messages
if (Type == CoherenceRequestType:PUTX) {
return testAndRead(Addr, DataBlk, pkt);
return testAndRead(addr, DataBlk, pkt);
}
return false;
}

@ -69,13 +69,13 @@ structure(RequestMsg, desc="...", interface="Message") {
bool functionalWrite(Packet *pkt) {
// No check on message type required since the protocol should read
// data block from only those messages that contain valid data
return testAndWrite(Addr, DataBlk, pkt);
return testAndWrite(addr, DataBlk, pkt);
}
}
// ResponseMsg (and also unblock requests)
structure(ResponseMsg, desc="...", interface="Message") {
Address Addr, desc="Physical address for this request";
Address addr, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID Sender, desc="Node who sent the data";
NetDest Destination, desc="Node to whom the data is sent";

@ -86,13 +86,13 @@ structure(ResponseMsg, desc="...", interface="Message") {
bool functionalRead(Packet *pkt) {
// A check on message type should appear here so that only those
// messages that contain data
return testAndRead(Addr, DataBlk, pkt);
return testAndRead(addr, DataBlk, pkt);
}
bool functionalWrite(Packet *pkt) {
// No check on message type required since the protocol should read
// data block from only those messages that contain valid data
return testAndWrite(Addr, DataBlk, pkt);
return testAndWrite(addr, DataBlk, pkt);
}
}

@ -115,7 +115,7 @@ machine(L1Cache, "Directory protocol")
// TBE fields
structure(TBE, desc="...") {
Address Addr, desc="Physical address for this TBE";
Address addr, desc="Physical address for this TBE";
State TBEState, desc="Transient state";
DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
bool Dirty, desc="Is the data dirty (different than memory)?";

@ -278,8 +278,8 @@ machine(L1Cache, "Directory protocol")
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:All_acks, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}

@ -292,36 +292,36 @@ machine(L1Cache, "Directory protocol")
// Request Network
in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg, block_on="Addr") {
peek(requestNetwork_in, RequestMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
trigger(Event:Own_GETX, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Own_GETX, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
trigger(Event:Fwd_GETX, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Fwd_GETX, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Fwd_GETS, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:Fwd_DMA, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Fwd_DMA, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Writeback_Ack, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
trigger(Event:Writeback_Ack_Data, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Writeback_Ack_Data, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Writeback_Nack, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Inv, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}

@ -332,16 +332,16 @@ machine(L1Cache, "Directory protocol")
// Response Network
in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache) {
if (responseToL1Cache_in.isReady()) {
peek(responseToL1Cache_in, ResponseMsg, block_on="Addr") {
peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Ack, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Data, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Exclusive_Data, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Exclusive_Data, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}

@ -430,7 +430,7 @@ machine(L1Cache, "Directory protocol")
action(a_issueGETS, "a", desc="Issue GETS") {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestNetwork_out, RequestMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:L1Cache;

@ -446,7 +446,7 @@ machine(L1Cache, "Directory protocol")
action(b_issueGETX, "b", desc="Issue GETX") {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestNetwork_out, RequestMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:L1Cache;

@ -461,7 +461,7 @@ machine(L1Cache, "Directory protocol")
action(d_issuePUTX, "d", desc="Issue PUTX") {
enqueue(requestNetwork_out, RequestMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:L1Cache;

@ -473,7 +473,7 @@ machine(L1Cache, "Directory protocol")
action(dd_issuePUTO, "\d", desc="Issue PUTO") {
enqueue(requestNetwork_out, RequestMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:PUTO;
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:L1Cache;

@ -485,7 +485,7 @@ machine(L1Cache, "Directory protocol")
action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
enqueue(requestNetwork_out, RequestMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:PUTS;
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:L1Cache;

@ -500,7 +500,7 @@ machine(L1Cache, "Directory protocol")
assert(is_valid(cache_entry));
if (in_msg.RequestorMachine == MachineType:L2Cache) {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -512,11 +512,11 @@ machine(L1Cache, "Directory protocol")
out_msg.Acks := in_msg.Acks;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
DPRINTF(RubySlicc, "Sending data to L2: %s\n", in_msg.Addr);
DPRINTF(RubySlicc, "Sending data to L2: %s\n", in_msg.addr);
}
else {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -535,7 +535,7 @@ machine(L1Cache, "Directory protocol")
action(e_sendDataToL2, "ee", desc="Send data from cache to requestor") {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -553,7 +553,7 @@ machine(L1Cache, "Directory protocol")
assert(is_valid(cache_entry));
if (in_msg.RequestorMachine == MachineType:L2Cache) {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -568,7 +568,7 @@ machine(L1Cache, "Directory protocol")
}
else {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -587,7 +587,7 @@ machine(L1Cache, "Directory protocol")
peek(requestNetwork_in, RequestMsg) {
if (in_msg.RequestorMachine == MachineType:L1Cache) {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -598,7 +598,7 @@ machine(L1Cache, "Directory protocol")
}
else {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -613,7 +613,7 @@ machine(L1Cache, "Directory protocol")
action(g_sendUnblock, "g", desc="Send unblock to memory") {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -625,7 +625,7 @@ machine(L1Cache, "Directory protocol")
action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -709,7 +709,7 @@ machine(L1Cache, "Directory protocol")
assert(is_valid(tbe));
if (tbe.NumPendingMsgs == 0) {
enqueue(triggerQueue_out, TriggerMsg) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := TriggerType:ALL_ACKS;
}
}

@ -722,7 +722,7 @@ machine(L1Cache, "Directory protocol")
action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DMA_ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -741,7 +741,7 @@ machine(L1Cache, "Directory protocol")
if (in_msg.RequestorMachine == MachineType:L1Cache ||
in_msg.RequestorMachine == MachineType:DMA) {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -755,7 +755,7 @@ machine(L1Cache, "Directory protocol")
}
else {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -776,7 +776,7 @@ machine(L1Cache, "Directory protocol")
assert(is_valid(tbe));
if (in_msg.RequestorMachine == MachineType:L1Cache) {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -789,7 +789,7 @@ machine(L1Cache, "Directory protocol")
}
else {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;

@ -808,7 +808,7 @@ machine(L1Cache, "Directory protocol")
action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
enqueue(responseNetwork_out, ResponseMsg, request_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,

@ -192,7 +192,7 @@ machine(L2Cache, "Token protocol")
// TBE fields
structure(TBE, desc="...") {
Address Addr, desc="Physical address for this TBE";
Address addr, desc="Physical address for this TBE";
State TBEState, desc="Transient state";
Address PC, desc="Program counter of request";
DataBlock DataBlk, desc="Buffer for the data block";

@ -581,8 +581,8 @@ machine(L2Cache, "Token protocol")
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_Acks, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:All_Acks, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}

@ -597,27 +597,27 @@ machine(L2Cache, "Token protocol")
peek(requestNetwork_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID) {
trigger(Event:Own_GETX, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Own_GETX, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
trigger(Event:Fwd_GETX, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Fwd_GETX, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Fwd_GETS, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if(in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:Fwd_DMA, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Fwd_DMA, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Inv, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Writeback_Ack, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Writeback_Nack, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}

@ -630,26 +630,26 @@ machine(L2Cache, "Token protocol")
peek(L1requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:L1_GETX, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:L1_GETX, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:L1_GETS, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:L1_GETS, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
trigger(Event:L1_PUTO, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:L1_PUTO, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
trigger(Event:L1_PUTX, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:L1_PUTX, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTS) {
Entry cache_entry := getCacheEntry(in_msg.Addr);
if (isOnlySharer(cache_entry, in_msg.Addr, in_msg.Requestor)) {
trigger(Event:L1_PUTS_only, in_msg.Addr,
cache_entry, TBEs[in_msg.Addr]);
Entry cache_entry := getCacheEntry(in_msg.addr);
if (isOnlySharer(cache_entry, in_msg.addr, in_msg.Requestor)) {
trigger(Event:L1_PUTS_only, in_msg.addr,
cache_entry, TBEs[in_msg.addr]);
}
else {
trigger(Event:L1_PUTS, in_msg.Addr,
cache_entry, TBEs[in_msg.Addr]);
trigger(Event:L1_PUTS, in_msg.addr,
cache_entry, TBEs[in_msg.addr]);
}
} else {
error("Unexpected message");

@ -666,52 +666,52 @@ machine(L2Cache, "Token protocol")
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:ACK) {
if (in_msg.SenderMachine == MachineType:L2Cache) {
trigger(Event:ExtAck, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:ExtAck, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
else {
trigger(Event:IntAck, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:IntAck, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Data, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Data_Exclusive, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
trigger(Event:Unblock, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Unblock, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:Exclusive_Unblock, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
Entry cache_entry := getCacheEntry(in_msg.Addr);
Entry cache_entry := getCacheEntry(in_msg.addr);
if (is_invalid(cache_entry) &&
L2cache.cacheAvail(in_msg.Addr) == false) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.Addr),
getCacheEntry(L2cache.cacheProbe(in_msg.Addr)),
TBEs[L2cache.cacheProbe(in_msg.Addr)]);
L2cache.cacheAvail(in_msg.addr) == false) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
TBEs[L2cache.cacheProbe(in_msg.addr)]);
}
else {
trigger(Event:L1_WBDIRTYDATA, in_msg.Addr,
cache_entry, TBEs[in_msg.Addr]);
trigger(Event:L1_WBDIRTYDATA, in_msg.addr,
cache_entry, TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_DATA) {
Entry cache_entry := getCacheEntry(in_msg.Addr);
Entry cache_entry := getCacheEntry(in_msg.addr);
if (is_invalid(cache_entry) &&
L2cache.cacheAvail(in_msg.Addr) == false) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.Addr),
getCacheEntry(L2cache.cacheProbe(in_msg.Addr)),
TBEs[L2cache.cacheProbe(in_msg.Addr)]);
L2cache.cacheAvail(in_msg.addr) == false) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
TBEs[L2cache.cacheProbe(in_msg.addr)]);
}
else {
trigger(Event:L1_WBCLEANDATA, in_msg.Addr,
cache_entry, TBEs[in_msg.Addr]);
trigger(Event:L1_WBCLEANDATA, in_msg.addr,
cache_entry, TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DmaAck, in_msg.Addr,
getCacheEntry(in_msg.Addr), TBEs[in_msg.Addr]);
trigger(Event:DmaAck, in_msg.addr,
getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}

@ -725,7 +725,7 @@ machine(L2Cache, "Token protocol")
action(a_issueGETS, "a", desc="issue local request globally") {
peek(L1requestNetwork_in, RequestMsg) {
enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.RequestorMachine := MachineType:L2Cache;
out_msg.Requestor := machineID;

@ -738,7 +738,7 @@ machine(L2Cache, "Token protocol")
action(a_issueGETX, "\a", desc="issue local request globally") {
peek(L1requestNetwork_in, RequestMsg) {
enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.RequestorMachine := MachineType:L2Cache;
out_msg.Requestor := machineID;

@ -750,7 +750,7 @@ machine(L2Cache, "Token protocol")
action(b_issuePUTX, "b", desc="Issue PUTX") {
enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.RequestorMachine := MachineType:L2Cache;
out_msg.Requestor := machineID;

@ -761,7 +761,7 @@ machine(L2Cache, "Token protocol")
action(b_issuePUTO, "\b", desc="Issue PUTO") {
enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:PUTO;
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:L2Cache;

@ -773,7 +773,7 @@ machine(L2Cache, "Token protocol")
/* PUTO, but local sharers exist */
action(b_issuePUTO_ls, "\bb", desc="Issue PUTO") {
enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:PUTO_SHARERS;
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:L2Cache;

@ -785,7 +785,7 @@ machine(L2Cache, "Token protocol")
action(c_sendDataFromTBEToL1GETS, "c", desc="Send data from TBE to L1 requestors in TBE") {
assert(is_valid(tbe));
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -803,7 +803,7 @@ machine(L2Cache, "Token protocol")
action(c_sendDataFromTBEToL1GETX, "\c", desc="Send data from TBE to L1 requestors in TBE") {
assert(is_valid(tbe));
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -820,7 +820,7 @@ machine(L2Cache, "Token protocol")
action(c_sendExclusiveDataFromTBEToL1GETS, "\cc", desc="Send data from TBE to L1 requestors in TBE") {
assert(is_valid(tbe));
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -834,7 +834,7 @@ machine(L2Cache, "Token protocol")
action(c_sendDataFromTBEToFwdGETX, "cc", desc="Send data from TBE to external GETX") {
assert(is_valid(tbe));
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -850,7 +850,7 @@ machine(L2Cache, "Token protocol")
assert(is_valid(tbe));
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -870,7 +870,7 @@ machine(L2Cache, "Token protocol")
action(c_sendDataFromTBEToFwdGETS, "ccc", desc="Send data from TBE to external GETX") {
assert(is_valid(tbe));
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -889,7 +889,7 @@ machine(L2Cache, "Token protocol")
action(c_sendExclusiveDataFromTBEToFwdGETS, "\ccc", desc="Send data from TBE to external GETX") {
assert(is_valid(tbe));
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -907,7 +907,7 @@ machine(L2Cache, "Token protocol")
assert(is_valid(cache_entry));
peek(L1requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -928,7 +928,7 @@ machine(L2Cache, "Token protocol")
peek(L1requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -947,7 +947,7 @@ machine(L2Cache, "Token protocol")
assert(is_valid(cache_entry));
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -967,7 +967,7 @@ machine(L2Cache, "Token protocol")
assert(is_valid(cache_entry));
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -987,7 +987,7 @@ machine(L2Cache, "Token protocol")
assert(is_valid(cache_entry));
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -1002,7 +1002,7 @@ machine(L2Cache, "Token protocol")
action(e_sendAck, "e", desc="Send ack with the tokens we've collected thus far.") {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -1016,7 +1016,7 @@ machine(L2Cache, "Token protocol")
action(e_sendAckToL1Requestor, "\e", desc="Send ack with the tokens we've collected thus far.") {
peek(L1requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -1030,7 +1030,7 @@ machine(L2Cache, "Token protocol")
action(e_sendAckToL1RequestorFromTBE, "eee", desc="Send ack with the tokens we've collected thus far.") {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;

@ -1052,7 +1052,7 @@ machine(L2Cache, "Token protocol")
}
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:L2Cache;

@ -1073,7 +1073,7 @@ machine(L2Cache, "Token protocol")
if (countLocalSharers(cache_entry, address) > 0) {
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:L2Cache;

@ -1105,7 +1105,7 @@ machine(L2Cache, "Token protocol")
}
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := in_msg.Requestor;
out_msg.RequestorMachine := MachineType:L1Cache;

@ -1131,7 +1131,7 @@ machine(L2Cache, "Token protocol")
}
}
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := tbe.L1_GetX_ID;
out_msg.RequestorMachine := MachineType:L1Cache;

@ -1144,7 +1144,7 @@ machine(L2Cache, "Token protocol")
action(f_sendUnblock, "f", desc="Send unblock to global directory") {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.Sender := machineID;

@ -1156,7 +1156,7 @@ machine(L2Cache, "Token protocol")
action(f_sendExclusiveUnblock, "\f", desc="Send unblock to global directory") {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.Sender := machineID;

@ -1168,7 +1168,7 @@ machine(L2Cache, "Token protocol")
action(g_recordLocalSharer, "g", desc="Record new local sharer from unblock message") {
peek(responseNetwork_in, ResponseMsg) {
recordLocalSharerInDir(cache_entry, in_msg.Addr, in_msg.Sender);
recordLocalSharerInDir(cache_entry, in_msg.addr, in_msg.Sender);
}
}

@ -1184,13 +1184,13 @@ machine(L2Cache, "Token protocol")
action(gg_clearSharerFromL1Response, "\gg", desc="Clear sharer from L1 response queue") {
peek(responseNetwork_in, ResponseMsg) {
removeSharerFromDir(cache_entry, in_msg.Addr, in_msg.Sender);
removeSharerFromDir(cache_entry, in_msg.addr, in_msg.Sender);
}
}
action(gg_clearOwnerFromL1Response, "g\g", desc="Clear sharer from L1 response queue") {
peek(responseNetwork_in, ResponseMsg) {
removeOwnerFromDir(cache_entry, in_msg.Addr, in_msg.Sender);
removeOwnerFromDir(cache_entry, in_msg.addr, in_msg.Sender);
}
}

@ -1239,11 +1239,11 @@ machine(L2Cache, "Token protocol")
action(j_forwardGlobalRequestToLocalOwner, "j", desc="Forward external request to local owner") {
peek(requestNetwork_in, RequestMsg) {
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
out_msg.Addr := in_msg.Addr;
out_msg.addr := in_msg.addr;
out_msg.Type := in_msg.Type;
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:L2Cache;
out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.Addr));
out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
out_msg.Type := in_msg.Type;
out_msg.MessageSize := MessageSizeType:Forwarded_Control;
out_msg.Acks := 0 - 1;

@ -1254,11 +1254,11 @@ machine(L2Cache, "Token protocol")
action(jd_forwardDmaRequestToLocalOwner, "jd", desc="Forward dma request to local owner") {
peek(requestNetwork_in, RequestMsg) {
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
out_msg.Addr := in_msg.Addr;
out_msg.addr := in_msg.addr;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.RequestorMachine := in_msg.RequestorMachine;
out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.Addr));
out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
out_msg.Type := in_msg.Type;
out_msg.MessageSize := MessageSizeType:Forwarded_Control;
out_msg.Acks := 0 - 1;

@ -1270,12 +1270,12 @@ machine(L2Cache, "Token protocol")
action(k_forwardLocalGETSToLocalSharer, "k", desc="Forward local request to local sharer/owner") {
peek(L1requestNetwork_in, RequestMsg) {
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
out_msg.Addr := in_msg.Addr;
out_msg.addr := in_msg.addr;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := in_msg.Requestor;
out_msg.RequestorMachine := MachineType:L1Cache;
// should randomize this so one node doesn't get abused more than others
DirEntry dir_entry := getDirEntry(in_msg.Addr);
DirEntry dir_entry := getDirEntry(in_msg.addr);
out_msg.Destination.add(dir_entry.Sharers.smallestElement(MachineType:L1Cache));
out_msg.MessageSize := MessageSizeType:Forwarded_Control;
}

@ -1285,7 +1285,7 @@ machine(L2Cache, "Token protocol")
action(k_forwardLocalGETXToLocalOwner, "\k", desc="Forward local request to local owner") {
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := tbe.L1_GetX_ID;
out_msg.RequestorMachine := MachineType:L1Cache;

@ -1300,11 +1300,11 @@ machine(L2Cache, "Token protocol")
action(kk_forwardLocalGETXToLocalExclusive, "kk", desc="Forward local request to local owner") {
peek(L1requestNetwork_in, RequestMsg) {
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
out_msg.Addr := in_msg.Addr;
out_msg.addr := in_msg.addr;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := in_msg.Requestor;
out_msg.RequestorMachine := MachineType:L1Cache;
out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.Addr));
out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
out_msg.MessageSize := MessageSizeType:Forwarded_Control;
out_msg.Acks := 1;
}

@ -1314,11 +1314,11 @@ machine(L2Cache, "Token protocol")
action(kk_forwardLocalGETSToLocalOwner, "\kk", desc="Forward local request to local owner") {
peek(L1requestNetwork_in, RequestMsg) {
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
out_msg.Addr := in_msg.Addr;
out_msg.addr := in_msg.addr;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := in_msg.Requestor;
out_msg.RequestorMachine := MachineType:L1Cache;
out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.Addr));
out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
out_msg.MessageSize := MessageSizeType:Forwarded_Control;
}
}

@ -1328,7 +1328,7 @@ machine(L2Cache, "Token protocol")
action(l_writebackAckNeedData, "l", desc="Send writeback ack to L1 requesting data") {
peek(L1requestNetwork_in, RequestMsg) {
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
out_msg.Addr := in_msg.Addr;
out_msg.addr := in_msg.addr;
// out_msg.Type := CoherenceResponseType:WRITEBACK_SEND_DATA;
out_msg.Type := CoherenceRequestType:WB_ACK_DATA;
out_msg.Requestor := machineID;

@ -1342,7 +1342,7 @@ machine(L2Cache, "Token protocol")
action(l_writebackAckDropData, "\l", desc="Send writeback ack to L1 indicating to drop data") {
peek(L1requestNetwork_in, RequestMsg) {
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
out_msg.Addr := in_msg.Addr;
out_msg.addr := in_msg.addr;
// out_msg.Type := CoherenceResponseType:WRITEBACK_ACK;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := machineID;

@ -1356,7 +1356,7 @@ machine(L2Cache, "Token protocol")
action(ll_writebackNack, "\ll", desc="Send writeback nack to L1") {
peek(L1requestNetwork_in, RequestMsg) {
enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
out_msg.Addr := in_msg.Addr;
out_msg.addr := in_msg.addr;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := machineID;
out_msg.RequestorMachine := MachineType:L2Cache;

@ -1408,7 +1408,7 @@ machine(L2Cache, "Token protocol")
assert(is_valid(tbe));
if (tbe.NumIntPendingAcks == 0) {
enqueue(triggerQueue_out, TriggerMsg) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := TriggerType:ALL_ACKS;
}
}

@ -1418,7 +1418,7 @@ machine(L2Cache, "Token protocol")
assert(is_valid(tbe));
if (tbe.NumExtPendingAcks == 0) {
enqueue(triggerQueue_out, TriggerMsg) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := TriggerType:ALL_ACKS;
}
}

@ -1428,7 +1428,7 @@ machine(L2Cache, "Token protocol")
action( qq_sendDataFromTBEToMemory, "qq", desc="Send data from TBE to directory") {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;
out_msg.Destination.add(map_Address_to_Directory(address));

@ -1558,7 +1558,7 @@ machine(L2Cache, "Token protocol")
action(da_sendDmaAckUnblock, "da", desc="Send dma ack to global directory") {
enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DMA_ACK;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.Sender := machineID;

@ -232,28 +232,28 @@ machine(Directory, "Directory protocol")
if (unblockNetwork_in.isReady()) {
peek(unblockNetwork_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
if (getDirectoryEntry(in_msg.Addr).WaitingUnblocks == 1) {
trigger(Event:Last_Unblock, in_msg.Addr,
TBEs[in_msg.Addr]);
if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
trigger(Event:Last_Unblock, in_msg.addr,
TBEs[in_msg.addr]);
} else {
trigger(Event:Unblock, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Unblock, in_msg.addr,
TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Exclusive_Unblock, in_msg.addr,
TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
trigger(Event:Dirty_Writeback, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Dirty_Writeback, in_msg.addr,
TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
trigger(Event:Clean_Writeback, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Clean_Writeback, in_msg.addr,
TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Data, in_msg.addr,
TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_ACK, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:DMA_ACK, in_msg.addr,
TBEs[in_msg.addr]);
} else {
error("Invalid message");
}

@ -265,21 +265,21 @@ machine(Directory, "Directory protocol")
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:GETX, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
trigger(Event:PUTX, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
trigger(Event:PUTO, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
trigger(Event:PUTO_SHARERS, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:DMA_READ, makeLineAddress(in_msg.Addr),
TBEs[makeLineAddress(in_msg.Addr)]);
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Addr),
TBEs[makeLineAddress(in_msg.Addr)]);
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
TBEs[makeLineAddress(in_msg.addr)]);
} else {
error("Invalid message");
}

@ -292,9 +292,9 @@ machine(Directory, "Directory protocol")
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");

@ -308,7 +308,7 @@ machine(Directory, "Directory protocol")
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.RequestorMachine := MachineType:Directory;

@ -321,7 +321,7 @@ machine(Directory, "Directory protocol")
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.RequestorMachine := MachineType:Directory;

@ -347,7 +347,7 @@ machine(Directory, "Directory protocol")
action(d_sendDataMsg, "d", desc="Send data to requestor") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
|
||||
|
@ -367,7 +367,7 @@ machine(Directory, "Directory protocol")
|
|||
action(p_fwdDataToDMA, "\d", desc="Send data to requestor") {
|
||||
peek(requestQueue_in, RequestMsg) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, 1) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.SenderMachine := MachineType:Directory;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -388,11 +388,11 @@ machine(Directory, "Directory protocol")
|
|||
action(f_forwardRequest, "f", desc="Forward request to owner") {
|
||||
peek(requestQueue_in, RequestMsg) {
|
||||
enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := in_msg.Type;
|
||||
out_msg.Requestor := in_msg.Requestor;
|
||||
out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
|
||||
out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Addr).Owner);
|
||||
out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner);
|
||||
out_msg.Acks := getDirectoryEntry(address).Sharers.count();
|
||||
if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
|
||||
out_msg.Acks := out_msg.Acks - 1;
|
||||
|
@ -405,11 +405,11 @@ machine(Directory, "Directory protocol")
|
|||
action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner") {
|
||||
peek(requestQueue_in, RequestMsg) {
|
||||
enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := in_msg.Type;
|
||||
out_msg.Requestor := machineID;
|
||||
out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
|
||||
out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Addr).Owner);
|
||||
out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner);
|
||||
out_msg.Acks := getDirectoryEntry(address).Sharers.count();
|
||||
if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
|
||||
out_msg.Acks := out_msg.Acks - 1;
|
||||
|
@ -421,16 +421,16 @@ machine(Directory, "Directory protocol")
|
|||
|
||||
action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
|
||||
peek(requestQueue_in, RequestMsg) {
|
||||
if ((getDirectoryEntry(in_msg.Addr).Sharers.count() > 1) ||
|
||||
((getDirectoryEntry(in_msg.Addr).Sharers.count() > 0) &&
|
||||
(getDirectoryEntry(in_msg.Addr).Sharers.isElement(in_msg.Requestor) == false))) {
|
||||
if ((getDirectoryEntry(in_msg.addr).Sharers.count() > 1) ||
|
||||
((getDirectoryEntry(in_msg.addr).Sharers.count() > 0) &&
|
||||
(getDirectoryEntry(in_msg.addr).Sharers.isElement(in_msg.Requestor) == false))) {
|
||||
enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceRequestType:INV;
|
||||
out_msg.Requestor := in_msg.Requestor;
|
||||
out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
|
||||
// out_msg.Destination := getDirectoryEntry(in_msg.Addr).Sharers;
|
||||
out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Addr).Sharers);
|
||||
// out_msg.Destination := getDirectoryEntry(in_msg.addr).Sharers;
|
||||
out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Sharers);
|
||||
out_msg.Destination.remove(in_msg.Requestor);
|
||||
out_msg.MessageSize := MessageSizeType:Invalidate_Control;
|
||||
}
|
||||
|
@ -508,7 +508,7 @@ machine(Directory, "Directory protocol")
|
|||
action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
|
||||
peek(requestQueue_in, RequestMsg) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, 1) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.SenderMachine := MachineType:Directory;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -523,7 +523,7 @@ machine(Directory, "Directory protocol")
|
|||
action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") {
|
||||
peek(unblockNetwork_in, ResponseMsg) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, 1) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.SenderMachine := MachineType:Directory;
|
||||
if (is_valid(tbe)) {
|
||||
|
@ -541,7 +541,7 @@ machine(Directory, "Directory protocol")
|
|||
peek (requestQueue_in, RequestMsg) {
|
||||
TBEs.allocate(address);
|
||||
set_tbe(TBEs[address]);
|
||||
tbe.PhysicalAddress := in_msg.Addr;
|
||||
tbe.PhysicalAddress := in_msg.addr;
|
||||
tbe.Len := in_msg.Len;
|
||||
tbe.DataBlk := in_msg.DataBlk;
|
||||
tbe.Requestor := in_msg.Requestor;
|
||||
|
|
|
@ -123,15 +123,15 @@ machine(DMA, "DMA Controller")
|
|||
if (dmaResponseQueue_in.isReady()) {
|
||||
peek( dmaResponseQueue_in, ResponseMsg) {
|
||||
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
|
||||
trigger(Event:DMA_Ack, makeLineAddress(in_msg.Addr),
|
||||
TBEs[makeLineAddress(in_msg.Addr)]);
|
||||
trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
|
||||
TBEs[makeLineAddress(in_msg.addr)]);
|
||||
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
|
||||
in_msg.Type == CoherenceResponseType:DATA) {
|
||||
trigger(Event:Data, makeLineAddress(in_msg.Addr),
|
||||
TBEs[makeLineAddress(in_msg.Addr)]);
|
||||
trigger(Event:Data, makeLineAddress(in_msg.addr),
|
||||
TBEs[makeLineAddress(in_msg.addr)]);
|
||||
} else if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||
trigger(Event:Inv_Ack, makeLineAddress(in_msg.Addr),
|
||||
TBEs[makeLineAddress(in_msg.Addr)]);
|
||||
trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr),
|
||||
TBEs[makeLineAddress(in_msg.addr)]);
|
||||
} else {
|
||||
error("Invalid response type");
|
||||
}
|
||||
|
@ -144,7 +144,7 @@ machine(DMA, "DMA Controller")
|
|||
if (triggerQueue_in.isReady()) {
|
||||
peek(triggerQueue_in, TriggerMsg) {
|
||||
if (in_msg.Type == TriggerType:ALL_ACKS) {
|
||||
trigger(Event:All_Acks, in_msg.Addr, TBEs[in_msg.Addr]);
|
||||
trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
|
||||
} else {
|
||||
error("Unexpected message");
|
||||
}
|
||||
|
@ -155,7 +155,7 @@ machine(DMA, "DMA Controller")
|
|||
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
|
||||
peek(dmaRequestQueue_in, SequencerMsg) {
|
||||
enqueue(reqToDirectory_out, RequestMsg, request_latency) {
|
||||
out_msg.Addr := in_msg.PhysicalAddress;
|
||||
out_msg.addr := in_msg.PhysicalAddress;
|
||||
out_msg.Type := CoherenceRequestType:DMA_READ;
|
||||
out_msg.DataBlk := in_msg.DataBlk;
|
||||
out_msg.Len := in_msg.Len;
|
||||
|
@ -170,7 +170,7 @@ machine(DMA, "DMA Controller")
|
|||
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
|
||||
peek(dmaRequestQueue_in, SequencerMsg) {
|
||||
enqueue(reqToDirectory_out, RequestMsg, request_latency) {
|
||||
out_msg.Addr := in_msg.PhysicalAddress;
|
||||
out_msg.addr := in_msg.PhysicalAddress;
|
||||
out_msg.Type := CoherenceRequestType:DMA_WRITE;
|
||||
out_msg.DataBlk := in_msg.DataBlk;
|
||||
out_msg.Len := in_msg.Len;
|
||||
|
@ -190,7 +190,7 @@ machine(DMA, "DMA Controller")
|
|||
assert(is_valid(tbe));
|
||||
if (tbe.NumAcks == 0) {
|
||||
enqueue(triggerQueue_out, TriggerMsg) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := TriggerType:ALL_ACKS;
|
||||
}
|
||||
}
|
||||
|
@ -205,7 +205,7 @@ machine(DMA, "DMA Controller")
|
|||
|
||||
action( u_sendExclusiveUnblockToDir, "\u", desc="send exclusive unblock to directory") {
|
||||
enqueue(respToDirectory_out, ResponseMsg, response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
|
||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||
out_msg.Sender := machineID;
|
||||
|
|
|
@ -69,7 +69,7 @@ enumeration(TriggerType, desc="...") {
|
|||
|
||||
// TriggerMsg
|
||||
structure(TriggerMsg, desc="...", interface="Message") {
|
||||
Address Addr, desc="Physical address for this request";
|
||||
Address addr, desc="Physical address for this request";
|
||||
TriggerType Type, desc="Type of trigger";
|
||||
|
||||
bool functionalRead(Packet *pkt) {
|
||||
|
@ -85,7 +85,7 @@ structure(TriggerMsg, desc="...", interface="Message") {
|
|||
|
||||
// RequestMsg (and also forwarded requests)
|
||||
structure(RequestMsg, desc="...", interface="Message") {
|
||||
Address Addr, desc="Physical address for this request";
|
||||
Address addr, desc="Physical address for this request";
|
||||
int Len, desc="Length of Request";
|
||||
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
|
||||
MachineID Requestor, desc="Node who initiated the request";
|
||||
|
@ -101,20 +101,20 @@ structure(RequestMsg, desc="...", interface="Message") {
|
|||
// Read only those messages that contain the data
|
||||
if (Type == CoherenceRequestType:DMA_READ ||
|
||||
Type == CoherenceRequestType:DMA_WRITE) {
|
||||
return testAndRead(Addr, DataBlk, pkt);
|
||||
return testAndRead(addr, DataBlk, pkt);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool functionalWrite(Packet *pkt) {
|
||||
// No check required since all messages are written
|
||||
return testAndWrite(Addr, DataBlk, pkt);
|
||||
return testAndWrite(addr, DataBlk, pkt);
|
||||
}
|
||||
}
|
||||
|
||||
// ResponseMsg (and also unblock requests)
|
||||
structure(ResponseMsg, desc="...", interface="Message") {
|
||||
Address Addr, desc="Physical address for this request";
|
||||
Address addr, desc="Physical address for this request";
|
||||
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
|
||||
MachineID Sender, desc="Node who sent the data";
|
||||
MachineType SenderMachine, desc="type of component sending msg";
|
||||
|
@ -130,13 +130,13 @@ structure(ResponseMsg, desc="...", interface="Message") {
|
|||
Type == CoherenceResponseType:DATA_EXCLUSIVE ||
|
||||
Type == CoherenceResponseType:WRITEBACK_CLEAN_DATA ||
|
||||
Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
|
||||
return testAndRead(Addr, DataBlk, pkt);
|
||||
return testAndRead(addr, DataBlk, pkt);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool functionalWrite(Packet *pkt) {
|
||||
// No check required since all messages are written
|
||||
return testAndWrite(Addr, DataBlk, pkt);
|
||||
return testAndWrite(addr, DataBlk, pkt);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -150,7 +150,7 @@ machine(L1Cache, "Token protocol")
|
|||
|
||||
// TBE fields
|
||||
structure(TBE, desc="...") {
|
||||
Address Addr, desc="Physical address for this TBE";
|
||||
Address addr, desc="Physical address for this TBE";
|
||||
State TBEState, desc="Transient state";
|
||||
int IssueCount, default="0", desc="The number of times we've issued a request for this line.";
|
||||
Address PC, desc="Program counter of request";
|
||||
|
@ -494,47 +494,47 @@ machine(L1Cache, "Token protocol")
|
|||
// Persistent Network
|
||||
in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache, rank=3) {
|
||||
if (persistentNetwork_in.isReady()) {
|
||||
peek(persistentNetwork_in, PersistentMsg, block_on="Addr") {
|
||||
peek(persistentNetwork_in, PersistentMsg, block_on="addr") {
|
||||
assert(in_msg.Destination.isElement(machineID));
|
||||
|
||||
// Apply the lockdown or unlockdown message to the table
|
||||
if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
|
||||
persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Write);
|
||||
persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
|
||||
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
|
||||
persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Read);
|
||||
persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
|
||||
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
|
||||
persistentTable.persistentRequestUnlock(in_msg.Addr, in_msg.Requestor);
|
||||
persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
|
||||
} else {
|
||||
error("Unexpected message");
|
||||
}
|
||||
|
||||
// React to the message based on the current state of the table
|
||||
Entry cache_entry := getCacheEntry(in_msg.Addr);
|
||||
TBE tbe := L1_TBEs[in_msg.Addr];
|
||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||
TBE tbe := L1_TBEs[in_msg.addr];
|
||||
|
||||
if (persistentTable.isLocked(in_msg.Addr)) {
|
||||
if (persistentTable.findSmallest(in_msg.Addr) == machineID) {
|
||||
if (persistentTable.isLocked(in_msg.addr)) {
|
||||
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
|
||||
// Our Own Lock - this processor is highest priority
|
||||
trigger(Event:Own_Lock_or_Unlock, in_msg.Addr,
|
||||
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
|
||||
cache_entry, tbe);
|
||||
} else {
|
||||
if (persistentTable.typeOfSmallest(in_msg.Addr) == AccessType:Read) {
|
||||
if (persistentTable.typeOfSmallest(in_msg.addr) == AccessType:Read) {
|
||||
if (getTokens(cache_entry) == 1 ||
|
||||
getTokens(cache_entry) == (max_tokens() / 2) + 1) {
|
||||
trigger(Event:Persistent_GETS_Last_Token, in_msg.Addr,
|
||||
trigger(Event:Persistent_GETS_Last_Token, in_msg.addr,
|
||||
cache_entry, tbe);
|
||||
} else {
|
||||
trigger(Event:Persistent_GETS, in_msg.Addr,
|
||||
trigger(Event:Persistent_GETS, in_msg.addr,
|
||||
cache_entry, tbe);
|
||||
}
|
||||
} else {
|
||||
trigger(Event:Persistent_GETX, in_msg.Addr,
|
||||
trigger(Event:Persistent_GETX, in_msg.addr,
|
||||
cache_entry, tbe);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Unlock case - no entries in the table
|
||||
trigger(Event:Own_Lock_or_Unlock, in_msg.Addr,
|
||||
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
|
||||
cache_entry, tbe);
|
||||
}
|
||||
}
|
||||
|
@ -544,42 +544,42 @@ machine(L1Cache, "Token protocol")
|
|||
// Response Network
|
||||
in_port(responseNetwork_in, ResponseMsg, responseToL1Cache, rank=2) {
|
||||
if (responseNetwork_in.isReady()) {
|
||||
peek(responseNetwork_in, ResponseMsg, block_on="Addr") {
|
||||
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
|
||||
assert(in_msg.Destination.isElement(machineID));
|
||||
|
||||
Entry cache_entry := getCacheEntry(in_msg.Addr);
|
||||
TBE tbe := L1_TBEs[in_msg.Addr];
|
||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||
TBE tbe := L1_TBEs[in_msg.addr];
|
||||
|
||||
// Mark TBE flag if response received off-chip. Use this to update average latency estimate
|
||||
if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
|
||||
|
||||
if (in_msg.Sender == mapAddressToRange(in_msg.Addr,
|
||||
if (in_msg.Sender == mapAddressToRange(in_msg.addr,
|
||||
MachineType:L2Cache, l2_select_low_bit,
|
||||
l2_select_num_bits, intToID(0))) {
|
||||
|
||||
// came from an off-chip L2 cache
|
||||
if (is_valid(tbe)) {
|
||||
// L1_TBEs[in_msg.Addr].ExternalResponse := true;
|
||||
// profile_offchipL2_response(in_msg.Addr);
|
||||
// L1_TBEs[in_msg.addr].ExternalResponse := true;
|
||||
// profile_offchipL2_response(in_msg.addr);
|
||||
}
|
||||
}
|
||||
else {
|
||||
// profile_onchipL2_response(in_msg.Addr );
|
||||
// profile_onchipL2_response(in_msg.addr );
|
||||
}
|
||||
} else if ( machineIDToMachineType(in_msg.Sender) == MachineType:Directory ) {
|
||||
if (is_valid(tbe)) {
|
||||
setExternalResponse(tbe);
|
||||
// profile_memory_response( in_msg.Addr);
|
||||
// profile_memory_response( in_msg.addr);
|
||||
}
|
||||
} else if ( machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
|
||||
//if (isLocalProcessor(machineID, in_msg.Sender) == false) {
|
||||
//if (is_valid(tbe)) {
|
||||
// tbe.ExternalResponse := true;
|
||||
// profile_offchipL1_response(in_msg.Addr );
|
||||
// profile_offchipL1_response(in_msg.addr );
|
||||
//}
|
||||
//}
|
||||
//else {
|
||||
// profile_onchipL1_response(in_msg.Addr );
|
||||
// profile_onchipL1_response(in_msg.addr );
|
||||
//}
|
||||
} else {
|
||||
error("unexpected SenderMachine");
|
||||
|
@ -589,21 +589,21 @@ machine(L1Cache, "Token protocol")
|
|||
if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
|
||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||
assert(in_msg.Tokens < (max_tokens() / 2));
|
||||
trigger(Event:Ack, in_msg.Addr, cache_entry, tbe);
|
||||
trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
|
||||
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
|
||||
trigger(Event:Data_Owner, in_msg.Addr, cache_entry, tbe);
|
||||
trigger(Event:Data_Owner, in_msg.addr, cache_entry, tbe);
|
||||
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
|
||||
assert(in_msg.Tokens < (max_tokens() / 2));
|
||||
trigger(Event:Data_Shared, in_msg.Addr, cache_entry, tbe);
|
||||
trigger(Event:Data_Shared, in_msg.addr, cache_entry, tbe);
|
||||
} else {
|
||||
error("Unexpected message");
|
||||
}
|
||||
} else {
|
||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||
assert(in_msg.Tokens < (max_tokens() / 2));
|
||||
trigger(Event:Ack_All_Tokens, in_msg.Addr, cache_entry, tbe);
|
||||
trigger(Event:Ack_All_Tokens, in_msg.addr, cache_entry, tbe);
|
||||
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
|
||||
trigger(Event:Data_All_Tokens, in_msg.Addr, cache_entry, tbe);
|
||||
trigger(Event:Data_All_Tokens, in_msg.addr, cache_entry, tbe);
|
||||
} else {
|
||||
error("Unexpected message");
|
||||
}
|
||||
|
@ -615,40 +615,40 @@ machine(L1Cache, "Token protocol")
|
|||
// Request Network
|
||||
in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
|
||||
if (requestNetwork_in.isReady()) {
|
||||
peek(requestNetwork_in, RequestMsg, block_on="Addr") {
|
||||
peek(requestNetwork_in, RequestMsg, block_on="addr") {
|
||||
assert(in_msg.Destination.isElement(machineID));
|
||||
|
||||
Entry cache_entry := getCacheEntry(in_msg.Addr);
|
||||
TBE tbe := L1_TBEs[in_msg.Addr];
|
||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||
TBE tbe := L1_TBEs[in_msg.addr];
|
||||
|
||||
if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||
if (in_msg.isLocal) {
|
||||
trigger(Event:Transient_Local_GETX, in_msg.Addr,
|
||||
trigger(Event:Transient_Local_GETX, in_msg.addr,
|
||||
cache_entry, tbe);
|
||||
}
|
||||
else {
|
||||
trigger(Event:Transient_GETX, in_msg.Addr,
|
||||
trigger(Event:Transient_GETX, in_msg.addr,
|
||||
cache_entry, tbe);
|
||||
}
|
||||
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||
if (getTokens(cache_entry) == 1 ||
|
||||
getTokens(cache_entry) == (max_tokens() / 2) + 1) {
|
||||
if (in_msg.isLocal) {
|
||||
trigger(Event:Transient_Local_GETS_Last_Token, in_msg.Addr,
|
||||
trigger(Event:Transient_Local_GETS_Last_Token, in_msg.addr,
|
||||
cache_entry, tbe);
|
||||
}
|
||||
else {
|
||||
trigger(Event:Transient_GETS_Last_Token, in_msg.Addr,
|
||||
trigger(Event:Transient_GETS_Last_Token, in_msg.addr,
|
||||
cache_entry, tbe);
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (in_msg.isLocal) {
|
||||
trigger(Event:Transient_Local_GETS, in_msg.Addr,
|
||||
trigger(Event:Transient_Local_GETS, in_msg.addr,
|
||||
cache_entry, tbe);
|
||||
}
|
||||
else {
|
||||
trigger(Event:Transient_GETS, in_msg.Addr,
|
||||
trigger(Event:Transient_GETS, in_msg.addr,
|
||||
cache_entry, tbe);
|
||||
}
|
||||
}
|
||||
|
@ -748,7 +748,7 @@ machine(L1Cache, "Token protocol")
|
|||
// Issue a persistent request if possible
|
||||
if (okToIssueStarving(address, machineID) && (starving == false)) {
|
||||
enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
|
||||
out_msg.Requestor := machineID;
|
||||
out_msg.Destination.broadcast(MachineType:L1Cache);
|
||||
|
@ -800,7 +800,7 @@ machine(L1Cache, "Token protocol")
|
|||
} else {
|
||||
// Make a normal request
|
||||
enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceRequestType:GETS;
|
||||
out_msg.Requestor := machineID;
|
||||
out_msg.Destination.add(mapAddressToRange(address,
|
||||
|
@ -819,7 +819,7 @@ machine(L1Cache, "Token protocol")
|
|||
|
||||
// send to other local L1s, with local bit set
|
||||
enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceRequestType:GETS;
|
||||
out_msg.Requestor := machineID;
|
||||
//
|
||||
|
@ -867,7 +867,7 @@ machine(L1Cache, "Token protocol")
|
|||
// Issue a persistent request if possible
|
||||
if ( okToIssueStarving(address, machineID) && (starving == false)) {
|
||||
enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
|
||||
out_msg.Requestor := machineID;
|
||||
out_msg.Destination.broadcast(MachineType:L1Cache);
|
||||
|
@ -919,7 +919,7 @@ machine(L1Cache, "Token protocol")
|
|||
} else {
|
||||
// Make a normal request
|
||||
enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceRequestType:GETX;
|
||||
out_msg.Requestor := machineID;
|
||||
|
||||
|
@ -940,7 +940,7 @@ machine(L1Cache, "Token protocol")
|
|||
|
||||
// send to other local L1s too
|
||||
enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceRequestType:GETX;
|
||||
out_msg.Requestor := machineID;
|
||||
out_msg.isLocal := true;
|
||||
|
@ -981,7 +981,7 @@ machine(L1Cache, "Token protocol")
|
|||
peek(responseNetwork_in, ResponseMsg) {
|
||||
// FIXME, should use a 3rd vnet
|
||||
enqueue(responseNetwork_out, ResponseMsg, 1) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := in_msg.Type;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||
|
@ -996,7 +996,7 @@ machine(L1Cache, "Token protocol")
|
|||
action(c_ownedReplacement, "c", desc="Issue writeback") {
|
||||
assert(is_valid(cache_entry));
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Sender := machineID;
|
||||
|
||||
out_msg.Destination.add(mapAddressToRange(address,
|
||||
|
@ -1020,7 +1020,7 @@ machine(L1Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
assert (cache_entry.Tokens > 0);
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Sender := machineID;
|
||||
|
||||
out_msg.Destination.add(mapAddressToRange(address,
|
||||
|
@ -1042,7 +1042,7 @@ machine(L1Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
if (cache_entry.Tokens > 0) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Sender := machineID;
|
||||
|
||||
out_msg.Destination.add(mapAddressToRange(address,
|
||||
|
@ -1067,7 +1067,7 @@ machine(L1Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
peek(requestNetwork_in, RequestMsg) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -1091,7 +1091,7 @@ machine(L1Cache, "Token protocol")
|
|||
peek(requestNetwork_in, RequestMsg) {
|
||||
if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -1109,7 +1109,7 @@ machine(L1Cache, "Token protocol")
|
|||
}
|
||||
else if (cache_entry.Tokens > 1) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -1133,7 +1133,7 @@ machine(L1Cache, "Token protocol")
|
|||
peek(requestNetwork_in, RequestMsg) {
|
||||
assert(is_valid(cache_entry));
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -1156,7 +1156,7 @@ machine(L1Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
if (cache_entry.Tokens > 0) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
if (cache_entry.Tokens > (max_tokens() / 2)) {
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
} else {
|
||||
|
@ -1178,7 +1178,7 @@ machine(L1Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
assert(cache_entry.Tokens > 0);
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||
|
@ -1197,7 +1197,7 @@ machine(L1Cache, "Token protocol")
|
|||
assert(cache_entry.Tokens > 0);
|
||||
if (cache_entry.Tokens > 1) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
if (cache_entry.Tokens > (max_tokens() / 2)) {
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
} else {
|
||||
|
@ -1227,7 +1227,7 @@ machine(L1Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
assert(cache_entry.Tokens > ((max_tokens() / 2) + 1));
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||
|
@ -1252,7 +1252,7 @@ machine(L1Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
assert(cache_entry.Tokens == ((max_tokens() / 2) + 1));
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||
|
@ -1272,7 +1272,7 @@ machine(L1Cache, "Token protocol")
|
|||
// assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
|
||||
// FIXME, should use a 3rd vnet in some cases
|
||||
enqueue(responseNetwork_out, ResponseMsg, 1) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := in_msg.Type;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||
|
@ -1384,7 +1384,7 @@ machine(L1Cache, "Token protocol")
|
|||
|
||||
action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:INV;
|
||||
out_msg.Tokens := 0;
|
||||
out_msg.Sender := machineID;
|
||||
|
@ -1401,7 +1401,7 @@ machine(L1Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
assert(in_msg.Tokens != 0);
|
||||
DPRINTF(RubySlicc, "L1 received tokens for address: %s, tokens: %d\n",
|
||||
in_msg.Addr, in_msg.Tokens);
|
||||
in_msg.addr, in_msg.Tokens);
|
||||
cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
|
||||
DPRINTF(RubySlicc, "%d\n", cache_entry.Tokens);
|
||||
|
||||
|
@ -1418,7 +1418,7 @@ machine(L1Cache, "Token protocol")
|
|||
// assert(starving);
|
||||
outstandingRequests := outstandingRequests - 1;
|
||||
enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
|
||||
out_msg.Requestor := machineID;
|
||||
out_msg.Destination.broadcast(MachineType:L1Cache);
|
||||
|
@ -1465,7 +1465,7 @@ machine(L1Cache, "Token protocol")
|
|||
if (cache_entry.Tokens > 0) {
|
||||
peek(requestNetwork_in, RequestMsg) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
if (cache_entry.Tokens > (max_tokens() / 2)) {
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
} else {
|
||||
|
|
|
@ -331,33 +331,33 @@ machine(L2Cache, "Token protocol")
|
|||
assert(in_msg.Destination.isElement(machineID));
|
||||
|
||||
if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
|
||||
persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Write);
|
||||
persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
|
||||
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
|
||||
persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Read);
|
||||
persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
|
||||
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
|
||||
persistentTable.persistentRequestUnlock(in_msg.Addr, in_msg.Requestor);
|
||||
persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
|
||||
} else {
|
||||
error("Unexpected message");
|
||||
}
|
||||
|
||||
Entry cache_entry := getCacheEntry(in_msg.Addr);
|
||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||
// React to the message based on the current state of the table
|
||||
if (persistentTable.isLocked(in_msg.Addr)) {
|
||||
if (persistentTable.isLocked(in_msg.addr)) {
|
||||
|
||||
if (persistentTable.typeOfSmallest(in_msg.Addr) == AccessType:Read) {
|
||||
if (persistentTable.typeOfSmallest(in_msg.addr) == AccessType:Read) {
|
||||
if (getTokens(cache_entry) == 1 ||
|
||||
getTokens(cache_entry) == (max_tokens() / 2) + 1) {
|
||||
trigger(Event:Persistent_GETS_Last_Token, in_msg.Addr,
|
||||
trigger(Event:Persistent_GETS_Last_Token, in_msg.addr,
|
||||
cache_entry);
|
||||
} else {
|
||||
trigger(Event:Persistent_GETS, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Persistent_GETS, in_msg.addr, cache_entry);
|
||||
}
|
||||
} else {
|
||||
trigger(Event:Persistent_GETX, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Persistent_GETX, in_msg.addr, cache_entry);
|
||||
}
|
||||
}
|
||||
else {
|
||||
trigger(Event:Own_Lock_or_Unlock, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Own_Lock_or_Unlock, in_msg.addr, cache_entry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -370,16 +370,16 @@ machine(L2Cache, "Token protocol")
|
|||
peek(requestNetwork_in, RequestMsg) {
|
||||
assert(in_msg.Destination.isElement(machineID));
|
||||
|
||||
Entry cache_entry := getCacheEntry(in_msg.Addr);
|
||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||
if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||
trigger(Event:Transient_GETX, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Transient_GETX, in_msg.addr, cache_entry);
|
||||
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||
if (getTokens(cache_entry) == 1) {
|
||||
trigger(Event:Transient_GETS_Last_Token, in_msg.Addr,
|
||||
trigger(Event:Transient_GETS_Last_Token, in_msg.addr,
|
||||
cache_entry);
|
||||
}
|
||||
else {
|
||||
trigger(Event:Transient_GETS, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Transient_GETS, in_msg.addr, cache_entry);
|
||||
}
|
||||
} else {
|
||||
error("Unexpected message");
|
||||
|
@ -392,16 +392,16 @@ machine(L2Cache, "Token protocol")
|
|||
if (L1requestNetwork_in.isReady()) {
|
||||
peek(L1requestNetwork_in, RequestMsg) {
|
||||
assert(in_msg.Destination.isElement(machineID));
|
||||
Entry cache_entry := getCacheEntry(in_msg.Addr);
|
||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||
if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||
trigger(Event:L1_GETX, in_msg.Addr, cache_entry);
|
||||
trigger(Event:L1_GETX, in_msg.addr, cache_entry);
|
||||
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||
if (getTokens(cache_entry) == 1 ||
|
||||
getTokens(cache_entry) == (max_tokens() / 2) + 1) {
|
||||
trigger(Event:L1_GETS_Last_Token, in_msg.Addr, cache_entry);
|
||||
trigger(Event:L1_GETS_Last_Token, in_msg.addr, cache_entry);
|
||||
}
|
||||
else {
|
||||
trigger(Event:L1_GETS, in_msg.Addr, cache_entry);
|
||||
trigger(Event:L1_GETS, in_msg.addr, cache_entry);
|
||||
}
|
||||
} else {
|
||||
error("Unexpected message");
|
||||
|
@ -416,80 +416,80 @@ machine(L2Cache, "Token protocol")
|
|||
if (responseNetwork_in.isReady()) {
|
||||
peek(responseNetwork_in, ResponseMsg) {
|
||||
assert(in_msg.Destination.isElement(machineID));
|
||||
Entry cache_entry := getCacheEntry(in_msg.Addr);
|
||||
Entry cache_entry := getCacheEntry(in_msg.addr);
|
||||
|
||||
if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
|
||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||
assert(in_msg.Tokens < (max_tokens() / 2));
|
||||
trigger(Event:Ack, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Ack, in_msg.addr, cache_entry);
|
||||
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
|
||||
trigger(Event:Data_Owner, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Data_Owner, in_msg.addr, cache_entry);
|
||||
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
|
||||
trigger(Event:Data_Shared, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Data_Shared, in_msg.addr, cache_entry);
|
||||
} else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
|
||||
in_msg.Type == CoherenceResponseType:WB_OWNED ||
|
||||
in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
||||
|
||||
if (L2cache.cacheAvail(in_msg.Addr) || is_valid(cache_entry)) {
|
||||
if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {
|
||||
|
||||
// either room is available or the block is already present
|
||||
|
||||
if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
|
||||
assert(in_msg.Dirty == false);
|
||||
trigger(Event:Writeback_Tokens, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Writeback_Tokens, in_msg.addr, cache_entry);
|
||||
} else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
||||
assert(in_msg.Dirty == false);
|
||||
trigger(Event:Writeback_Shared_Data, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Writeback_Shared_Data, in_msg.addr, cache_entry);
|
||||
}
|
||||
else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
|
||||
//assert(in_msg.Dirty == false);
|
||||
trigger(Event:Writeback_Owned, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Writeback_Owned, in_msg.addr, cache_entry);
|
||||
}
|
||||
}
|
||||
else {
|
||||
trigger(Event:L2_Replacement,
|
||||
L2cache.cacheProbe(in_msg.Addr),
|
||||
getCacheEntry(L2cache.cacheProbe(in_msg.Addr)));
|
||||
L2cache.cacheProbe(in_msg.addr),
|
||||
getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
|
||||
}
|
||||
} else if (in_msg.Type == CoherenceResponseType:INV) {
|
||||
trigger(Event:L1_INV, in_msg.Addr, cache_entry);
|
||||
trigger(Event:L1_INV, in_msg.addr, cache_entry);
|
||||
} else {
|
||||
error("Unexpected message");
|
||||
}
|
||||
} else {
|
||||
if (in_msg.Type == CoherenceResponseType:ACK) {
|
||||
assert(in_msg.Tokens < (max_tokens() / 2));
|
||||
trigger(Event:Ack_All_Tokens, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Ack_All_Tokens, in_msg.addr, cache_entry);
|
||||
} else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
|
||||
in_msg.Type == CoherenceResponseType:DATA_SHARED) {
|
||||
trigger(Event:Data_All_Tokens, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Data_All_Tokens, in_msg.addr, cache_entry);
|
||||
} else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
|
||||
in_msg.Type == CoherenceResponseType:WB_OWNED ||
|
||||
in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
||||
if (L2cache.cacheAvail(in_msg.Addr) || is_valid(cache_entry)) {
|
||||
if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {
|
||||
|
||||
// either room is available or the block is already present
|
||||
|
||||
if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
|
||||
assert(in_msg.Dirty == false);
|
||||
assert( (getState(cache_entry, in_msg.Addr) != State:NP)
|
||||
&& (getState(cache_entry, in_msg.Addr) != State:I) );
|
||||
trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
|
||||
assert( (getState(cache_entry, in_msg.addr) != State:NP)
|
||||
&& (getState(cache_entry, in_msg.addr) != State:I) );
|
||||
trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
|
||||
} else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
||||
assert(in_msg.Dirty == false);
|
||||
trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
|
||||
}
|
||||
else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
|
||||
trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
|
||||
trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
|
||||
}
|
||||
}
|
||||
else {
|
||||
trigger(Event:L2_Replacement,
|
||||
L2cache.cacheProbe(in_msg.Addr),
|
||||
getCacheEntry(L2cache.cacheProbe(in_msg.Addr)));
|
||||
L2cache.cacheProbe(in_msg.addr),
|
||||
getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
|
||||
}
|
||||
} else if (in_msg.Type == CoherenceResponseType:INV) {
|
||||
trigger(Event:L1_INV, in_msg.Addr, cache_entry);
|
||||
trigger(Event:L1_INV, in_msg.addr, cache_entry);
|
||||
} else {
|
||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||
error("Unexpected message");
|
||||
|
@ -508,7 +508,7 @@ machine(L2Cache, "Token protocol")
|
|||
|
||||
// if this is a retry or no local sharers, broadcast normally
|
||||
enqueue(globalRequestNetwork_out, RequestMsg, l2_request_latency) {
|
||||
out_msg.Addr := in_msg.Addr;
|
||||
out_msg.addr := in_msg.addr;
|
||||
out_msg.Type := in_msg.Type;
|
||||
out_msg.Requestor := in_msg.Requestor;
|
||||
out_msg.RetryNum := in_msg.RetryNum;
|
||||
|
@ -537,7 +537,7 @@ machine(L2Cache, "Token protocol")
|
|||
peek(responseNetwork_in, ResponseMsg) {
|
||||
// FIXME, should use a 3rd vnet
|
||||
enqueue(responseNetwork_out, ResponseMsg, 1) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := in_msg.Type;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||
|
@ -553,7 +553,7 @@ machine(L2Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
if (cache_entry.Tokens > 0) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:ACK;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||
|
@ -567,7 +567,7 @@ machine(L2Cache, "Token protocol")
|
|||
action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
|
||||
assert(is_valid(cache_entry));
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(map_Address_to_Directory(address));
|
||||
out_msg.Tokens := cache_entry.Tokens;
|
||||
|
@ -590,7 +590,7 @@ machine(L2Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -603,7 +603,7 @@ machine(L2Cache, "Token protocol")
|
|||
}
|
||||
else {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -621,7 +621,7 @@ machine(L2Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
peek(requestNetwork_in, RequestMsg) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -639,7 +639,7 @@ machine(L2Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
if (cache_entry.Tokens > 0) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:ACK;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||
|
@ -654,7 +654,7 @@ machine(L2Cache, "Token protocol")
|
|||
action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
|
||||
assert(is_valid(cache_entry));
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||
|
@ -673,7 +673,7 @@ machine(L2Cache, "Token protocol")
|
|||
assert(cache_entry.Tokens > 0);
|
||||
if (cache_entry.Tokens > 1) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:ACK;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||
|
@ -690,7 +690,7 @@ machine(L2Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||
|
@ -707,7 +707,7 @@ machine(L2Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||
|
@ -726,7 +726,7 @@ machine(L2Cache, "Token protocol")
|
|||
peek(responseNetwork_in, ResponseMsg) {
|
||||
// FIXME, should use a 3rd vnet in some cases
|
||||
enqueue(responseNetwork_out, ResponseMsg, 1) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := in_msg.Type;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||
|
@ -743,7 +743,7 @@ machine(L2Cache, "Token protocol")
|
|||
peek(responseNetwork_in, ResponseMsg) {
|
||||
// FIXME, should use a 3rd vnet in some cases
|
||||
enqueue(responseNetwork_out, ResponseMsg, 1) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
|
||||
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
||||
} else {
|
||||
|
@ -765,7 +765,7 @@ machine(L2Cache, "Token protocol")
|
|||
peek(responseNetwork_in, ResponseMsg) {
|
||||
// FIXME, should use a 3rd vnet in some cases
|
||||
enqueue(responseNetwork_out, ResponseMsg, 1) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(persistentTable.findSmallest(address));
|
||||
|
@ -780,20 +780,20 @@ machine(L2Cache, "Token protocol")
|
|||
|
||||
action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
|
||||
peek(responseNetwork_in, ResponseMsg) {
|
||||
removeSharer(in_msg.Addr, machineIDToNodeID(in_msg.Sender));
|
||||
removeSharer(in_msg.addr, machineIDToNodeID(in_msg.Sender));
|
||||
}
|
||||
}
|
||||
|
||||
action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
|
||||
peek(requestNetwork_in, RequestMsg) {
|
||||
if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.Addr) == false) {
|
||||
if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.addr) == false) {
|
||||
//profile_filter_action(1);
|
||||
DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
|
||||
in_msg.RetryNum);
|
||||
}
|
||||
else {
|
||||
enqueue(localRequestNetwork_out, RequestMsg, l2_response_latency ) {
|
||||
out_msg.Addr := in_msg.Addr;
|
||||
out_msg.addr := in_msg.addr;
|
||||
out_msg.Requestor := in_msg.Requestor;
|
||||
|
||||
//
|
||||
|
@ -819,7 +819,7 @@ machine(L2Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
assert(cache_entry.Tokens > 0);
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_SHARED;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -837,7 +837,7 @@ machine(L2Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -855,7 +855,7 @@ machine(L2Cache, "Token protocol")
|
|||
assert(is_valid(cache_entry));
|
||||
// assert(cache_entry.Tokens == max_tokens());
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:DATA_OWNER;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -904,9 +904,9 @@ machine(L2Cache, "Token protocol")
|
|||
peek(L1requestNetwork_in, RequestMsg) {
|
||||
if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
|
||||
if (in_msg.Type == CoherenceRequestType:GETX) {
|
||||
setNewWriter(in_msg.Addr, machineIDToNodeID(in_msg.Requestor));
|
||||
setNewWriter(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
|
||||
} else if (in_msg.Type == CoherenceRequestType:GETS) {
|
||||
addNewSharer(in_msg.Addr, machineIDToNodeID(in_msg.Requestor));
|
||||
addNewSharer(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -930,7 +930,7 @@ machine(L2Cache, "Token protocol")
|
|||
if (cache_entry.Tokens > 0) {
|
||||
peek(requestNetwork_in, RequestMsg) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:ACK;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
@ -948,7 +948,7 @@ machine(L2Cache, "Token protocol")
|
|||
if (cache_entry.Tokens > 0) {
|
||||
peek(L1requestNetwork_in, RequestMsg) {
|
||||
enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
|
||||
out_msg.Addr := address;
|
||||
out_msg.addr := address;
|
||||
out_msg.Type := CoherenceResponseType:ACK;
|
||||
out_msg.Sender := machineID;
|
||||
out_msg.Destination.add(in_msg.Requestor);
|
||||
|
|
|
@ -280,9 +280,9 @@ machine(Directory, "Token protocol")
|
|||
if (memQueue_in.isReady()) {
|
||||
peek(memQueue_in, MemoryMsg) {
|
||||
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
|
||||
trigger(Event:Memory_Data, in_msg.Addr, TBEs[in_msg.Addr]);
|
||||
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
|
||||
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
|
||||
trigger(Event:Memory_Ack, in_msg.Addr, TBEs[in_msg.Addr]);
|
||||
trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
|
||||
} else {
|
||||
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
|
||||
error("Invalid message");
|
||||
|
@ -303,32 +303,32 @@ machine(Directory, "Token protocol")
|
|||
if (responseNetwork_in.isReady()) {
|
||||
peek(responseNetwork_in, ResponseMsg) {
|
||||
assert(in_msg.Destination.isElement(machineID));
if (getDirectoryEntry(in_msg.Addr).Tokens + in_msg.Tokens == max_tokens()) {
if (getDirectoryEntry(in_msg.addr).Tokens + in_msg.Tokens == max_tokens()) {
if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
trigger(Event:Data_All_Tokens, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Data_All_Tokens, in_msg.addr,
TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner_All_Tokens, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack_All_Tokens, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Ack_All_Tokens, in_msg.addr,
TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
}
} else {
if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
trigger(Event:Data_Owner, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Data_Owner, in_msg.addr,
TBEs[in_msg.addr]);
} else if ((in_msg.Type == CoherenceResponseType:ACK) ||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
trigger(Event:Tokens, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Tokens, in_msg.addr,
TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Ack_Owner, in_msg.addr,
TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");

@@ -346,52 +346,52 @@ machine(Directory, "Token protocol")
if (distributed_persistent) {
// Apply the lockdown or unlockdown message to the table
if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Write);
persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Read);
persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
persistentTable.persistentRequestUnlock(in_msg.Addr, in_msg.Requestor);
persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
} else {
error("Invalid message");
}

// React to the message based on the current state of the table
if (persistentTable.isLocked(in_msg.Addr)) {
if (persistentTable.findSmallest(in_msg.Addr) == machineID) {
if (getDirectoryEntry(in_msg.Addr).Tokens > 0) {
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Addr,
TBEs[in_msg.Addr]);
if (persistentTable.isLocked(in_msg.addr)) {
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
TBEs[in_msg.addr]);
} else {
trigger(Event:Own_Lock_or_Unlock, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
TBEs[in_msg.addr]);
}
} else {
// locked
trigger(Event:Lockdown, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
}
} else {
// unlocked
trigger(Event:Unlockdown, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
}
}
else {
if (persistentTable.findSmallest(in_msg.Addr) == machineID) {
if (getDirectoryEntry(in_msg.Addr).Tokens > 0) {
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Addr,
TBEs[in_msg.Addr]);
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
TBEs[in_msg.addr]);
} else {
trigger(Event:Own_Lock_or_Unlock, in_msg.Addr,
TBEs[in_msg.Addr]);
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
TBEs[in_msg.addr]);
}
} else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
// locked
trigger(Event:Lockdown, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
// locked
trigger(Event:Lockdown, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
// unlocked
trigger(Event:Unlockdown, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
} else {
error("Invalid message");
}

@@ -405,9 +405,9 @@ machine(Directory, "Token protocol")
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:GETX, in_msg.Addr, TBEs[in_msg.Addr]);
trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
} else {
error("Invalid message");
}

@@ -442,11 +442,11 @@ machine(Directory, "Token protocol")
if (getDirectoryEntry(address).Tokens > 0) {
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, directory_latency) {// FIXME?
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.Tokens := getDirectoryEntry(in_msg.Addr).Tokens;
out_msg.Tokens := getDirectoryEntry(in_msg.addr).Tokens;
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}

@@ -457,7 +457,7 @@ machine(Directory, "Token protocol")
action(px_tryIssuingPersistentGETXRequest, "px", desc="...") {
if (okToIssueStarving(address, machineID) && (starving == false)) {
enqueue(persistentNetwork_out, PersistentMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
out_msg.Requestor := machineID;
out_msg.Destination.broadcast(MachineType:L1Cache);

@@ -502,7 +502,7 @@ machine(Directory, "Token protocol")
//
assert(getDirectoryEntry(address).Tokens != max_tokens());
enqueue(requestNetwork_out, RequestMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;

@@ -525,7 +525,7 @@ machine(Directory, "Token protocol")
action(ps_tryIssuingPersistentGETSRequest, "ps", desc="...") {
if (okToIssueStarving(address, machineID) && (starving == false)) {
enqueue(persistentNetwork_out, PersistentMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
out_msg.Requestor := machineID;
out_msg.Destination.broadcast(MachineType:L1Cache);

@@ -566,7 +566,7 @@ machine(Directory, "Token protocol")
action(br_broadcastRead, "br", desc="Broadcast GETS for data") {
peek(dmaRequestQueue_in, DMARequestMsg) {
enqueue(requestNetwork_out, RequestMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;

@@ -590,7 +590,7 @@ machine(Directory, "Token protocol")
// Only send a message if we have tokens to send
if (getDirectoryEntry(address).Tokens > 0) {
enqueue(responseNetwork_out, ResponseMsg, directory_latency) {// FIXME?
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));

@@ -604,12 +604,12 @@ machine(Directory, "Token protocol")
action(d_sendMemoryDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
assert(getDirectoryEntry(address).Tokens > 0);
out_msg.Tokens := getDirectoryEntry(in_msg.Addr).Tokens;
out_msg.Tokens := getDirectoryEntry(in_msg.addr).Tokens;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;

@@ -621,7 +621,7 @@ machine(Directory, "Token protocol")
action(dd_sendMemDataToStarver, "\d", desc="Send data and tokens to starver") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));

@@ -637,7 +637,7 @@ machine(Directory, "Token protocol")

action(de_sendTbeDataToStarver, "de", desc="Send data and tokens to starver") {
enqueue(responseNetwork_out, ResponseMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));

@@ -706,7 +706,7 @@ machine(Directory, "Token protocol")
assert(starving);

enqueue(persistentNetwork_out, PersistentMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
out_msg.Requestor := machineID;
out_msg.Destination.broadcast(MachineType:L1Cache);

@@ -787,7 +787,7 @@ machine(Directory, "Token protocol")
action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
peek(responseNetwork_in, ResponseMsg) {
enqueue(responseNetwork_out, ResponseMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := in_msg.Type;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));

@@ -832,7 +832,7 @@ machine(Directory, "Token protocol")
// token. In essence we're converting an ACK_OWNER message to a
// DATA_OWNER message, keeping the number of tokens the same.
enqueue(responseNetwork_out, ResponseMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.Destination.add(persistentTable.findSmallest(address));

@@ -58,7 +58,7 @@ enumeration(CoherenceResponseType, desc="...") {

// PersistentMsg
structure(PersistentMsg, desc="...", interface="Message") {
Address Addr, desc="Physical address for this request";
Address addr, desc="Physical address for this request";
PersistentRequestType Type, desc="Type of starvation request";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Destination set";

@@ -79,7 +79,7 @@ structure(PersistentMsg, desc="...", interface="Message") {

// RequestMsg
structure(RequestMsg, desc="...", interface="Message") {
Address Addr, desc="Physical address for this request";
Address addr, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Multicast destination mask";

@@ -102,7 +102,7 @@ structure(RequestMsg, desc="...", interface="Message") {

// ResponseMsg
structure(ResponseMsg, desc="...", interface="Message") {
Address Addr, desc="Physical address for this request";
Address addr, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID Sender, desc="Node who sent the data";
NetDest Destination, desc="Node to whom the data is sent";

@@ -113,12 +113,12 @@ structure(ResponseMsg, desc="...", interface="Message") {

bool functionalRead(Packet *pkt) {
// No check being carried out on the message type. Would be added later.
return testAndRead(Addr, DataBlk, pkt);
return testAndRead(addr, DataBlk, pkt);
}

bool functionalWrite(Packet *pkt) {
// No check required since all messages are written.
return testAndWrite(Addr, DataBlk, pkt);
return testAndWrite(addr, DataBlk, pkt);
}
}

@@ -336,15 +336,15 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {

Entry cache_entry := getCacheEntry(in_msg.Addr);
TBE tbe := TBEs[in_msg.Addr];
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];

if (in_msg.Type == TriggerType:L2_to_L1) {
trigger(Event:Complete_L2_to_L1, in_msg.Addr, cache_entry, tbe);
trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks, in_msg.Addr, cache_entry, tbe);
trigger(Event:All_acks, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
trigger(Event:All_acks_no_sharers, in_msg.Addr, cache_entry, tbe);
trigger(Event:All_acks_no_sharers, in_msg.addr, cache_entry, tbe);
} else {
error("Unexpected message");
}

@@ -357,21 +357,21 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
// Response Network
in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
if (responseToCache_in.isReady()) {
peek(responseToCache_in, ResponseMsg, block_on="Addr") {
peek(responseToCache_in, ResponseMsg, block_on="addr") {

Entry cache_entry := getCacheEntry(in_msg.Addr);
TBE tbe := TBEs[in_msg.Addr];
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];

if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.Addr, cache_entry, tbe);
trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
trigger(Event:Shared_Ack, in_msg.Addr, cache_entry, tbe);
trigger(Event:Shared_Ack, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.Addr, cache_entry, tbe);
trigger(Event:Data, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
trigger(Event:Shared_Data, in_msg.Addr, cache_entry, tbe);
trigger(Event:Shared_Data, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Exclusive_Data, in_msg.Addr, cache_entry, tbe);
trigger(Event:Exclusive_Data, in_msg.addr, cache_entry, tbe);
} else {
error("Unexpected message");
}

@@ -382,38 +382,38 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
// Forward Network
in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
if (forwardToCache_in.isReady()) {
peek(forwardToCache_in, RequestMsg, block_on="Addr") {
peek(forwardToCache_in, RequestMsg, block_on="addr") {

Entry cache_entry := getCacheEntry(in_msg.Addr);
TBE tbe := TBEs[in_msg.Addr];
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];

if ((in_msg.Type == CoherenceRequestType:GETX) ||
(in_msg.Type == CoherenceRequestType:GETF)) {
trigger(Event:Other_GETX, in_msg.Addr, cache_entry, tbe);
trigger(Event:Other_GETX, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
trigger(Event:Merged_GETS, in_msg.Addr, cache_entry, tbe);
trigger(Event:Merged_GETS, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
if (machineCount(MachineType:L1Cache) > 1) {
if (is_valid(cache_entry)) {
if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
trigger(Event:Other_GETS_No_Mig, in_msg.Addr, cache_entry, tbe);
trigger(Event:Other_GETS_No_Mig, in_msg.addr, cache_entry, tbe);
} else {
trigger(Event:Other_GETS, in_msg.Addr, cache_entry, tbe);
trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
}
} else {
trigger(Event:Other_GETS, in_msg.Addr, cache_entry, tbe);
trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
}
} else {
trigger(Event:NC_DMA_GETS, in_msg.Addr, cache_entry, tbe);
trigger(Event:NC_DMA_GETS, in_msg.addr, cache_entry, tbe);
}
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Invalidate, in_msg.Addr, cache_entry, tbe);
trigger(Event:Invalidate, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.Addr, cache_entry, tbe);
trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.Addr, cache_entry, tbe);
trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
trigger(Event:Block_Ack, in_msg.Addr, cache_entry, tbe);
trigger(Event:Block_Ack, in_msg.addr, cache_entry, tbe);
} else {
error("Unexpected message");
}

@@ -555,7 +555,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
action(a_issueGETS, "a", desc="Issue GETS") {
enqueue(requestNetwork_out, RequestMsg, issue_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@@ -570,7 +570,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
action(b_issueGETX, "b", desc="Issue GETX") {
enqueue(requestNetwork_out, RequestMsg, issue_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@@ -586,7 +586,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
if (machineCount(MachineType:L1Cache) > 1) {
enqueue(requestNetwork_out, RequestMsg, issue_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@@ -602,7 +602,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
action(bf_issueGETF, "bf", desc="Issue GETF") {
enqueue(requestNetwork_out, RequestMsg, issue_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETF;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@@ -618,7 +618,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);

@@ -641,7 +641,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);

@@ -662,7 +662,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")

action(d_issuePUT, "d", desc="Issue PUT") {
enqueue(requestNetwork_out, RequestMsg, issue_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:PUT;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@@ -672,7 +672,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")

action(df_issuePUTF, "df", desc="Issue PUTF") {
enqueue(requestNetwork_out, RequestMsg, issue_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:PUTF;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@@ -684,7 +684,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);

@@ -707,7 +707,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);

@@ -731,7 +731,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);

@@ -755,7 +755,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
out_msg.Destination := in_msg.MergedRequestors;

@@ -775,7 +775,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
out_msg.Destination := in_msg.MergedRequestors;

@@ -794,7 +794,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
action(f_sendAck, "f", desc="Send ack from cache to requestor") {
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);

@@ -811,7 +811,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK_SHARED;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);

@@ -827,7 +827,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")

action(g_sendUnblock, "g", desc="Send unblock to memory") {
enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Sender := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@@ -837,7 +837,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")

action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:UNBLOCKM;
out_msg.Sender := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@@ -848,7 +848,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:UNBLOCKS;
out_msg.Sender := machineID;
out_msg.CurOwner := tbe.CurOwner;

@@ -1013,7 +1013,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")

action(ll_L2toL1Transfer, "ll", desc="") {
enqueue(triggerQueue_out, TriggerMsg, l2_cache_hit_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := TriggerType:L2_to_L1;
}
}

@@ -1022,7 +1022,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
assert(is_valid(tbe));
if (tbe.NumPendingMsgs == 0) {
enqueue(triggerQueue_out, TriggerMsg) {
out_msg.Addr := address;
out_msg.addr := address;
if (tbe.Sharers) {
out_msg.Type := TriggerType:ALL_ACKS;
} else {

@@ -1047,7 +1047,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
assert(in_msg.Requestor != machineID);
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);

@@ -1072,7 +1072,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
assert(in_msg.Requestor != machineID);
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);

@@ -1096,7 +1096,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
peek(forwardToCache_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_SHARED;
out_msg.Sender := machineID;
out_msg.Destination := in_msg.MergedRequestors;

@@ -1115,7 +1115,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Sender := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.Dirty := tbe.Dirty;

@@ -1146,7 +1146,7 @@ machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Sender := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
out_msg.DataBlk := tbe.DataBlk;

@@ -316,19 +316,19 @@ machine(Directory, "AMD Hammer-like protocol")
in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.Addr);
TBE tbe := TBEs[in_msg.Addr];
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks_and_owner_data, in_msg.Addr,
trigger(Event:All_acks_and_owner_data, in_msg.addr,
pf_entry, tbe);
} else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
trigger(Event:All_acks_and_shared_data, in_msg.Addr,
trigger(Event:All_acks_and_shared_data, in_msg.addr,
pf_entry, tbe);
} else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
trigger(Event:All_acks_and_data_no_sharers, in_msg.Addr,
trigger(Event:All_acks_and_data_no_sharers, in_msg.addr,
pf_entry, tbe);
} else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
trigger(Event:All_Unblocks, in_msg.Addr,
trigger(Event:All_Unblocks, in_msg.addr,
pf_entry, tbe);
} else {
error("Unexpected message");

@@ -340,23 +340,23 @@ machine(Directory, "AMD Hammer-like protocol")
in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
if (unblockNetwork_in.isReady()) {
peek(unblockNetwork_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.Addr);
TBE tbe := TBEs[in_msg.Addr];
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
trigger(Event:Unblock, in_msg.Addr, pf_entry, tbe);
trigger(Event:Unblock, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
trigger(Event:UnblockS, in_msg.Addr, pf_entry, tbe);
trigger(Event:UnblockS, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
trigger(Event:UnblockM, in_msg.Addr, pf_entry, tbe);
trigger(Event:UnblockM, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
trigger(Event:Writeback_Clean, in_msg.Addr, pf_entry, tbe);
trigger(Event:Writeback_Clean, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
trigger(Event:Writeback_Dirty, in_msg.Addr, pf_entry, tbe);
trigger(Event:Writeback_Dirty, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
trigger(Event:Writeback_Exclusive_Clean, in_msg.Addr,
trigger(Event:Writeback_Exclusive_Clean, in_msg.addr,
pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
trigger(Event:Writeback_Exclusive_Dirty, in_msg.Addr,
trigger(Event:Writeback_Exclusive_Dirty, in_msg.addr,
pf_entry, tbe);
} else {
error("Invalid message");

@@ -369,18 +369,18 @@ machine(Directory, "AMD Hammer-like protocol")
in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
if (responseToDir_in.isReady()) {
peek(responseToDir_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.Addr);
TBE tbe := TBEs[in_msg.Addr];
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.Addr, pf_entry, tbe);
trigger(Event:Ack, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
trigger(Event:Shared_Ack, in_msg.Addr, pf_entry, tbe);
trigger(Event:Shared_Ack, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
trigger(Event:Shared_Data, in_msg.Addr, pf_entry, tbe);
trigger(Event:Shared_Data, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.Addr, pf_entry, tbe);
trigger(Event:Data, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Exclusive_Data, in_msg.Addr, pf_entry, tbe);
trigger(Event:Exclusive_Data, in_msg.addr, pf_entry, tbe);
} else {
error("Unexpected message");
}

@@ -392,12 +392,12 @@ machine(Directory, "AMD Hammer-like protocol")
in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=2) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.Addr);
TBE tbe := TBEs[in_msg.Addr];
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.Addr, pf_entry, tbe);
trigger(Event:Memory_Data, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.Addr, pf_entry, tbe);
trigger(Event:Memory_Ack, in_msg.addr, pf_entry, tbe);
} else {
DPRINTF(RubySlicc, "%d\n", in_msg.Type);
error("Invalid message");

@@ -409,30 +409,30 @@ machine(Directory, "AMD Hammer-like protocol")
in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.Addr);
TBE tbe := TBEs[in_msg.Addr];
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:PUT) {
trigger(Event:PUT, in_msg.Addr, pf_entry, tbe);
trigger(Event:PUT, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:PUTF) {
trigger(Event:PUTF, in_msg.Addr, pf_entry, tbe);
trigger(Event:PUTF, in_msg.addr, pf_entry, tbe);
} else {
if (probe_filter_enabled || full_bit_dir_enabled) {
if (is_valid(pf_entry)) {
trigger(cache_request_to_event(in_msg.Type), in_msg.Addr,
trigger(cache_request_to_event(in_msg.Type), in_msg.addr,
pf_entry, tbe);
} else {
if (probeFilter.cacheAvail(in_msg.Addr)) {
trigger(cache_request_to_event(in_msg.Type), in_msg.Addr,
if (probeFilter.cacheAvail(in_msg.addr)) {
trigger(cache_request_to_event(in_msg.Type), in_msg.addr,
pf_entry, tbe);
} else {
trigger(Event:Pf_Replacement,
probeFilter.cacheProbe(in_msg.Addr),
getProbeFilterEntry(probeFilter.cacheProbe(in_msg.Addr)),
TBEs[probeFilter.cacheProbe(in_msg.Addr)]);
probeFilter.cacheProbe(in_msg.addr),
getProbeFilterEntry(probeFilter.cacheProbe(in_msg.addr)),
TBEs[probeFilter.cacheProbe(in_msg.addr)]);
}
}
} else {
trigger(cache_request_to_event(in_msg.Type), in_msg.Addr,
trigger(cache_request_to_event(in_msg.Type), in_msg.addr,
pf_entry, tbe);
}
}

@@ -504,7 +504,7 @@ machine(Directory, "AMD Hammer-like protocol")
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(in_msg.Requestor);

@@ -517,7 +517,7 @@ machine(Directory, "AMD Hammer-like protocol")
peek(requestQueue_in, RequestMsg) {
if (((probe_filter_enabled || full_bit_dir_enabled) && (in_msg.Requestor == cache_entry.Owner)) || machineCount(MachineType:L1Cache) == 1) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:BLOCK_ACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(in_msg.Requestor);

@@ -530,7 +530,7 @@ machine(Directory, "AMD Hammer-like protocol")
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(in_msg.Requestor);

@@ -689,7 +689,7 @@ machine(Directory, "AMD Hammer-like protocol")
assert(is_valid(tbe));
if (tbe.NumPendingMsgs == 0) {
enqueue(triggerQueue_out, TriggerMsg) {
out_msg.Addr := address;
out_msg.addr := address;
if (tbe.Sharers) {
if (tbe.Owned) {
out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;

@@ -707,7 +707,7 @@ machine(Directory, "AMD Hammer-like protocol")
assert(is_valid(tbe));
if (tbe.NumPendingMsgs == 0) {
enqueue(triggerQueue_out, TriggerMsg) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := TriggerType:ALL_UNBLOCKS;
}
}

@@ -730,7 +730,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (tbe.NumPendingMsgs == 0) {
assert(probe_filter_enabled || full_bit_dir_enabled);
enqueue(triggerQueue_out, TriggerMsg) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
}
}

@@ -740,7 +740,7 @@ machine(Directory, "AMD Hammer-like protocol")
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, 1) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := tbe.ResponseType;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);

@@ -867,7 +867,7 @@ machine(Directory, "AMD Hammer-like protocol")
fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
if (fwd_set.count() > 0) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);

@@ -882,7 +882,7 @@ machine(Directory, "AMD Hammer-like protocol")
} else {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches

@@ -902,7 +902,7 @@ machine(Directory, "AMD Hammer-like protocol")
assert(cache_entry.Sharers.count() > 0);
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
out_msg.Destination.setNetDest(MachineType:L1Cache, cache_entry.Sharers);

@@ -911,7 +911,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
} else {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches

@@ -925,7 +925,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (machineCount(MachineType:L1Cache) > 1) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
out_msg.Destination.add(cache_entry.Owner);

@@ -943,7 +943,7 @@ machine(Directory, "AMD Hammer-like protocol")
fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
if (fwd_set.count() > 0) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);

@@ -956,7 +956,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
} else {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches

@@ -970,7 +970,7 @@ machine(Directory, "AMD Hammer-like protocol")
} else {
peek(requestQueue_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, 1) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);

@@ -993,7 +993,7 @@ machine(Directory, "AMD Hammer-like protocol")
peek(unblockNetwork_in, ResponseMsg) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:MERGED_GETS;
out_msg.MergedRequestors := tbe.GetSRequestors;
if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {

@@ -1014,7 +1014,7 @@ machine(Directory, "AMD Hammer-like protocol")
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(cache_entry.Owner);

@@ -1027,7 +1027,7 @@ machine(Directory, "AMD Hammer-like protocol")
} else {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches

@@ -1048,7 +1048,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (in_msg.Requestor != cache_entry.Owner) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.add(cache_entry.Owner);

@@ -1062,7 +1062,7 @@ machine(Directory, "AMD Hammer-like protocol")
} else {
peek(requestQueue_in, RequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches

@@ -1081,7 +1081,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (tbe.NumPendingMsgs > 0) {
peek(dmaRequestQueue_in, DMARequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETX;
//
// Send to all L1 caches, since the requestor is the memory controller

@@ -1100,7 +1100,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (tbe.NumPendingMsgs > 0) {
peek(dmaRequestQueue_in, DMARequestMsg) {
enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETS;
//
// Send to all L1 caches, since the requestor is the memory controller

@@ -71,7 +71,7 @@ enumeration(TriggerType, desc="...") {

// TriggerMsg
structure(TriggerMsg, desc="...", interface="Message") {
Address Addr, desc="Physical address for this request";
Address addr, desc="Physical address for this request";
TriggerType Type, desc="Type of trigger";

bool functionalRead(Packet *pkt) {

@@ -87,7 +87,7 @@ structure(TriggerMsg, desc="...", interface="Message") {

// RequestMsg (and also forwarded requests)
structure(RequestMsg, desc="...", interface="Message") {
Address Addr, desc="Physical address for this request";
Address addr, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
NetDest MergedRequestors, desc="Merge set of read requestors";

@@ -114,7 +114,7 @@ structure(RequestMsg, desc="...", interface="Message") {

// ResponseMsg (and also unblock requests)
structure(ResponseMsg, desc="...", interface="Message") {
Address Addr, desc="Physical address for this request";
Address addr, desc="Physical address for this request";
CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
MachineID Sender, desc="Node who sent the data";
MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";

@@ -138,7 +138,7 @@ structure(ResponseMsg, desc="...", interface="Message") {
Type == CoherenceResponseType:DATA_EXCLUSIVE ||
Type == CoherenceResponseType:WB_DIRTY ||
Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
return testAndRead(Addr, DataBlk, pkt);
return testAndRead(addr, DataBlk, pkt);
}

return false;

@@ -148,7 +148,7 @@ structure(ResponseMsg, desc="...", interface="Message") {
// Message type does not matter since all messages are written.
// If a protocol reads data from a packet that is not supposed
// to hold the data, then the fault lies with the protocol.
return testAndWrite(Addr, DataBlk, pkt);
return testAndWrite(addr, DataBlk, pkt);
}
}

@@ -145,7 +145,7 @@ machine(L1Cache, "Network_test L1 Cache")

action(a_issueRequest, "a", desc="Issue a request") {
enqueue(requestNetwork_out, RequestMsg, issue_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:MSG;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@@ -156,7 +156,7 @@ machine(L1Cache, "Network_test L1 Cache")

action(b_issueForward, "b", desc="Issue a forward") {
enqueue(forwardNetwork_out, RequestMsg, issue_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:MSG;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@@ -166,7 +166,7 @@ machine(L1Cache, "Network_test L1 Cache")

action(c_issueResponse, "c", desc="Issue a response") {
enqueue(responseNetwork_out, RequestMsg, issue_latency) {
out_msg.Addr := address;
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:MSG;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));

@@ -90,7 +90,7 @@ machine(Directory, "Network_test Directory")
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Request, in_msg.Addr);
trigger(Event:Receive_Request, in_msg.addr);
} else {
error("Invalid message");
}

@@ -101,7 +101,7 @@ machine(Directory, "Network_test Directory")
if (forwardQueue_in.isReady()) {
peek(forwardQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Forward, in_msg.Addr);
trigger(Event:Receive_Forward, in_msg.addr);
} else {
error("Invalid message");
}

@@ -112,7 +112,7 @@ machine(Directory, "Network_test Directory")
if (responseQueue_in.isReady()) {
peek(responseQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Response, in_msg.Addr);
trigger(Event:Receive_Response, in_msg.addr);
} else {
error("Invalid message");
}

@@ -34,7 +34,7 @@ enumeration(CoherenceRequestType, desc="...") {

// RequestMsg (and also forwarded requests)
structure(RequestMsg, desc="...", interface="Message") {
Address Addr, desc="Physical address for this request";
Address addr, desc="Physical address for this request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
NetDest Destination, desc="Multicast destination mask";

@@ -51,7 +51,7 @@ enumeration(MemoryRequestType, desc="...") {
// Message to and from Memory Control

structure(MemoryMsg, desc="...", interface="Message") {
Address Addr, desc="Physical address for this request";
Address addr, desc="Physical address for this request";
MemoryRequestType Type, desc="Type of memory request (MEMORY_READ or MEMORY_WB)";
MachineID Sender, desc="What component sent the data";
MachineID OriginalRequestorMachId, desc="What component originally requested";

@@ -63,10 +63,10 @@ structure(MemoryMsg, desc="...", interface="Message") {
int Acks, desc="How many acks to expect";

bool functionalRead(Packet *pkt) {
return testAndRead(Addr, DataBlk, pkt);
return testAndRead(addr, DataBlk, pkt);
}

bool functionalWrite(Packet *pkt) {
return testAndWrite(Addr, DataBlk, pkt);
return testAndWrite(addr, DataBlk, pkt);
}
}

@@ -310,7 +310,7 @@ AbstractController::recvTimingResp(PacketPtr pkt)
assert(pkt->isResponse());

std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
(*msg).m_Addr.setAddress(pkt->getAddr());
(*msg).m_addr.setAddress(pkt->getAddr());
(*msg).m_Sender = m_machineID;

SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
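
Note on the rename pattern (illustration, not part of the diff): every hunk above replaces a field or variable spelled Addr with addr while keeping its declared type, e.g. "Address Addr, desc=..." becomes "Address addr, desc=...". As a rough sketch of why a member that shares its spelling with a type name is worth avoiding, the minimal C++ example below (hypothetical names, not taken from these sources) shows the shadowing that forces awkward qualification; renaming the member to lower-case removes the problem.

#include <cstdint>

// Hypothetical address type, standing in for whatever type the member name would collide with.
typedef uint64_t Addr;

struct MemoryRequest {
    uint64_t Addr;   // member spelled like the type: hides "Addr" inside this class scope
    // Addr next;    // would not compile here: "Addr" now names the member above, not the type
    ::Addr next;     // works, but only with explicit qualification
    // uint64_t addr;   // renaming the member (Addr -> addr) sidesteps the clash entirely
};

int main() {
    MemoryRequest r;
    r.Addr = 1;      // the member
    r.next = 1;      // the (qualified) type still usable for other fields
    return static_cast<int>(r.next - r.Addr);  // returns 0
}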