MOESI_hammer: Fixed uniprocessor DMA bug

Author: Brad Beckmann
Date:   2011-07-06 18:44:42 -07:00
parent da1eaaca0e
commit 4f83390781
2 changed files with 30 additions and 1 deletion
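
The fix, as reflected in the hunks below: in a uniprocessor there are no peer L1 caches to acknowledge a DMA-triggered GETX, so the cache only broadcasts the request when machineCount(MachineType:L1Cache) > 1 while still recording machineCount pending responses; the directory covers the single-cache case by answering the requestor directly with an explicit ACK carrying Acks := 0, and the cache-side assertion is relaxed to accept it.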

src/mem/protocol/MOESI_hammer-cache.sm

@@ -545,6 +545,21 @@ machine(L1Cache, "AMD Hammer-like protocol")
     }
   }
 
+  action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX") {
+    if (machineCount(MachineType:L1Cache) > 1) {
+      enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
+        assert(is_valid(tbe));
+        out_msg.Address := address;
+        out_msg.Type := CoherenceRequestType:GETX;
+        out_msg.Requestor := machineID;
+        out_msg.Destination.add(map_Address_to_Directory(address));
+        out_msg.MessageSize := MessageSizeType:Request_Control;
+        out_msg.InitialRequestTime := get_time();
+      }
+    }
+    tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
+  }
+
   action(bf_issueGETF, "bf", desc="Issue GETF") {
     enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
       assert(is_valid(tbe));
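
As a sanity check on the accounting above, here is a minimal sketch in plain C++ (not SLICC; the names shouldBroadcastGETX and pendingMsgs are illustrative, not gem5 API) showing why NumPendingMsgs is machineCount in both the multiprocessor and the uniprocessor case:

```cpp
#include <cassert>

// Whether a GETX broadcast is issued at all: only when there is at
// least one other cache to invalidate.
bool shouldBroadcastGETX(int machineCount) {
    return machineCount > 1;
}

// Responses to wait for: one ack from each other cache (n - 1) plus
// one from the memory (+ 1), i.e. machineCount itself.
int pendingMsgs(int machineCount) {
    return machineCount;
}

int main() {
    // Four caches: broadcast, then wait for 3 cache acks + 1 memory response.
    assert(shouldBroadcastGETX(4) && pendingMsgs(4) == 4);

    // Uniprocessor: no broadcast, yet one response is still pending;
    // it is supplied by the directory's explicit zero-ack ACK
    // (see the MOESI_hammer-dir.sm hunk below).
    assert(!shouldBroadcastGETX(1) && pendingMsgs(1) == 1);
    return 0;
}
```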
@@ -921,7 +936,7 @@ machine(L1Cache, "AMD Hammer-like protocol")
   action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
     peek(responseToCache_in, ResponseMsg) {
-      assert(in_msg.Acks > 0);
+      assert(in_msg.Acks >= 0);
       assert(is_valid(tbe));
       DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
       DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);

src/mem/protocol/MOESI_hammer-dir.sm

@@ -951,6 +951,20 @@ machine(Directory, "AMD Hammer-like protocol")
           }
         }
       }
+    } else {
+      peek(requestQueue_in, RequestMsg) {
+        enqueue(responseNetwork_out, ResponseMsg, latency="1") {
+          out_msg.Address := address;
+          out_msg.Type := CoherenceResponseType:ACK;
+          out_msg.Sender := machineID;
+          out_msg.Destination.add(in_msg.Requestor);
+          out_msg.Dirty := false; // By definition, the block is now clean
+          out_msg.Acks := 0;
+          out_msg.SilentAcks := 0;
+          DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
+          out_msg.MessageSize := MessageSizeType:Response_Control;
+        }
+      }
     }
   }
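
For illustration, a minimal plain-C++ sketch of the message built in the added else-branch; the ResponseMsg struct and makeZeroAckResponse are illustrative stand-ins, not gem5 code, and simply mirror the field assignments above:

```cpp
#include <cassert>

// Illustrative stand-in for the SLICC ResponseMsg fields set above.
struct ResponseMsg {
    const char* type;  // CoherenceResponseType
    int acks;
    int silentAcks;
    bool dirty;
};

// Builds the explicit zero-ack ACK the directory now sends when there
// are no other caches to supply invalidation acks.
ResponseMsg makeZeroAckResponse() {
    ResponseMsg msg;
    msg.type = "ACK";
    msg.acks = 0;        // out_msg.Acks := 0
    msg.silentAcks = 0;  // out_msg.SilentAcks := 0
    msg.dirty = false;   // by definition, the block is now clean
    return msg;
}

int main() {
    ResponseMsg msg = makeZeroAckResponse();
    // The relaxed cache-side assertion (Acks >= 0) accepts this message.
    assert(msg.acks >= 0);
    return 0;
}
```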