Ruby: Remove CacheMsg class from SLICC

The goal of the patch is to do away with the CacheMsg class currently in use
in coherence protocols. In place of CacheMsg, the RubyRequest class will be used.
This class is already present in slicc_interface/RubyRequest.hh. In fact,
objects of class CacheMsg are generated by copying values from a RubyRequest
object.
This commit is contained in:
Nilay Vaish 2011-03-22 06:41:54 -05:00
parent 46cce440be
commit 1764ebbf30
23 changed files with 208 additions and 166 deletions

View file

@ -267,9 +267,9 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
}
// Mandatory Queue betweens Node's CPU and it's L1 caches
in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@ -338,7 +338,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
// ACTIONS
action(a_issueGETS, "a", desc="Issue GETS") {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
@ -355,7 +355,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
}
action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GET_INSTR;
@ -373,7 +373,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
action(b_issueGETX, "b", desc="Issue GETX") {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
@ -391,7 +391,7 @@ machine(L1Cache, "MSI Directory L1 Cache CMP")
}
action(c_issueUPGRADE, "c", desc="Issue GETX") {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestIntraChipL1Network_out, RequestMsg, latency= l1_request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:UPGRADE;

View file

@ -181,9 +181,9 @@ machine(L1Cache, "MI Example L1 Cache")
}
// Mandatory Queue
in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
Entry cache_entry := getCacheEntry(in_msg.LineAddress);
if (is_invalid(cache_entry) &&
@ -281,7 +281,7 @@ machine(L1Cache, "MI Example L1 Cache")
}
action(p_profileMiss, "p", desc="Profile cache miss") {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
cacheMemory.profileMiss(in_msg);
}
}

View file

@ -303,9 +303,9 @@ machine(L1Cache, "Directory protocol")
// Nothing from the unblock network
// Mandatory Queue betweens Node's CPU and it's L1 caches
in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
@ -380,7 +380,7 @@ machine(L1Cache, "Directory protocol")
// ACTIONS
action(a_issueGETS, "a", desc="Issue GETS") {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestNetwork_out, RequestMsg, latency= request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
@ -396,7 +396,7 @@ machine(L1Cache, "Directory protocol")
}
action(b_issueGETX, "b", desc="Issue GETX") {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
@ -820,7 +820,7 @@ machine(L1Cache, "Directory protocol")
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
// profile_miss(in_msg);
}
}

View file

@ -622,9 +622,9 @@ machine(L1Cache, "Token protocol")
}
// Mandatory Queue
in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
TBE tbe := L1_TBEs[in_msg.LineAddress];
@ -1310,7 +1310,7 @@ machine(L1Cache, "Token protocol")
L1_TBEs.allocate(address);
set_tbe(L1_TBEs[address]);
tbe.IssueCount := 0;
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
tbe.PC := in_msg.ProgramCounter;
tbe.AccessType := cache_request_type_to_access_type(in_msg.Type);
if (in_msg.Type == RubyRequestType:ATOMIC) {
@ -1323,7 +1323,7 @@ machine(L1Cache, "Token protocol")
}
action(ta_traceStalledAddress, "ta", desc="Trace Stalled Address") {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
}
}
@ -1499,7 +1499,7 @@ machine(L1Cache, "Token protocol")
}
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory.profileMiss(in_msg);
} else {
@ -1516,7 +1516,7 @@ machine(L1Cache, "Token protocol")
}
action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
}
stall_and_wait(mandatoryQueue_in, address);

View file

@ -352,9 +352,9 @@ machine(L1Cache, "AMD Hammer-like protocol")
// Nothing from the request network
// Mandatory Queue
in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
TBE tbe := TBEs[in_msg.LineAddress];
@ -695,7 +695,7 @@ machine(L1Cache, "AMD Hammer-like protocol")
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
cache_entry.DataBlk);
@ -1022,7 +1022,7 @@ machine(L1Cache, "AMD Hammer-like protocol")
}
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
if (L1IcacheMemory.isTagPresent(address)) {
L1IcacheMemory.profileMiss(in_msg);
} else if (L1DcacheMemory.isTagPresent(address)) {

View file

@ -132,9 +132,9 @@ machine(L1Cache, "Network_test L1 Cache")
out_port(responseNetwork_out, RequestMsg, responseFromCache);
// Mandatory Queue
in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, CacheMsg) {
peek(mandatoryQueue_in, RubyRequest) {
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
getCacheEntry(in_msg.LineAddress),

View file

@ -213,17 +213,6 @@ enumeration(PrefetchBit, default="PrefetchBit_No", desc="...") {
L2_HW, desc="This is a L2 hardware prefetch";
}
// CacheMsg
structure(CacheMsg, desc="...", interface="Message") {
Address LineAddress, desc="Line address for this request";
Address PhysicalAddress, desc="Physical address for this request";
RubyRequestType Type, desc="Type of request (LD, ST, etc)";
Address ProgramCounter, desc="Program counter of the instruction that caused the miss";
RubyAccessMode AccessMode, desc="user/supervisor access type";
int Size, desc="size in bytes of access";
PrefetchBit Prefetch, desc="Is this a prefetch request";
}
// CacheMsg
structure(SequencerMsg, desc="...", interface="Message") {
Address LineAddress, desc="Line address for this request";

View file

@ -34,10 +34,10 @@ void profileCacheCLBsize(int size, int numStaleI);
void profileMemoryCLBsize(int size, int numStaleI);
// used by 2level exclusive cache protocols
void profile_miss(CacheMsg msg);
void profile_miss(RubyRequest msg);
// used by non-fast path protocols
void profile_L1Cache_miss(CacheMsg msg, NodeID l1cacheID);
void profile_L1Cache_miss(RubyRequest msg, NodeID l1cacheID);
// used by CMP protocols
void profile_request(std::string L1CacheStateStr, std::string L2CacheStateStr,

View file

@ -109,6 +109,16 @@ structure (Sequencer, external = "yes") {
void profileNack(Address, int, int, uint64);
}
structure(RubyRequest, desc="...", interface="Message", external="yes") {
Address LineAddress, desc="Line address for this request";
Address PhysicalAddress, desc="Physical address for this request";
RubyRequestType Type, desc="Type of request (LD, ST, etc)";
Address ProgramCounter, desc="Program counter of the instruction that caused the miss";
RubyAccessMode AccessMode, desc="user/supervisor access type";
int Size, desc="size in bytes of access";
PrefetchBit Prefetch, desc="Is this a prefetch request";
}
external_type(AbstractEntry, primitive="yes");
structure (DirectoryMemory, external = "yes") {
@ -126,7 +136,7 @@ structure (CacheMemory, external = "yes") {
void deallocate(Address);
AbstractCacheEntry lookup(Address);
bool isTagPresent(Address);
void profileMiss(CacheMsg);
void profileMiss(RubyRequest);
void profileGenericRequest(GenericRequestType,
RubyAccessMode,

View file

@ -29,7 +29,7 @@
#include <vector>
#include "base/stl_helpers.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/profiler/AddressProfiler.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/System.hh"

View file

@ -33,7 +33,7 @@
#include "base/hashmap.hh"
#include "mem/protocol/AccessType.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Histogram.hh"

View file

@ -51,7 +51,7 @@
#include "base/stl_helpers.hh"
#include "base/str.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/RubyRequest.hh"
#include "mem/protocol/MachineType.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/network/Network.hh"
@ -535,7 +535,7 @@ Profiler::clearStats()
}
void
Profiler::addAddressTraceSample(const CacheMsg& msg, NodeID id)
Profiler::addAddressTraceSample(const RubyRequest& msg, NodeID id)
{
if (msg.getType() != RubyRequestType_IFETCH) {
// Note: The following line should be commented out if you

View file

@ -68,7 +68,7 @@
#include "params/RubyProfiler.hh"
#include "sim/sim_object.hh"
class CacheMsg;
class RubyRequest;
class AddressProfiler;
class Profiler : public SimObject, public Consumer
@ -93,7 +93,7 @@ class Profiler : public SimObject, public Consumer
AddressProfiler* getAddressProfiler() { return m_address_profiler_ptr; }
AddressProfiler* getInstructionProfiler() { return m_inst_profiler_ptr; }
void addAddressTraceSample(const CacheMsg& msg, NodeID id);
void addAddressTraceSample(const RubyRequest& msg, NodeID id);
void profileRequest(const std::string& requestStr);
void profileSharing(const Address& addr, AccessType type,

View file

@ -26,7 +26,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/recorder/TraceRecord.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"

View file

@ -4,34 +4,17 @@
using namespace std;
ostream&
operator<<(ostream& out, const RubyRequest& obj)
void
RubyRequest::print(ostream& out) const
{
out << hex << "0x" << obj.paddr << " data: 0x" << flush;
for (int i = 0; i < obj.len; i++) {
out << (int)obj.data[i];
}
out << dec << " type: " << RubyRequestType_to_string(obj.type) << endl;
return out;
}
vector<string>
tokenizeString(string str, string delims)
{
vector<string> tokens;
char* pch;
char* tmp;
const char* c_delims = delims.c_str();
tmp = new char[str.length()+1];
strcpy(tmp, str.c_str());
pch = strtok(tmp, c_delims);
while (pch != NULL) {
string tmp_str(pch);
if (tmp_str == "null") tmp_str = "";
tokens.push_back(tmp_str);
pch = strtok(NULL, c_delims);
}
delete [] tmp;
return tokens;
out << "[RubyRequest: ";
out << "LineAddress = " << m_LineAddress << " ";
out << "PhysicalAddress = " << m_PhysicalAddress << " ";
out << "Type = " << m_Type << " ";
out << "ProgramCounter = " << m_ProgramCounter << " ";
out << "AccessMode = " << m_AccessMode << " ";
out << "Size = " << m_Size << " ";
out << "Prefetch = " << m_Prefetch << " ";
// out << "Time = " << getTime() << " ";
out << "]";
}

View file

@ -40,40 +40,102 @@
typedef void* RubyPortHandle;
class RubyRequest
class RubyRequest : public Message
{
public:
uint64_t paddr;
Address m_PhysicalAddress;
Address m_LineAddress;
RubyRequestType m_Type;
Address m_ProgramCounter;
RubyAccessMode m_AccessMode;
int m_Size;
PrefetchBit m_Prefetch;
uint8_t* data;
int len;
uint64_t pc;
RubyRequestType type;
RubyAccessMode access_mode;
PacketPtr pkt;
unsigned proc_id;
RubyRequest() {}
RubyRequest(uint64_t _paddr,
uint8_t* _data,
int _len,
uint64_t _pc,
RubyRequestType _type,
RubyAccessMode _access_mode,
PacketPtr _pkt,
unsigned _proc_id = 100)
: paddr(_paddr),
RubyRequest(uint64_t _paddr, uint8_t* _data, int _len, uint64_t _pc,
RubyRequestType _type, RubyAccessMode _access_mode,
PacketPtr _pkt, PrefetchBit _pb = PrefetchBit_No,
unsigned _proc_id = 100)
: m_PhysicalAddress(_paddr),
m_Type(_type),
m_ProgramCounter(_pc),
m_AccessMode(_access_mode),
m_Size(_len),
m_Prefetch(_pb),
data(_data),
len(_len),
pc(_pc),
type(_type),
access_mode(_access_mode),
pkt(_pkt),
proc_id(_proc_id)
{}
{
m_LineAddress = m_PhysicalAddress;
m_LineAddress.makeLineAddress();
}
static RubyRequest*
create()
{
return new RubyRequest();
}
RubyRequest*
clone() const
{
return new RubyRequest(*this);
}
const Address&
getLineAddress() const
{
return m_LineAddress;
}
const Address&
getPhysicalAddress() const
{
return m_PhysicalAddress;
}
const RubyRequestType&
getType() const
{
return m_Type;
}
const Address&
getProgramCounter() const
{
return m_ProgramCounter;
}
const RubyAccessMode&
getAccessMode() const
{
return m_AccessMode;
}
const int&
getSize() const
{
return m_Size;
}
const PrefetchBit&
getPrefetch() const
{
return m_Prefetch;
}
void print(std::ostream& out) const;
};
std::ostream& operator<<(std::ostream& out, const RubyRequest& obj);
inline std::ostream&
operator<<(std::ostream& out, const RubyRequest& obj)
{
obj.print(out);
out << std::flush;
return out;
}
#endif

View file

@ -56,8 +56,8 @@ void profile_request(const std::string& L1CacheStateStr,
const std::string& L2CacheStateStr,
const std::string& directoryStateStr,
const std::string& requestTypeStr);
void profile_miss(const CacheMsg& msg, NodeID id);
void profile_L1Cache_miss(const CacheMsg& msg, NodeID id);
void profile_miss(const RubyRequest& msg, NodeID id);
void profile_L1Cache_miss(const RubyRequest& msg, NodeID id);
void profile_token_retry(const Address& addr, AccessType type, int count);
void profile_filter_action(int action);
void profile_persistent_prediction(const Address& addr, AccessType type);

View file

@ -36,8 +36,6 @@
#include <cassert>
#include "mem/protocol/AccessType.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/RubyRequestType.hh"
#include "mem/protocol/Directory_State.hh"
#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/L1Cache_State.hh"

View file

@ -344,7 +344,7 @@ CacheMemory::setMRU(const Address& address)
}
void
CacheMemory::profileMiss(const CacheMsg& msg)
CacheMemory::profileMiss(const RubyRequest& msg)
{
m_profiler_ptr->addCacheStatSample(msg.getType(),
msg.getAccessMode(),

View file

@ -35,7 +35,7 @@
#include "base/hashmap.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/RubyRequest.hh"
#include "mem/protocol/RubyRequestType.hh"
#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/MachineType.hh"
@ -107,7 +107,7 @@ class CacheMemory : public SimObject
// Set this address to most recently used
void setMRU(const Address& address);
void profileMiss(const CacheMsg & msg);
void profileMiss(const RubyRequest & msg);
void profileGenericRequest(GenericRequestType requestType,
RubyAccessMode accessType,

View file

@ -53,11 +53,11 @@ DMASequencer::makeRequest(const RubyRequest &request)
return RequestStatus_BufferFull;
}
uint64_t paddr = request.paddr;
uint64_t paddr = request.m_PhysicalAddress.getAddress();
uint8_t* data = request.data;
int len = request.len;
int len = request.m_Size;
bool write = false;
switch(request.type) {
switch(request.m_Type) {
case RubyRequestType_LD:
write = false;
break;

View file

@ -253,7 +253,7 @@ RubyPort::M5Port::recvTiming(PacketPtr pkt)
pkt->getSize(), pc, type,
RubyAccessMode_Supervisor, pkt);
assert(Address(ruby_request.paddr).getOffset() + ruby_request.len <=
assert(ruby_request.m_PhysicalAddress.getOffset() + ruby_request.m_Size <=
RubySystem::getBlockSizeBytes());
// Submit the ruby request

View file

@ -29,7 +29,6 @@
#include "base/str.hh"
#include "base/misc.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
@ -104,7 +103,7 @@ Sequencer::wakeup()
panic("Possible Deadlock detected. Aborting!\n"
"version: %d request.paddr: 0x%x m_readRequestTable: %d "
"current time: %u issue_time: %d difference: %d\n", m_version,
request->ruby_request.paddr, m_readRequestTable.size(),
request->ruby_request.m_PhysicalAddress, m_readRequestTable.size(),
current_time, request->issue_time,
current_time - request->issue_time);
}
@ -119,7 +118,7 @@ Sequencer::wakeup()
panic("Possible Deadlock detected. Aborting!\n"
"version: %d request.paddr: 0x%x m_writeRequestTable: %d "
"current time: %u issue_time: %d difference: %d\n", m_version,
request->ruby_request.paddr, m_writeRequestTable.size(),
request->ruby_request.m_PhysicalAddress, m_writeRequestTable.size(),
current_time, request->issue_time,
current_time - request->issue_time);
}
@ -227,15 +226,15 @@ Sequencer::insertRequest(SequencerRequest* request)
schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
}
Address line_addr(request->ruby_request.paddr);
Address line_addr(request->ruby_request.m_PhysicalAddress);
line_addr.makeLineAddress();
if ((request->ruby_request.type == RubyRequestType_ST) ||
(request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_RMW_Write) ||
(request->ruby_request.type == RubyRequestType_Load_Linked) ||
(request->ruby_request.type == RubyRequestType_Store_Conditional) ||
(request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
if ((request->ruby_request.m_Type == RubyRequestType_ST) ||
(request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
(request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
(request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
(request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
(request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
(request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
pair<RequestTable::iterator, bool> r =
m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
bool success = r.second;
@ -288,15 +287,15 @@ Sequencer::removeRequest(SequencerRequest* srequest)
m_writeRequestTable.size() + m_readRequestTable.size());
const RubyRequest & ruby_request = srequest->ruby_request;
Address line_addr(ruby_request.paddr);
Address line_addr(ruby_request.m_PhysicalAddress);
line_addr.makeLineAddress();
if ((ruby_request.type == RubyRequestType_ST) ||
(ruby_request.type == RubyRequestType_RMW_Read) ||
(ruby_request.type == RubyRequestType_RMW_Write) ||
(ruby_request.type == RubyRequestType_Load_Linked) ||
(ruby_request.type == RubyRequestType_Store_Conditional) ||
(ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
(ruby_request.type == RubyRequestType_Locked_RMW_Write)) {
if ((ruby_request.m_Type == RubyRequestType_ST) ||
(ruby_request.m_Type == RubyRequestType_RMW_Read) ||
(ruby_request.m_Type == RubyRequestType_RMW_Write) ||
(ruby_request.m_Type == RubyRequestType_Load_Linked) ||
(ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
(ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
(ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
m_writeRequestTable.erase(line_addr);
} else {
m_readRequestTable.erase(line_addr);
@ -314,7 +313,7 @@ Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
// longer locked.
//
bool success = true;
if (request->ruby_request.type == RubyRequestType_Store_Conditional) {
if (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) {
if (!m_dataCache_ptr->isLocked(address, m_version)) {
//
// For failed SC requests, indicate the failure to the cpu by
@ -333,7 +332,7 @@ Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
// Independent of success, all SC operations must clear the lock
//
m_dataCache_ptr->clearLocked(address);
} else if (request->ruby_request.type == RubyRequestType_Load_Linked) {
} else if (request->ruby_request.m_Type == RubyRequestType_Load_Linked) {
//
// Note: To fully follow Alpha LLSC semantics, should the LL clear any
// previously locked cache lines?
@ -380,13 +379,13 @@ Sequencer::writeCallback(const Address& address,
m_writeRequestTable.erase(i);
markRemoved();
assert((request->ruby_request.type == RubyRequestType_ST) ||
(request->ruby_request.type == RubyRequestType_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_RMW_Write) ||
(request->ruby_request.type == RubyRequestType_Load_Linked) ||
(request->ruby_request.type == RubyRequestType_Store_Conditional) ||
(request->ruby_request.type == RubyRequestType_Locked_RMW_Read) ||
(request->ruby_request.type == RubyRequestType_Locked_RMW_Write));
assert((request->ruby_request.m_Type == RubyRequestType_ST) ||
(request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
(request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
(request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
(request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
(request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
(request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write));
//
// For Alpha, properly handle LL, SC, and write requests with respect to
@ -398,9 +397,9 @@ Sequencer::writeCallback(const Address& address,
if(!m_usingNetworkTester)
success = handleLlsc(address, request);
if (request->ruby_request.type == RubyRequestType_Locked_RMW_Read) {
if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) {
m_controller->blockOnQueue(address, m_mandatory_q_ptr);
} else if (request->ruby_request.type == RubyRequestType_Locked_RMW_Write) {
} else if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) {
m_controller->unblock(address);
}
@ -440,8 +439,8 @@ Sequencer::readCallback(const Address& address,
m_readRequestTable.erase(i);
markRemoved();
assert((request->ruby_request.type == RubyRequestType_LD) ||
(request->ruby_request.type == RubyRequestType_IFETCH));
assert((request->ruby_request.m_Type == RubyRequestType_LD) ||
(request->ruby_request.m_Type == RubyRequestType_IFETCH));
hitCallback(request, mach, data, true,
initialRequestTime, forwardRequestTime, firstResponseTime);
@ -457,10 +456,10 @@ Sequencer::hitCallback(SequencerRequest* srequest,
Time firstResponseTime)
{
const RubyRequest & ruby_request = srequest->ruby_request;
Address request_address(ruby_request.paddr);
Address request_line_address(ruby_request.paddr);
Address request_address(ruby_request.m_PhysicalAddress);
Address request_line_address(ruby_request.m_PhysicalAddress);
request_line_address.makeLineAddress();
RubyRequestType type = ruby_request.type;
RubyRequestType type = ruby_request.m_Type;
Time issued_time = srequest->issue_time;
// Set this cache entry to the most recently used
@ -498,7 +497,7 @@ Sequencer::hitCallback(SequencerRequest* srequest,
DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %d cycles\n",
g_eventQueue_ptr->getTime(), m_version, "Seq",
success ? "Done" : "SC_Failed", "", "",
Address(ruby_request.paddr), miss_latency);
ruby_request.m_PhysicalAddress, miss_latency);
}
#if 0
if (request.getPrefetch() == PrefetchBit_Yes) {
@ -514,11 +513,11 @@ Sequencer::hitCallback(SequencerRequest* srequest,
(type == RubyRequestType_Locked_RMW_Read) ||
(type == RubyRequestType_Load_Linked)) {
memcpy(ruby_request.data,
data.getData(request_address.getOffset(), ruby_request.len),
ruby_request.len);
data.getData(request_address.getOffset(), ruby_request.m_Size),
ruby_request.m_Size);
} else {
data.setData(ruby_request.data, request_address.getOffset(),
ruby_request.len);
ruby_request.m_Size);
}
} else {
DPRINTF(MemoryAccess,
@ -548,21 +547,21 @@ RequestStatus
Sequencer::getRequestStatus(const RubyRequest& request)
{
bool is_outstanding_store =
!!m_writeRequestTable.count(line_address(Address(request.paddr)));
!!m_writeRequestTable.count(line_address(request.m_PhysicalAddress));
bool is_outstanding_load =
!!m_readRequestTable.count(line_address(Address(request.paddr)));
!!m_readRequestTable.count(line_address(request.m_PhysicalAddress));
if (is_outstanding_store) {
if ((request.type == RubyRequestType_LD) ||
(request.type == RubyRequestType_IFETCH) ||
(request.type == RubyRequestType_RMW_Read)) {
if ((request.m_Type == RubyRequestType_LD) ||
(request.m_Type == RubyRequestType_IFETCH) ||
(request.m_Type == RubyRequestType_RMW_Read)) {
m_store_waiting_on_load_cycles++;
} else {
m_store_waiting_on_store_cycles++;
}
return RequestStatus_Aliased;
} else if (is_outstanding_load) {
if ((request.type == RubyRequestType_ST) ||
(request.type == RubyRequestType_RMW_Write)) {
if ((request.m_Type == RubyRequestType_ST) ||
(request.m_Type == RubyRequestType_RMW_Write)) {
m_load_waiting_on_store_cycles++;
} else {
m_load_waiting_on_load_cycles++;
@ -586,7 +585,7 @@ Sequencer::empty() const
RequestStatus
Sequencer::makeRequest(const RubyRequest &request)
{
assert(Address(request.paddr).getOffset() + request.len <=
assert(request.m_PhysicalAddress.getOffset() + request.m_Size <=
RubySystem::getBlockSizeBytes());
RequestStatus status = getRequestStatus(request);
if (status != RequestStatus_Ready)
@ -610,11 +609,10 @@ Sequencer::makeRequest(const RubyRequest &request)
void
Sequencer::issueRequest(const RubyRequest& request)
{
// TODO: get rid of CacheMsg, RubyRequestType, and
// AccessModeTYpe, & have SLICC use RubyRequest and subtypes
// natively
// TODO: Eliminate RubyRequest being copied again.
RubyRequestType ctype;
switch(request.type) {
switch(request.m_Type) {
case RubyRequestType_IFETCH:
ctype = RubyRequestType_IFETCH;
break;
@ -651,7 +649,7 @@ Sequencer::issueRequest(const RubyRequest& request)
}
RubyAccessMode amtype;
switch(request.access_mode){
switch(request.m_AccessMode){
case RubyAccessMode_User:
amtype = RubyAccessMode_User;
break;
@ -665,19 +663,21 @@ Sequencer::issueRequest(const RubyRequest& request)
assert(0);
}
Address line_addr(request.paddr);
Address line_addr(request.m_PhysicalAddress);
line_addr.makeLineAddress();
CacheMsg *msg = new CacheMsg(line_addr, Address(request.paddr), ctype,
Address(request.pc), amtype, request.len, PrefetchBit_No,
request.proc_id);
RubyRequest *msg = new RubyRequest(request.m_PhysicalAddress.getAddress(),
request.data, request.m_Size,
request.m_ProgramCounter.getAddress(),
ctype, amtype, request.pkt,
PrefetchBit_No, request.proc_id);
DPRINTFR(ProtocolTrace, "%7s %3s %10s%20s %6s>%-6s %s %s\n",
g_eventQueue_ptr->getTime(), m_version, "Seq", "Begin", "", "",
Address(request.paddr), RubyRequestType_to_string(request.type));
request.m_PhysicalAddress, RubyRequestType_to_string(request.m_Type));
Time latency = 0; // initialzed to an null value
if (request.type == RubyRequestType_IFETCH)
if (request.m_Type == RubyRequestType_IFETCH)
latency = m_instCache_ptr->getLatency();
else
latency = m_dataCache_ptr->getLatency();