inorder: implement split loads

Korey Sewell 2010-01-31 18:30:35 -05:00
parent ea8909925f
commit 6939482c49
8 changed files with 374 additions and 41 deletions
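The core of the change is in CacheUnit::read() below: before issuing the access, it rounds the address of the last byte down to the cache block size, and if that result lands above the start address the load straddles a block boundary and is split into two accesses. A standalone sketch of that size arithmetic, with a local roundDown() helper and an assumed 64-byte block size rather than gem5's actual classes:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    typedef uint64_t Addr;

    // Stand-in for gem5's roundDown(): align val down to a multiple of align.
    static Addr roundDown(Addr val, Addr align) { return val - (val % align); }

    int main()
    {
        const unsigned blockSize = 64;  // assumed cache block size
        const Addr addr = 0x103e;       // example access address
        const int dataSize = 8;         // e.g. sizeof(uint64_t)

        // Same test the patched CacheUnit::read() performs: if the last byte
        // of the access rounds down to a block address above 'addr', the
        // access crosses a block boundary and must be split.
        Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);

        if (secondAddr > addr) {
            int firstSize  = secondAddr - addr;             // bytes in first block
            int secondSize = addr + dataSize - secondAddr;  // bytes in second block
            assert(firstSize + secondSize == dataSize);
            printf("split load: %d bytes @ %#llx, then %d bytes @ %#llx\n",
                   firstSize, (unsigned long long)addr,
                   secondSize, (unsigned long long)secondAddr);
        } else {
            printf("single access: %d bytes @ %#llx\n",
                   dataSize, (unsigned long long)addr);
        }
        return 0;
    }

For an access that stays inside one block, secondAddr rounds down to the same block as addr, the comparison fails, and the normal single-access path is taken.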

View file

@@ -111,7 +111,11 @@ InOrderDynInst::initVars()
 {
     fetchMemReq = NULL;
     dataMemReq = NULL;
+    splitMemData = NULL;
+    split2ndAccess = false;
+    splitInst = false;
+    splitFinishCnt = 0;
     effAddr = 0;
     physEffAddr = 0;
@@ -187,6 +191,10 @@ InOrderDynInst::~InOrderDynInst()
         delete traceData;
     }
+    if (splitMemData) {
+        delete splitMemData;
+    }
     fault = NoFault;
     --instcount;

View file

@@ -330,6 +330,19 @@ class InOrderDynInst : public FastAlloc, public RefCounted
   public:
     Tick memTime;

+    PacketDataPtr splitMemData;
+    RequestPtr splitMemReq;
+    int splitTotalSize;
+    int split2ndSize;
+    Addr split2ndAddr;
+    bool split2ndAccess;
+    uint8_t split2ndData;
+    PacketDataPtr split2ndDataPtr;
+    unsigned split2ndFlags;
+    bool splitInst;
+    int splitFinishCnt;
+
     ////////////////////////////////////////////////////////////
     //
     //  BASE INSTRUCTION INFORMATION.
@@ -468,7 +481,10 @@ class InOrderDynInst : public FastAlloc, public RefCounted
         if (!resSched.empty()) {
             ThePipeline::ScheduleEntry* sked = resSched.top();
             resSched.pop();
-            delete sked;
+            if (sked != 0) {
+                delete sked;
+            }
         }
     }

View file

@@ -53,8 +53,8 @@ namespace ThePipeline {
     const unsigned StageWidth = 1;
     const unsigned BackEndStartStage = 2;

-    // Enumerated List of Resources The Pipeline Uses
-    enum ResourceList {
+    // List of Resources The Pipeline Uses
+    enum ResourceId {
         FetchSeq = 0,
         ICache,
         Decode,
@@ -94,6 +94,7 @@
           stageNum(stage_num), resNum(res_num), cmd(_cmd),
           idx(_idx), priority(_priority)
         { }

        virtual ~ScheduleEntry(){}

        // Stage number to perform this service.
@@ -159,7 +160,6 @@
                 stageNum, nextTaskPriority++, unit, request, param
             ));
         }
     };
};

View file

@ -262,15 +262,22 @@ Resource::findRequest(DynInstPtr inst)
map<int, ResReqPtr>::iterator map_it = reqMap.begin(); map<int, ResReqPtr>::iterator map_it = reqMap.begin();
map<int, ResReqPtr>::iterator map_end = reqMap.end(); map<int, ResReqPtr>::iterator map_end = reqMap.end();
bool found = false;
ResReqPtr req = NULL;
while (map_it != map_end) { while (map_it != map_end) {
if ((*map_it).second && if ((*map_it).second &&
(*map_it).second->getInst() == inst) { (*map_it).second->getInst() == inst) {
return (*map_it).second; req = (*map_it).second;
//return (*map_it).second;
assert(found == false);
found = true;
} }
map_it++; map_it++;
} }
return NULL; return req;
//return NULL;
} }
void void

View file

@@ -181,6 +181,25 @@ ResourcePool::getResIdx(const std::string &res_name)
             return idx;
     }

+    panic("Can't find resource idx for: %s\n", res_name);
+    return 0;
+}
+
+unsigned
+ResourcePool::getResIdx(const ThePipeline::ResourceId &res_id)
+{
+    int num_resources = resources.size();
+
+    for (int idx = 0; idx < num_resources; idx++) {
+        if (resources[idx]->getId() == res_id)
+            return idx;
+    }
+
+    // todo: change return value to int and return a -1 here
+    //       maybe even have enumerated type
+    //       panic for now...
+    panic("Can't find resource idx for: %i\n", res_id);
+
     return 0;
 }

View file

@@ -141,6 +141,7 @@ class ResourcePool {
     /** Returns a specific resource. */
     unsigned getResIdx(const std::string &res_name);
+    unsigned getResIdx(const ThePipeline::ResourceId &res_id);

     /** Returns a pointer to a resource */
     Resource* getResource(int res_idx) { return resources[res_idx]; }

View file

@@ -40,6 +40,7 @@
 #include "cpu/inorder/resources/cache_unit.hh"
 #include "cpu/inorder/pipeline_traits.hh"
 #include "cpu/inorder/cpu.hh"
+#include "cpu/inorder/resource_pool.hh"
 #include "mem/request.hh"

 using namespace std;
@@ -136,7 +137,9 @@ CacheUnit::getSlot(DynInstPtr inst)
         return -1;
     }

-    if (!inst->validMemAddr()) {
+    // For a Split-Load, the instruction would have processed once already
+    // causing the address to be unset.
+    if (!inst->validMemAddr() && !inst->splitInst) {
         panic("Mem. Addr. must be set before requesting cache access\n");
     }
@@ -159,12 +162,24 @@
                 inst->readTid(), inst->seqNum, req_addr);
         return new_slot;
     } else {
-        DPRINTF(InOrderCachePort,
-            "[tid:%i] Denying request because there is an outstanding"
-            " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
-            inst->readTid(), req_addr, addrMap[tid][req_addr], inst->memTime);
-        return -1;
+        // Allow same instruction multiple accesses to same address
+        if (addrMap[tid][req_addr] == inst->seqNum) {
+            int new_slot = Resource::getSlot(inst);
+            if (new_slot == -1)
+                return -1;
+            return new_slot;
+        } else {
+            DPRINTF(InOrderCachePort,
+                "[tid:%i] Denying request because there is an outstanding"
+                " request to/for addr. %08p. by [sn:%i] @ tick %i\n",
+                inst->readTid(), req_addr, addrMap[tid][req_addr], inst->memTime);
+            return -1;
+        }
     }
+
+    return -1;
 }

 void
@@ -175,17 +190,69 @@ CacheUnit::freeSlot(int slot_num)
     vector<Addr>::iterator vect_it =
         find(addrList[tid].begin(), addrList[tid].end(),
              reqMap[slot_num]->inst->getMemAddr());

-    assert(vect_it != addrList[tid].end());
+    assert(vect_it != addrList[tid].end() ||
+           reqMap[slot_num]->inst->splitInst);

     DPRINTF(InOrderCachePort,
             "[tid:%i]: Address %08p removed from dependency list\n",
             reqMap[slot_num]->inst->readTid(), (*vect_it));

-    addrList[tid].erase(vect_it);
+    if (vect_it != addrList[tid].end()) {
+        DPRINTF(InOrderCachePort,
+                "[tid:%i]: Address %08p removed from dependency list\n",
+                reqMap[slot_num]->inst->readTid(), (*vect_it));
+
+        addrList[tid].erase(vect_it);
+    }

     Resource::freeSlot(slot_num);
 }

+ResReqPtr
+CacheUnit::findRequest(DynInstPtr inst)
+{
+    map<int, ResReqPtr>::iterator map_it = reqMap.begin();
+    map<int, ResReqPtr>::iterator map_end = reqMap.end();
+
+    while (map_it != map_end) {
+        CacheRequest* cache_req = dynamic_cast<CacheRequest*>((*map_it).second);
+        assert(cache_req);
+
+        if (cache_req &&
+            cache_req->getInst() == inst &&
+            cache_req->instIdx == inst->resSched.top()->idx) {
+            return cache_req;
+        }
+        map_it++;
+    }
+
+    return NULL;
+}
+
+ResReqPtr
+CacheUnit::findSplitRequest(DynInstPtr inst, int idx)
+{
+    map<int, ResReqPtr>::iterator map_it = reqMap.begin();
+    map<int, ResReqPtr>::iterator map_end = reqMap.end();
+
+    while (map_it != map_end) {
+        CacheRequest* cache_req = dynamic_cast<CacheRequest*>((*map_it).second);
+        assert(cache_req);
+
+        if (cache_req &&
+            cache_req->getInst() == inst &&
+            cache_req->instIdx == idx) {
+            return cache_req;
+        }
+        map_it++;
+    }
+
+    return NULL;
+}
+
 ResReqPtr
 CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                       int slot_num, unsigned cmd)
@@ -200,6 +267,14 @@ CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
     switch (sched_entry->cmd)
     {
+      case InitSecondSplitRead:
+        pkt_cmd = MemCmd::ReadReq;
+
+        DPRINTF(InOrderCachePort,
+                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
+                inst->readTid(), inst->seqNum, inst->split2ndAddr);
+        break;
+
       case InitiateReadData:
         pkt_cmd = MemCmd::ReadReq;
@@ -231,7 +306,8 @@ CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
     return new CacheRequest(this, inst, stage_num, id, slot_num,
                             sched_entry->cmd, 0, pkt_cmd,
-                            0/*flags*/, this->cpu->readCpuId());
+                            0/*flags*/, this->cpu->readCpuId(),
+                            inst->resSched.top()->idx);
 }

 void
@@ -242,7 +318,8 @@ CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
     // Check to see if this instruction is requesting the same command
     // or a different one
-    if (cache_req->cmd != inst->resSched.top()->cmd) {
+    if (cache_req->cmd != inst->resSched.top()->cmd &&
+        cache_req->instIdx == inst->resSched.top()->idx) {
         // If different, then update command in the request
         cache_req->cmd = inst->resSched.top()->cmd;
         DPRINTF(InOrderCachePort,
@@ -250,7 +327,7 @@ CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
                 "instruction\n ", inst->readTid(), inst->seqNum);
         service_request = true;
-    } else {
+    } else if (inst->resSched.top()->idx != CacheUnit::InitSecondSplitRead) {
         // If same command, just check to see if memory access was completed
         // but dont try to re-execute
         DPRINTF(InOrderCachePort,
@@ -276,12 +353,25 @@ CacheUnit::doTLBAccess(DynInstPtr inst, CacheReqPtr cache_req, int acc_size,
                                        cpu->readCpuId(), inst->readTid());
         cache_req->memReq = inst->fetchMemReq;
     } else {
-        inst->dataMemReq = new Request(inst->readTid(), aligned_addr,
-                                       acc_size, flags, inst->readPC(),
-                                       cpu->readCpuId(), inst->readTid());
-        cache_req->memReq = inst->dataMemReq;
+        if (!cache_req->is2ndSplit()) {
+            inst->dataMemReq = new Request(cpu->asid[tid], aligned_addr,
+                                           acc_size, flags, inst->readPC(),
+                                           cpu->readCpuId(), inst->readTid());
+            cache_req->memReq = inst->dataMemReq;
+        } else {
+            assert(inst->splitInst);
+
+            inst->splitMemReq = new Request(cpu->asid[tid],
+                                            inst->split2ndAddr,
+                                            acc_size,
+                                            flags,
+                                            inst->readPC(),
+                                            cpu->readCpuId(),
+                                            tid);
+            cache_req->memReq = inst->splitMemReq;
+        }
     }

     cache_req->fault =
         _tlb->translateAtomic(cache_req->memReq,
@@ -318,14 +408,94 @@ CacheUnit::read(DynInstPtr inst, Addr addr, T &data, unsigned flags)
     CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
     assert(cache_req);

-    int acc_size = sizeof(T);
-    doTLBAccess(inst, cache_req, acc_size, flags, TheISA::TLB::Read);
+    // The block size of our peer
+    unsigned blockSize = this->cachePort->peerBlockSize();
+
+    //The size of the data we're trying to read.
+    int dataSize = sizeof(T);
+
+    if (inst->split2ndAccess) {
+        dataSize = inst->split2ndSize;
+        cache_req->splitAccess = true;
+        cache_req->split2ndAccess = true;
+
+        DPRINTF(InOrderCachePort, "%i: sn[%i] Split Read Access (2 of 2) for (%#x, %#x).\n", curTick, inst->seqNum,
+                inst->getMemAddr(), inst->split2ndAddr);
+    }
+
+    //The address of the second part of this access if it needs to be split
+    //across a cache line boundary.
+    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);
+
+    if (secondAddr > addr && !inst->split2ndAccess) {
+        DPRINTF(InOrderCachePort, "%i: sn[%i] Split Read Access (1 of 2) for (%#x, %#x).\n", curTick, inst->seqNum,
+                addr, secondAddr);
+
+        // Save All "Total" Split Information
+        // ==============================
+        inst->splitInst = true;
+        inst->splitMemData = new uint8_t[dataSize];
+        inst->splitTotalSize = dataSize;
+
+        // Schedule Split Read/Complete for Instruction
+        // ==============================
+        int stage_num = cache_req->getStageNum();
+        int stage_pri = ThePipeline::getNextPriority(inst, stage_num);
+
+        inst->resSched.push(new ScheduleEntry(stage_num,
+                                              stage_pri,
+                                              cpu->resPool->getResIdx(DCache),
+                                              CacheUnit::InitSecondSplitRead,
+                                              1)
+                            );
+
+        inst->resSched.push(new ScheduleEntry(stage_num + 1,
+                                              1/*stage_pri*/,
+                                              cpu->resPool->getResIdx(DCache),
+                                              CacheUnit::CompleteSecondSplitRead, 1)
+                            );
+
+        // Split Information for First Access
+        // ==============================
+        dataSize = secondAddr - addr;
+        cache_req->splitAccess = true;
+
+        // Split Information for Second Access
+        // ==============================
+        inst->split2ndSize = addr + sizeof(T) - secondAddr;
+        inst->split2ndAddr = secondAddr;
+        inst->split2ndDataPtr = inst->splitMemData + dataSize;
+        inst->split2ndFlags = flags;
+    }
+
+    //cout << "h1" << endl;
+    doTLBAccess(inst, cache_req, dataSize, flags, TheISA::TLB::Read);
+    //cout << "h2" << endl;

     if (cache_req->fault == NoFault) {
-        cache_req->reqData = new uint8_t[acc_size];
-        doCacheAccess(inst, NULL);
+        if (!cache_req->splitAccess) {
+            cache_req->reqData = new uint8_t[dataSize];
+            doCacheAccess(inst, NULL);
+        } else {
+            if (!inst->split2ndAccess) {
+                cache_req->reqData = inst->splitMemData;
+            } else {
+                cache_req->reqData = inst->split2ndDataPtr;
+            }
+
+            doCacheAccess(inst, NULL, cache_req);
+        }
     }
+    //cout << "h3" << endl;

     return cache_req->fault;
 }
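The hunk above extends the instruction's own resource schedule rather than the pipeline: the second half of the load is pushed as another DCache access in the same stage at the next priority, and its completion in the following stage. A simplified, self-contained sketch of that ordering, using a plain std::priority_queue and made-up stage/priority numbers in place of gem5's ScheduleEntry machinery:

    #include <cstdio>
    #include <queue>
    #include <vector>

    // Simplified stand-in for ThePipeline::ScheduleEntry, serviced in order of
    // stage number first and priority second (lowest values first).
    struct Entry {
        int stage, priority;
        const char *cmd;
    };

    struct EntryCompare {
        bool operator()(const Entry &a, const Entry &b) const {
            if (a.stage != b.stage)
                return a.stage > b.stage;
            return a.priority > b.priority;
        }
    };

    int main()
    {
        std::priority_queue<Entry, std::vector<Entry>, EntryCompare> resSched;

        const int stage_num = 3;   // assumed stage issuing the first access
        const int stage_pri = 1;   // assumed "next" priority in that stage

        // The existing entry for the first half, plus the two entries the
        // patched read() pushes when it detects a straddling load.
        resSched.push({stage_num,     0,         "InitiateReadData (first half)"});
        resSched.push({stage_num,     stage_pri, "InitSecondSplitRead"});
        resSched.push({stage_num + 1, 1,         "CompleteSecondSplitRead"});

        while (!resSched.empty()) {
            Entry e = resSched.top();
            resSched.pop();
            printf("stage %d, pri %d: %s\n", e.stage, e.priority, e.cmd);
        }
        return 0;
    }

Popping the queue prints the first-half access, then the second-half access in the same stage, then the split completion a stage later, which is the order the commit relies on.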
@@ -337,6 +507,20 @@ CacheUnit::write(DynInstPtr inst, T data, Addr addr, unsigned flags,
     CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
     assert(cache_req);

+    // The block size of our peer
+    unsigned blockSize = this->cachePort->peerBlockSize();
+
+    //The size of the data we're trying to read.
+    int dataSize = sizeof(T);
+
+    //The address of the second part of this access if it needs to be split
+    //across a cache line boundary.
+    Addr secondAddr = roundDown(addr + dataSize - 1, blockSize);
+
+    if (secondAddr > addr) {
+        assert(0 && "Need Split Write Code!");
+    }
+
     int acc_size = sizeof(T);
     doTLBAccess(inst, cache_req, acc_size, flags, TheISA::TLB::Write);
@@ -364,6 +548,8 @@ CacheUnit::execute(int slot_num)
 #if TRACING_ON
     ThreadID tid = inst->readTid();
     int seq_num = inst->seqNum;
+    std::string acc_type = "write";
+
 #endif

     cache_req->fault = NoFault;
@@ -395,10 +581,14 @@
     }

       case InitiateReadData:
+#if TRACING_ON
+        acc_type = "read";
+#endif
+
       case InitiateWriteData:
         DPRINTF(InOrderCachePort,
-                "[tid:%u]: Initiating data access to %s for addr. %08p\n",
-                tid, name(), cache_req->inst->getMemAddr());
+                "[tid:%u]: [sn:%i] Initiating data %s access to %s for addr. %08p\n",
+                tid, inst->seqNum, acc_type, name(), cache_req->inst->getMemAddr());

         inst->setCurResSlot(slot_num);
@@ -406,10 +596,31 @@
             inst->execute();
         } else {
             inst->initiateAcc();
+            //if (inst->splitAccess) {
+            //    assert(0 && " Marked as spill inst");
+            //}
         }
         break;

+      case InitSecondSplitRead:
+        DPRINTF(InOrderCachePort,
+                "[tid:%u]: [sn:%i] Initiating split data read access to %s for addr. %08p\n",
+                tid, inst->seqNum, name(), cache_req->inst->split2ndAddr);
+        inst->split2ndAccess = true;
+        read(inst, inst->split2ndAddr, inst->split2ndData, inst->split2ndFlags);
+        break;
+
+      case InitSecondSplitWrite:
+        DPRINTF(InOrderCachePort,
+                "[tid:%u]: [sn:%i] Initiating split data write access to %s for addr. %08p\n",
+                tid, inst->seqNum, name(), cache_req->inst->getMemAddr());
+        assert(0);
+        inst->split2ndAccess = true;
+        //write(inst, inst->split2ndAddr, inst->split2ndData, inst->split2ndFlags);
+        break;
+
       case CompleteFetch:
         if (cache_req->isMemAccComplete()) {
             DPRINTF(InOrderCachePort,
@@ -425,7 +636,7 @@
             cache_req->done();
         } else {
             DPRINTF(InOrderCachePort,
                     "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
                     tid, inst->seqNum);
             DPRINTF(InOrderStall,
                     "STALL: [tid:%i]: Fetch miss from %08p\n",
@@ -454,6 +665,24 @@
         }
         break;

+      case CompleteSecondSplitRead:
+        DPRINTF(InOrderCachePort,
+                "[tid:%i]: [sn:%i]: Trying to Complete Split Data Read Access\n",
+                tid, inst->seqNum);
+
+        if (cache_req->isMemAccComplete() ||
+            inst->isDataPrefetch() ||
+            inst->isInstPrefetch()) {
+            cache_req->setMemStall(false);
+            cache_req->done();
+        } else {
+            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
+                    tid, cache_req->inst->split2ndAddr);
+            cache_req->setCompleted(false);
+            cache_req->setMemStall(true);
+        }
+        break;
+
       default:
         fatal("Unrecognized command to %s", resName);
     }
@@ -498,15 +727,21 @@ CacheUnit::writeHint(DynInstPtr inst)
 // @TODO: Split into doCacheRead() and doCacheWrite()
 Fault
-CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res)
+CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res, CacheReqPtr split_req)
 {
     Fault fault = NoFault;
 #if TRACING_ON
     ThreadID tid = inst->readTid();
 #endif

-    CacheReqPtr cache_req
-        = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
+    CacheReqPtr cache_req;
+
+    if (split_req == NULL) {
+        cache_req = dynamic_cast<CacheReqPtr>(reqMap[inst->getCurResSlot()]);
+    } else{
+        cache_req = split_req;
+    }

     assert(cache_req);

     // Check for LL/SC and if so change command
@@ -522,7 +757,7 @@ CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res)
     }

     cache_req->dataPkt = new CacheReqPacket(cache_req, cache_req->pktCmd,
-                                            Packet::Broadcast);
+                                            Packet::Broadcast, cache_req->instIdx);

     if (cache_req->dataPkt->isRead()) {
         cache_req->dataPkt->dataStatic(cache_req->reqData);
@@ -615,7 +850,16 @@ CacheUnit::processCacheCompletion(PacketPtr pkt)
     // Cast to correct request type
     CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
-        findRequest(cache_pkt->cacheReq->getInst()));
+        findSplitRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));
+
+    if (!cache_req) {
+        warn(
+            "[tid:%u]: [sn:%i]: Can't find slot for cache access to addr. %08p\n",
+            cache_pkt->cacheReq->getInst()->readTid(),
+            cache_pkt->cacheReq->getInst()->seqNum,
+            cache_pkt->cacheReq->getInst()->getMemAddr());
+    }

     assert(cache_req);
@@ -661,9 +905,27 @@ CacheUnit::processCacheCompletion(PacketPtr pkt)
     DPRINTF(InOrderCachePort,
             "[tid:%u]: [sn:%i]: Processing cache access\n",
             tid, inst->seqNum);

-    inst->completeAcc(pkt);
+    if (inst->splitInst) {
+        inst->splitFinishCnt++;
+
+        if (inst->splitFinishCnt == 2) {
+            cache_req->memReq->setVirt(0/*inst->tid*/,
+                                       inst->getMemAddr(),
+                                       inst->splitTotalSize,
+                                       0,
+                                       0);
+
+            Packet split_pkt(cache_req->memReq, cache_req->pktCmd,
+                             Packet::Broadcast);
+            split_pkt.dataStatic(inst->splitMemData);
+
+            inst->completeAcc(&split_pkt);
+        }
+    } else {
+        inst->completeAcc(pkt);
+    }

     if (inst->isLoad()) {
         assert(cache_pkt->isRead());
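The completion path above needs no copying to stitch the two replies together because both halves were already pointed into one buffer when they were issued: the first access uses splitMemData itself as its reqData, and the second uses split2ndDataPtr = splitMemData + firstSize, so once splitFinishCnt reaches 2 the whole buffer can be handed to completeAcc() through a temporary packet. A minimal sketch of that buffer layout, independent of gem5's Packet and Request classes and with made-up block contents and sizes:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main()
    {
        // Made-up contents of the two cache blocks touched by an 8-byte load
        // that straddles a block boundary: 2 bytes at the end of the first
        // block, 6 bytes at the start of the second.
        const uint8_t firstBlockTail[2]  = {0x3e, 0x10};
        const uint8_t secondBlockHead[6] = {0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff};

        const int totalSize  = 8;
        const int firstSize  = 2;
        const int secondSize = totalSize - firstSize;

        // One buffer sized for the whole load; each half of the split access
        // is given a pointer to its own slice of it as its reply buffer.
        uint8_t splitMemData[totalSize];
        uint8_t *firstDataPtr    = splitMemData;              // reqData, access 1
        uint8_t *split2ndDataPtr = splitMemData + firstSize;  // reqData, access 2

        // The cache replies land directly in place (dataStatic-style).
        memcpy(firstDataPtr, firstBlockTail, firstSize);
        memcpy(split2ndDataPtr, secondBlockHead, secondSize);

        // Once both halves have finished, the buffer holds the full value and
        // can be handed to completeAcc() in one piece.
        uint64_t value = 0;
        memcpy(&value, splitMemData, sizeof(value));
        assert((value & 0xffff) == 0x103e);  // low two bytes, little-endian host
        return 0;
    }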

View file

@@ -72,7 +72,10 @@ class CacheUnit : public Resource
         CompleteWriteData,
         Fetch,
         ReadData,
-        WriteData
+        WriteData,
+        InitSecondSplitRead,
+        InitSecondSplitWrite,
+        CompleteSecondSplitRead
     };

   public:
@@ -124,6 +127,9 @@ class CacheUnit : public Resource
                                   int res_idx, int slot_num,
                                   unsigned cmd);

+    ResReqPtr findRequest(DynInstPtr inst);
+    ResReqPtr findSplitRequest(DynInstPtr inst, int idx);
+
     void requestAgain(DynInstPtr inst, bool &try_request);

     int getSlot(DynInstPtr inst);
@@ -155,7 +161,7 @@ class CacheUnit : public Resource
     /** Returns a specific port. */
     Port *getPort(const std::string &if_name, int idx);

     template <class T>
     Fault read(DynInstPtr inst, Addr addr, T &data, unsigned flags);
@@ -169,7 +175,7 @@ class CacheUnit : public Resource
     /** Read/Write on behalf of an instruction.
      *  curResSlot needs to be a valid value in instruction.
      */
-    Fault doCacheAccess(DynInstPtr inst, uint64_t *write_result=NULL);
+    Fault doCacheAccess(DynInstPtr inst, uint64_t *write_result=NULL, CacheReqPtr split_req=NULL);

     void prefetch(DynInstPtr inst);
@@ -237,17 +243,18 @@ class CacheRequest : public ResourceRequest
   public:
     CacheRequest(CacheUnit *cres, DynInstPtr inst, int stage_num, int res_idx,
                  int slot_num, unsigned cmd, int req_size,
-                 MemCmd::Command pkt_cmd, unsigned flags, int cpu_id)
+                 MemCmd::Command pkt_cmd, unsigned flags, int cpu_id, int idx)
         : ResourceRequest(cres, inst, stage_num, res_idx, slot_num, cmd),
           pktCmd(pkt_cmd), memReq(NULL), reqData(NULL), dataPkt(NULL),
           retryPkt(NULL), memAccComplete(false), memAccPending(false),
-          tlbStall(false)
+          tlbStall(false), splitAccess(false), splitAccessNum(-1),
+          split2ndAccess(false), instIdx(idx)
     { }

     virtual ~CacheRequest()
     {
-        if (reqData) {
+        if (reqData && !splitAccess) {
             delete [] reqData;
         }
     }
@@ -261,6 +268,11 @@ class CacheRequest : public ResourceRequest
         memAccComplete = completed;
     }

+    bool is2ndSplit()
+    {
+        return split2ndAccess;
+    }
+
     bool isMemAccComplete() { return memAccComplete; }

     void setMemAccPending(bool pending = true) { memAccPending = pending; }
@@ -276,19 +288,27 @@ class CacheRequest : public ResourceRequest
     bool memAccComplete;
     bool memAccPending;
     bool tlbStall;
+    bool splitAccess;
+    int splitAccessNum;
+    bool split2ndAccess;
+    int instIdx;
 };

 class CacheReqPacket : public Packet
 {
   public:
     CacheReqPacket(CacheRequest *_req,
-                   Command _cmd, short _dest)
-        : Packet(_req->memReq, _cmd, _dest), cacheReq(_req)
+                   Command _cmd, short _dest, int _idx = 0)
+        : Packet(_req->memReq, _cmd, _dest), cacheReq(_req), instIdx(_idx)
     {
     }

     CacheRequest *cacheReq;
+    int instIdx;
 };

 #endif //__CPU_CACHE_UNIT_HH__