inorder: update pipeline interface for handling finished resource reqs
Formerly, to free up bandwidth in a resource, we could just change the pointer in that resource, but at the same time the pipeline stages had visibility to see what happened to a resource request. Now that we are recycling these requests (to avoid too much dynamic allocation), we can't throw away the request too early or the pipeline stage gets bad information. Instead, mark when a request is done with the resource altogether and then let the pipeline stage call back to the resource that it's time to free up the bandwidth for more instructions *** interface notes *** - When an instruction completes and is done in a resource for that cycle, call done() - When an instruction fails and is done with a resource for that cycle, call done(false) - When an instruction completes, but isn't finished with a resource, call completed() - When an instruction fails, but isn't finished with a resource, call completed(false) * * * inorder: tlbmiss wakeup bug fix
This commit is contained in:
parent
d64226750e
commit
d5961b2b20
|
@ -940,7 +940,9 @@ PipelineStage::processInstSchedule(DynInstPtr inst,int &reqs_processed)
|
|||
ResReqPtr req = cpu->resPool->request(res_num, inst);
|
||||
assert(req->valid);
|
||||
|
||||
if (req->isCompleted()) {
|
||||
bool req_completed = req->isCompleted();
|
||||
bool done_in_pipeline = false;
|
||||
if (req_completed) {
|
||||
DPRINTF(InOrderStage, "[tid:%i]: [sn:%i] request to %s "
|
||||
"completed.\n", tid, inst->seqNum,
|
||||
cpu->resPool->name(res_num));
|
||||
|
@ -949,11 +951,10 @@ PipelineStage::processInstSchedule(DynInstPtr inst,int &reqs_processed)
|
|||
|
||||
req->stagePasses++;
|
||||
|
||||
bool done_in_pipeline = inst->finishSkedEntry();
|
||||
done_in_pipeline = inst->finishSkedEntry();
|
||||
if (done_in_pipeline) {
|
||||
DPRINTF(InOrderDynInst, "[tid:%i]: [sn:%i] finished "
|
||||
"in pipeline.\n", tid, inst->seqNum);
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
DPRINTF(InOrderStage, "[tid:%i]: [sn:%i] request to %s failed."
|
||||
|
@ -990,7 +991,18 @@ PipelineStage::processInstSchedule(DynInstPtr inst,int &reqs_processed)
|
|||
"thread due to cache miss.\n");
|
||||
cpu->activateNextReadyContext();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// If this request no longer needs to take up bandwidth in the
|
||||
// resource, go ahead and free that bandwidth up
|
||||
if (req->doneInResource) {
|
||||
req->freeSlot();
|
||||
}
|
||||
|
||||
// No longer need to process this instruction if the last
|
||||
// request it had wasn't completed or if there is nothing
|
||||
// else for it to do in the pipeline
|
||||
if (done_in_pipeline || !req_completed) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -44,6 +44,7 @@ Resource::Resource(string res_name, int res_id, int res_width,
|
|||
|
||||
// Used to deny an instruction a resource.
|
||||
deniedReq = new ResourceRequest(this);
|
||||
deniedReq->valid = true;
|
||||
}
|
||||
|
||||
Resource::~Resource()
|
||||
|
@ -97,6 +98,9 @@ Resource::slotsInUse()
|
|||
void
|
||||
Resource::freeSlot(int slot_idx)
|
||||
{
|
||||
DPRINTF(Resource, "Deallocating [slot:%i].\n",
|
||||
slot_idx);
|
||||
|
||||
// Put slot number on this resource's free list
|
||||
availSlots.push_back(slot_idx);
|
||||
|
||||
|
@ -160,6 +164,9 @@ Resource::request(DynInstPtr inst)
|
|||
slot_num = getSlot(inst);
|
||||
|
||||
if (slot_num != -1) {
|
||||
DPRINTF(Resource, "Allocating [slot:%i] for [tid:%i]: [sn:%i]\n",
|
||||
slot_num, inst->readTid(), inst->seqNum);
|
||||
|
||||
// Get Stage # from Schedule Entry
|
||||
stage_num = inst->curSkedEntry->stageNum;
|
||||
unsigned cmd = inst->curSkedEntry->cmd;
|
||||
|
@ -177,10 +184,12 @@ Resource::request(DynInstPtr inst)
|
|||
inst->readTid());
|
||||
}
|
||||
|
||||
reqs[slot_num] = inst_req;
|
||||
|
||||
try_request = true;
|
||||
} else {
|
||||
DPRINTF(Resource, "No slot available for [tid:%i]: [sn:%i]\n",
|
||||
inst->readTid(), inst->seqNum);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (try_request) {
|
||||
|
@ -352,8 +361,9 @@ int ResourceRequest::resReqID = 0;
|
|||
int ResourceRequest::maxReqCount = 0;
|
||||
|
||||
ResourceRequest::ResourceRequest(Resource *_res)
|
||||
: res(_res), inst(NULL), stagePasses(0), valid(false), complSlotNum(-1),
|
||||
completed(false), squashed(false), processing(false), memStall(false)
|
||||
: res(_res), inst(NULL), stagePasses(0), valid(false), doneInResource(false),
|
||||
complSlotNum(-1), completed(false), squashed(false), processing(false),
|
||||
memStall(false)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -384,6 +394,19 @@ ResourceRequest::clearRequest()
|
|||
valid = false;
|
||||
inst = NULL;
|
||||
stagePasses = 0;
|
||||
completed = false;
|
||||
doneInResource = false;
|
||||
squashed = false;
|
||||
memStall = false;
|
||||
}
|
||||
|
||||
void
|
||||
ResourceRequest::freeSlot()
|
||||
{
|
||||
assert(res);
|
||||
|
||||
// Free Slot So Another Instruction Can Use This Resource
|
||||
res->freeSlot(slotNum);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -399,13 +422,8 @@ ResourceRequest::done(bool completed)
|
|||
if (completed) {
|
||||
complSlotNum = slotNum;
|
||||
}
|
||||
|
||||
// Free Slot So Another Instruction Can Use This Resource
|
||||
res->freeSlot(slotNum);
|
||||
|
||||
// change slot # to -1, since we check slotNum to see if request is
|
||||
// still valid
|
||||
slotNum = -1;
|
||||
doneInResource = true;
|
||||
}
|
||||
|
||||
ResourceEvent::ResourceEvent()
|
||||
|
|
|
@ -318,6 +318,8 @@ class ResourceRequest
|
|||
*/
|
||||
void done(bool completed = true);
|
||||
|
||||
void freeSlot();
|
||||
|
||||
/////////////////////////////////////////////
|
||||
//
|
||||
// GET RESOURCE REQUEST IDENTIFICATION / INFO
|
||||
|
@ -362,6 +364,8 @@ class ResourceRequest
|
|||
|
||||
bool valid;
|
||||
|
||||
bool doneInResource;
|
||||
|
||||
////////////////////////////////////////
|
||||
//
|
||||
// GET RESOURCE REQUEST STATUS FROM VARIABLES
|
||||
|
|
|
@ -667,13 +667,16 @@ CacheUnit::execute(int slot_num)
|
|||
CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
|
||||
assert(cache_req);
|
||||
|
||||
if (cachePortBlocked) {
|
||||
if (cachePortBlocked &&
|
||||
(cache_req->cmd == InitiateReadData ||
|
||||
cache_req->cmd == InitiateWriteData ||
|
||||
cache_req->cmd == InitSecondSplitRead ||
|
||||
cache_req->cmd == InitSecondSplitWrite)) {
|
||||
DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
|
||||
cache_req->setCompleted(false);
|
||||
cache_req->done(false);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
DynInstPtr inst = cache_req->inst;
|
||||
#if TRACING_ON
|
||||
ThreadID tid = inst->readTid();
|
||||
|
@ -690,7 +693,12 @@ CacheUnit::execute(int slot_num)
|
|||
acc_type = "read";
|
||||
#endif
|
||||
case InitiateWriteData:
|
||||
|
||||
if (cachePortBlocked) {
|
||||
DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
|
||||
cache_req->done(false);
|
||||
return;
|
||||
}
|
||||
|
||||
DPRINTF(InOrderCachePort,
|
||||
"[tid:%u]: [sn:%i] Initiating data %s access to %s for "
|
||||
"addr. %08p\n", tid, inst->seqNum, acc_type, name(),
|
||||
|
@ -864,7 +872,7 @@ CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res,
|
|||
"[tid:%i] [sn:%i] cannot access cache, because port "
|
||||
"is blocked. now waiting to retry request\n", tid,
|
||||
inst->seqNum);
|
||||
cache_req->setCompleted(false);
|
||||
cache_req->done(false);
|
||||
cachePortBlocked = true;
|
||||
} else {
|
||||
DPRINTF(InOrderCachePort,
|
||||
|
@ -888,7 +896,7 @@ CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res,
|
|||
// Make cache request again since access due to
|
||||
// inability to access
|
||||
DPRINTF(InOrderStall, "STALL: \n");
|
||||
cache_req->setCompleted(false);
|
||||
cache_req->done(false);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -911,7 +919,7 @@ CacheUnit::processCacheCompletion(PacketPtr pkt)
|
|||
cache_pkt->cacheReq->getTid(),
|
||||
cache_pkt->cacheReq->seqNum);
|
||||
|
||||
cache_pkt->cacheReq->done();
|
||||
cache_pkt->cacheReq->freeSlot();
|
||||
delete cache_pkt;
|
||||
|
||||
cpu->wakeCPU();
|
||||
|
@ -1075,8 +1083,10 @@ CacheUnitEvent::process()
|
|||
req_ptr->tlbStall = false;
|
||||
|
||||
if (req_ptr->isSquashed()) {
|
||||
req_ptr->done();
|
||||
req_ptr->freeSlot();
|
||||
}
|
||||
|
||||
tlb_res->cpu->wakeCPU();
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -100,7 +100,7 @@ FetchSeqUnit::execute(int slot_num)
|
|||
fs_req->done();
|
||||
} else {
|
||||
DPRINTF(InOrderStall, "STALL: [tid:%i]: NPC not valid\n", tid);
|
||||
fs_req->setCompleted(false);
|
||||
fs_req->done(false);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
|
|
@ -208,9 +208,9 @@ FetchUnit::execute(int slot_num)
|
|||
CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
|
||||
assert(cache_req);
|
||||
|
||||
if (cachePortBlocked) {
|
||||
if (cachePortBlocked && cache_req->cmd == InitiateFetch) {
|
||||
DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
|
||||
cache_req->setCompleted(false);
|
||||
cache_req->done(false);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -261,7 +261,7 @@ FetchUnit::execute(int slot_num)
|
|||
// If not, block this request.
|
||||
if (pendingFetch.size() >= fetchBuffSize) {
|
||||
DPRINTF(InOrderCachePort, "No room available in fetch buffer.\n");
|
||||
cache_req->setCompleted(false);
|
||||
cache_req->done();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -405,6 +405,7 @@ FetchUnit::processCacheCompletion(PacketPtr pkt)
|
|||
cache_pkt->cacheReq->seqNum);
|
||||
|
||||
cache_pkt->cacheReq->done();
|
||||
cache_pkt->cacheReq->freeSlot();
|
||||
delete cache_pkt;
|
||||
|
||||
cpu->wakeCPU();
|
||||
|
|
Loading…
Reference in a new issue