2009-02-11 00:49:29 +01:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2007 MIPS Technologies, Inc.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions are
|
|
|
|
* met: redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer;
|
|
|
|
* redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution;
|
|
|
|
* neither the name of the copyright holders nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived from
|
|
|
|
* this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* Authors: Korey Sewell
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <vector>
|
|
|
|
#include <list>
|
|
|
|
#include "cpu/inorder/resource.hh"
|
|
|
|
#include "cpu/inorder/cpu.hh"
|
|
|
|
using namespace std;
|
|
|
|
|
|
|
|
// Construct a resource with the given name, id, bandwidth (width in
// slots), and latency, owned by the given CPU.
Resource::Resource(string res_name, int res_id, int res_width,
                   int res_latency, InOrderCPU *_cpu)
    : resName(res_name), id(res_id),
      width(res_width), latency(res_latency), cpu(_cpu)
{
    // One recyclable request object per bandwidth slot.
    reqs.resize(width);

    // Dedicated sentinel request handed back when an instruction is
    // denied use of this resource.
    deniedReq = new ResourceRequest(this);
    deniedReq->valid = true;
}
|
|
|
|
|
2009-05-12 21:01:16 +02:00
|
|
|
Resource::~Resource()
{
    // resourceEvent is NULL for zero-latency resources (see init());
    // delete[] on a null pointer is a no-op, so no guard is needed.
    delete [] resourceEvent;

    delete deniedReq;

    // Release the per-slot request objects allocated in init().
    for (int slot = 0; slot < width; slot++)
        delete reqs[slot];
}
|
|
|
|
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
void
|
|
|
|
Resource::init()
|
|
|
|
{
|
2011-02-18 20:29:02 +01:00
|
|
|
// If the resource has a zero-cycle (no latency)
|
|
|
|
// function, then no reason to have events
|
|
|
|
// that will process them for the right tick
|
|
|
|
if (latency > 0) {
|
|
|
|
resourceEvent = new ResourceEvent[width];
|
|
|
|
} else {
|
|
|
|
resourceEvent = NULL;
|
|
|
|
}
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2011-02-18 20:27:52 +01:00
|
|
|
for (int i = 0; i < width; i++) {
|
2011-02-18 20:28:30 +01:00
|
|
|
reqs[i] = new ResourceRequest(this);
|
2011-02-18 20:27:52 +01:00
|
|
|
}
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
initSlots();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
Resource::initSlots()
|
|
|
|
{
|
|
|
|
// Add available slot numbers for resource
|
|
|
|
for (int slot_idx = 0; slot_idx < width; slot_idx++) {
|
|
|
|
availSlots.push_back(slot_idx);
|
2011-02-18 20:29:02 +01:00
|
|
|
|
|
|
|
if (resourceEvent) {
|
|
|
|
resourceEvent[slot_idx].init(this, slot_idx);
|
|
|
|
}
|
2009-02-11 00:49:29 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string
|
|
|
|
Resource::name()
|
|
|
|
{
|
|
|
|
return cpu->name() + "." + resName;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
Resource::slotsAvail()
|
|
|
|
{
|
|
|
|
return availSlots.size();
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
Resource::slotsInUse()
|
|
|
|
{
|
|
|
|
return width - availSlots.size();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
Resource::freeSlot(int slot_idx)
|
|
|
|
{
|
inorder: update pipeline interface for handling finished resource reqs
formerly, to free up bandwidth in a resource, we could just change the pointer in that resource
but at the same time the pipeline stages had visibility to see what happened to a resource request.
Now that we are recycling these requests (to avoid too much dynamic allocation), we can't throw
away the request too early or the pipeline stage gets bad information. Instead, mark when a request
is done with the resource all together and then let the pipeline stage call back to the resource
that it's time to free up the bandwidth for more instructions
*** inteface notes ***
- When an instruction completes and is done in a resource for that cycle, call done()
- When an instruction fails and is done with a resource for that cycle, call done(false)
- When an instruction completes, but isnt finished with a resource, call completed()
- When an instruction fails, but isnt finished with a resource, call completed(false)
* * *
inorder: tlbmiss wakeup bug fix
2011-02-18 20:28:37 +01:00
|
|
|
DPRINTF(Resource, "Deallocating [slot:%i].\n",
|
|
|
|
slot_idx);
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
// Put slot number on this resource's free list
|
|
|
|
availSlots.push_back(slot_idx);
|
|
|
|
|
2011-02-18 20:28:30 +01:00
|
|
|
// Invalidate Request & Reset it's flags
|
|
|
|
reqs[slot_idx]->clearRequest();
|
2009-02-11 00:49:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
Resource::findSlot(DynInstPtr inst)
|
|
|
|
{
|
|
|
|
int slot_num = -1;
|
|
|
|
|
2011-02-18 20:28:30 +01:00
|
|
|
for (int i = 0; i < width; i++) {
|
|
|
|
if (reqs[i]->valid &&
|
|
|
|
reqs[i]->getInst()->seqNum == inst->seqNum) {
|
|
|
|
slot_num = reqs[i]->getSlot();
|
2009-02-11 00:49:29 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return slot_num;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
Resource::getSlot(DynInstPtr inst)
|
|
|
|
{
|
2011-02-18 20:28:30 +01:00
|
|
|
int slot_num = -1;
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
if (slotsAvail() != 0) {
|
|
|
|
slot_num = availSlots[0];
|
|
|
|
|
|
|
|
vector<int>::iterator vect_it = availSlots.begin();
|
|
|
|
|
|
|
|
assert(slot_num == *vect_it);
|
|
|
|
|
|
|
|
availSlots.erase(vect_it);
|
|
|
|
}
|
|
|
|
|
|
|
|
return slot_num;
|
|
|
|
}
|
|
|
|
|
|
|
|
ResReqPtr
Resource::request(DynInstPtr inst)
{
    // If the resource is already serving this instruction, reuse the
    // existing request; otherwise try to claim a fresh slot.
    bool try_request = false;
    int slot_num = -1;
    int stage_num;

    ResReqPtr inst_req = findRequest(inst);

    if (inst_req) {
        // The instruction has requested before. Let the (possibly
        // overridden) requestAgain hook preprocess it and decide,
        // via try_request, whether the request should re-execute.
        requestAgain(inst, try_request);

        slot_num = inst_req->getSlot();
        stage_num = inst_req->getStageNum();
    } else {
        // First request from this instruction: grab a free slot.
        slot_num = getSlot(inst);

        if (slot_num != -1) {
            DPRINTF(Resource, "Allocating [slot:%i] for [tid:%i]: [sn:%i]\n",
                    slot_num, inst->readTid(), inst->seqNum);

            // Stage number and command come from the instruction's
            // current schedule entry.
            stage_num = inst->curSkedEntry->stageNum;
            unsigned cmd = inst->curSkedEntry->cmd;

            // Build (recycle) the resource request for this slot.
            inst_req = getRequest(inst, stage_num, id, slot_num, cmd);

            if (inst->staticInst) {
                DPRINTF(Resource, "[tid:%i]: [sn:%i] requesting this "
                        "resource.\n",
                        inst->readTid(), inst->seqNum);
            } else {
                DPRINTF(Resource, "[tid:%i]: instruction requesting this "
                        "resource.\n",
                        inst->readTid());
            }

            try_request = true;
        } else {
            DPRINTF(Resource, "No slot available for [tid:%i]: [sn:%i]\n",
                    inst->readTid(), inst->seqNum);
        }
    }

    if (try_request) {
        // Schedule execution of the resource for this slot.
        scheduleExecution(slot_num);
    } else {
        // Denied: hand back the shared sentinel request.
        inst_req = deniedReq;
        rejectRequest(inst);
    }

    return inst_req;
}
|
|
|
|
|
|
|
|
void
|
|
|
|
Resource::requestAgain(DynInstPtr inst, bool &do_request)
|
|
|
|
{
|
|
|
|
do_request = true;
|
|
|
|
|
|
|
|
if (inst->staticInst) {
|
2010-01-31 23:18:15 +01:00
|
|
|
DPRINTF(Resource, "[tid:%i]: [sn:%i] requesting this resource "
|
|
|
|
"again.\n",
|
2009-02-11 00:49:29 +01:00
|
|
|
inst->readTid(), inst->seqNum);
|
|
|
|
} else {
|
|
|
|
DPRINTF(Resource, "[tid:%i]: requesting this resource again.\n",
|
|
|
|
inst->readTid());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ResReqPtr
Resource::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                     int slot_num, unsigned cmd)
{
    // Recycle the pre-allocated request for this slot instead of
    // dynamically allocating a new one.
    // NOTE(review): this passes the resource's own `id` rather than
    // the res_idx parameter, as the original did — confirm res_idx
    // is intentionally unused here.
    ResReqPtr slot_req = reqs[slot_num];
    slot_req->setRequest(inst, stage_num, id, slot_num, cmd);
    return slot_req;
}
|
|
|
|
|
|
|
|
ResReqPtr
Resource::findRequest(DynInstPtr inst)
{
    // Return the first valid request whose instruction pointer
    // matches exactly, or NULL if none does.
    for (int slot = 0; slot < width; slot++) {
        if (reqs[slot]->valid && reqs[slot]->getInst() == inst)
            return reqs[slot];
    }

    return NULL;
}
|
|
|
|
|
|
|
|
void
|
|
|
|
Resource::rejectRequest(DynInstPtr inst)
|
|
|
|
{
|
|
|
|
DPRINTF(RefCount, "[tid:%i]: Unable to grant request for [sn:%i].\n",
|
|
|
|
inst->readTid(), inst->seqNum);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
Resource::execute(int slot_idx)
|
|
|
|
{
|
|
|
|
DPRINTF(Resource, "[tid:%i]: Executing %s resource.\n",
|
2011-02-18 20:28:30 +01:00
|
|
|
reqs[slot_idx]->getTid(), name());
|
|
|
|
reqs[slot_idx]->setCompleted(true);
|
|
|
|
reqs[slot_idx]->done();
|
2009-02-11 00:49:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2009-05-26 18:23:13 +02:00
|
|
|
Resource::deactivateThread(ThreadID tid)
|
2009-02-11 00:49:29 +01:00
|
|
|
{
|
|
|
|
// In the most basic case, deactivation means squashing everything
|
|
|
|
// from a particular thread
|
2009-05-12 21:01:15 +02:00
|
|
|
DynInstPtr dummy_inst = new InOrderDynInst(cpu, NULL, 0, tid, tid);
|
2009-02-11 00:49:29 +01:00
|
|
|
squash(dummy_inst, 0, 0, tid);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2009-05-26 18:23:13 +02:00
|
|
|
Resource::squash(DynInstPtr inst, int stage_num, InstSeqNum squash_seq_num,
|
|
|
|
ThreadID tid)
|
2009-02-11 00:49:29 +01:00
|
|
|
{
|
2011-02-18 20:28:30 +01:00
|
|
|
for (int i = 0; i < width; i++) {
|
|
|
|
ResReqPtr req_ptr = reqs[i];
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2011-02-18 20:28:30 +01:00
|
|
|
if (req_ptr->valid &&
|
2009-02-11 00:49:29 +01:00
|
|
|
req_ptr->getInst()->readTid() == tid &&
|
|
|
|
req_ptr->getInst()->seqNum > squash_seq_num) {
|
|
|
|
|
|
|
|
DPRINTF(Resource, "[tid:%i]: Squashing [sn:%i].\n",
|
|
|
|
req_ptr->getInst()->readTid(),
|
|
|
|
req_ptr->getInst()->seqNum);
|
|
|
|
|
2009-05-12 21:01:16 +02:00
|
|
|
req_ptr->setSquashed();
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
int req_slot_num = req_ptr->getSlot();
|
|
|
|
|
2009-05-12 21:01:16 +02:00
|
|
|
if (resourceEvent[req_slot_num].scheduled())
|
|
|
|
unscheduleEvent(req_slot_num);
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2011-02-18 20:28:30 +01:00
|
|
|
freeSlot(req_slot_num);
|
2009-02-11 00:49:29 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-02-01 00:26:13 +01:00
|
|
|
void
|
2010-06-24 21:34:12 +02:00
|
|
|
Resource::squashDueToMemStall(DynInstPtr inst, int stage_num,
|
|
|
|
InstSeqNum squash_seq_num,
|
2010-02-01 00:26:13 +01:00
|
|
|
ThreadID tid)
|
|
|
|
{
|
|
|
|
squash(inst, stage_num, squash_seq_num, tid);
|
|
|
|
}
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
Tick
Resource::ticks(int num_cycles)
{
    // Delegate cycle-to-tick conversion to the owning CPU.
    return cpu->ticks(num_cycles);
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
Resource::scheduleExecution(int slot_num)
|
|
|
|
{
|
|
|
|
int res_latency = getLatency(slot_num);
|
|
|
|
|
|
|
|
if (res_latency >= 1) {
|
|
|
|
scheduleEvent(slot_num, res_latency);
|
|
|
|
} else {
|
|
|
|
execute(slot_num);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
Resource::scheduleEvent(int slot_idx, int delay)
|
|
|
|
{
|
|
|
|
DPRINTF(Resource, "[tid:%i]: Scheduling event for [sn:%i] on tick %i.\n",
|
2011-02-18 20:28:30 +01:00
|
|
|
reqs[slot_idx]->inst->readTid(),
|
|
|
|
reqs[slot_idx]->inst->seqNum,
|
2011-01-08 06:50:29 +01:00
|
|
|
cpu->ticks(delay) + curTick());
|
2009-02-11 00:49:29 +01:00
|
|
|
resourceEvent[slot_idx].scheduleEvent(delay);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
Resource::scheduleEvent(DynInstPtr inst, int delay)
|
|
|
|
{
|
|
|
|
int slot_idx = findSlot(inst);
|
|
|
|
|
|
|
|
if(slot_idx != -1)
|
|
|
|
resourceEvent[slot_idx].scheduleEvent(delay);
|
|
|
|
|
|
|
|
return slot_idx;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
Resource::unscheduleEvent(int slot_idx)
|
|
|
|
{
|
|
|
|
resourceEvent[slot_idx].unscheduleEvent();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
Resource::unscheduleEvent(DynInstPtr inst)
|
|
|
|
{
|
|
|
|
int slot_idx = findSlot(inst);
|
|
|
|
|
|
|
|
if(slot_idx != -1)
|
|
|
|
resourceEvent[slot_idx].unscheduleEvent();
|
|
|
|
|
|
|
|
return slot_idx;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next ID to hand out to a new ResourceRequest.
int ResourceRequest::resReqID = 0;

// Request-count watermark; presumably a debugging counter — usage is
// outside this file, so confirm against the header.
int ResourceRequest::maxReqCount = 0;
|
|
|
|
|
2011-02-18 20:28:30 +01:00
|
|
|
// Construct an idle (invalid) request bound to its owning resource.
// All per-use state starts cleared; setRequest() activates it.
ResourceRequest::ResourceRequest(Resource *_res)
    : res(_res), inst(NULL), stagePasses(0), valid(false), doneInResource(false),
      complSlotNum(-1), completed(false), squashed(false), processing(false),
      memStall(false)
{
}
|
|
|
|
|
|
|
|
ResourceRequest::~ResourceRequest()
{
#ifdef DEBUG
    // Debug builds track the live-request count on the CPU.
    res->cpu->resReqCount--;
    DPRINTF(ResReqCount, "Res. Req %i deleted. resReqCount=%i.\n", reqID,
            res->cpu->resReqCount);
#endif
    // Drop the instruction reference explicitly.
    inst = NULL;
}
|
2009-02-11 00:49:29 +01:00
|
|
|
|
2011-02-18 20:28:30 +01:00
|
|
|
void
|
|
|
|
ResourceRequest::setRequest(DynInstPtr _inst, int stage_num,
|
|
|
|
int res_idx, int slot_num, unsigned _cmd)
|
|
|
|
{
|
|
|
|
valid = true;
|
|
|
|
inst = _inst;
|
|
|
|
stageNum = stage_num;
|
|
|
|
resIdx = res_idx;
|
|
|
|
slotNum = slot_num;
|
|
|
|
cmd = _cmd;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ResourceRequest::clearRequest()
|
|
|
|
{
|
|
|
|
valid = false;
|
|
|
|
inst = NULL;
|
|
|
|
stagePasses = 0;
|
inorder: update pipeline interface for handling finished resource reqs
formerly, to free up bandwidth in a resource, we could just change the pointer in that resource
but at the same time the pipeline stages had visibility to see what happened to a resource request.
Now that we are recycling these requests (to avoid too much dynamic allocation), we can't throw
away the request too early or the pipeline stage gets bad information. Instead, mark when a request
is done with the resource all together and then let the pipeline stage call back to the resource
that it's time to free up the bandwidth for more instructions
*** inteface notes ***
- When an instruction completes and is done in a resource for that cycle, call done()
- When an instruction fails and is done with a resource for that cycle, call done(false)
- When an instruction completes, but isnt finished with a resource, call completed()
- When an instruction fails, but isnt finished with a resource, call completed(false)
* * *
inorder: tlbmiss wakeup bug fix
2011-02-18 20:28:37 +01:00
|
|
|
completed = false;
|
|
|
|
doneInResource = false;
|
|
|
|
squashed = false;
|
|
|
|
memStall = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ResourceRequest::freeSlot()
|
|
|
|
{
|
|
|
|
assert(res);
|
|
|
|
|
|
|
|
// Free Slot So Another Instruction Can Use This Resource
|
|
|
|
res->freeSlot(slotNum);
|
2011-02-18 20:28:30 +01:00
|
|
|
}
|
|
|
|
|
2009-02-11 00:49:29 +01:00
|
|
|
void
|
|
|
|
ResourceRequest::done(bool completed)
|
|
|
|
{
|
2010-06-24 21:34:12 +02:00
|
|
|
DPRINTF(Resource, "%s [slot:%i] done with request from "
|
|
|
|
"[sn:%i] [tid:%i].\n", res->name(), slotNum,
|
|
|
|
inst->seqNum, inst->readTid());
|
2009-02-11 00:49:29 +01:00
|
|
|
|
|
|
|
setCompleted(completed);
|
|
|
|
|
2010-02-01 00:30:48 +01:00
|
|
|
// Used for debugging purposes
|
|
|
|
if (completed) {
|
|
|
|
complSlotNum = slotNum;
|
|
|
|
}
|
2009-02-11 00:49:29 +01:00
|
|
|
|
inorder: update pipeline interface for handling finished resource reqs
formerly, to free up bandwidth in a resource, we could just change the pointer in that resource
but at the same time the pipeline stages had visibility to see what happened to a resource request.
Now that we are recycling these requests (to avoid too much dynamic allocation), we can't throw
away the request too early or the pipeline stage gets bad information. Instead, mark when a request
is done with the resource all together and then let the pipeline stage call back to the resource
that it's time to free up the bandwidth for more instructions
*** inteface notes ***
- When an instruction completes and is done in a resource for that cycle, call done()
- When an instruction fails and is done with a resource for that cycle, call done(false)
- When an instruction completes, but isnt finished with a resource, call completed()
- When an instruction fails, but isnt finished with a resource, call completed(false)
* * *
inorder: tlbmiss wakeup bug fix
2011-02-18 20:28:37 +01:00
|
|
|
doneInResource = true;
|
2009-02-11 00:49:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Default-construct an unbound event; Resource::init() binds it to a
// resource/slot later via init().
ResourceEvent::ResourceEvent()
    : Event((Event::Priority)Resource_Event_Pri)
{ }

// Construct an event already bound to a resource and slot.
ResourceEvent::ResourceEvent(Resource *res, int slot_idx)
    : Event((Event::Priority)Resource_Event_Pri), resource(res),
      slotIdx(slot_idx)
{ }
|
|
|
|
|
|
|
|
void
|
|
|
|
ResourceEvent::init(Resource *res, int slot_idx)
|
|
|
|
{
|
|
|
|
resource = res;
|
|
|
|
slotIdx = slot_idx;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ResourceEvent::process()
|
|
|
|
{
|
|
|
|
resource->execute(slotIdx);
|
|
|
|
}
|
|
|
|
|
|
|
|
const char *
|
|
|
|
ResourceEvent::description()
|
|
|
|
{
|
|
|
|
string desc = resource->name() + " event";
|
|
|
|
|
|
|
|
return desc.c_str();
|
|
|
|
}
|
2011-01-08 06:50:29 +01:00
|
|
|
|
|
|
|
void
|
|
|
|
ResourceEvent::scheduleEvent(int delay)
|
|
|
|
{
|
2011-01-08 06:50:29 +01:00
|
|
|
assert(!scheduled() || squashed());
|
|
|
|
resource->cpu->reschedule(this,
|
2011-01-08 06:50:29 +01:00
|
|
|
curTick() + resource->ticks(delay), true);
|
2011-01-08 06:50:29 +01:00
|
|
|
}
|