mem: Allowed tagged instruction prefetching in stride prefetcher
For systems with a tightly coupled L2, a stride-based prefetcher may observe access requests from both the instruction and data L1 caches. However, the PC address of an instruction miss gives the stride-based prefetcher no useful training information (there is no stride to train on). In these cases, it is better if the L2 stride prefetcher simply reverts to a simple N-block-ahead prefetcher. This patch enables that option.

Committed by: Nilay Vaish <nilay@cs.wisc.edu>
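As a usage note, a minimal configuration sketch of how the new option might be turned on from a gem5 script is shown below; `l2` is a placeholder for an existing classic-memory-system L2 cache object, and only the parameters visible in this patch (inst_tagged, on_prefetch) are taken from the diff itself.

# Sketch only: enabling the new option from a gem5 configuration script.
# `l2` is assumed to be an already-constructed L2 cache whose `prefetcher`
# parameter accepts a BasePrefetcher-derived object.
from m5.objects import StridePrefetcher

l2.prefetcher = StridePrefetcher(
    inst_tagged=True,   # fall back to N-block-ahead prefetching on I-fetches
    on_prefetch=True,   # keep training on prefetch requests from the L1s
)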
parent 95735e10e7
commit 771c864bf4
3 changed files with 22 additions and 1 deletion
src/mem/cache/prefetch/Prefetcher.py (2 changes)
@@ -65,6 +65,8 @@ class BasePrefetcher(ClockedObject):
         "Only prefetch on read requests (write requests ignored)")
     on_prefetch = Param.Bool(True,
         "Let lower cache prefetcher train on prefetch requests")
+    inst_tagged = Param.Bool(True,
+        "Perform a tagged prefetch for instruction fetches always")
     sys = Param.System(Parent.any, "System this device belongs to")
 
 class GHBPrefetcher(BasePrefetcher):
src/mem/cache/prefetch/stride.cc (17 changes)
@@ -66,6 +66,23 @@ StridePrefetcher::calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
     assert(master_id < Max_Contexts);
     std::list<StrideEntry*> &tab = table[master_id];
 
+    // Revert to simple N-block ahead prefetch for instruction fetches
+    if (instTagged && pkt->req->isInstFetch()) {
+        for (int d = 1; d <= degree; d++) {
+            Addr new_addr = data_addr + d * blkSize;
+            if (pageStop && !samePage(data_addr, new_addr)) {
+                // Spanned the page, so now stop
+                pfSpanPage += degree - d + 1;
+                return;
+            }
+            DPRINTF(HWPrefetch, "queuing prefetch to %x @ %d\n",
+                    new_addr, latency);
+            addresses.push_back(new_addr);
+            delays.push_back(latency);
+        }
+        return;
+    }
+
     /* Scan Table for instAddr Match */
     std::list<StrideEntry*>::iterator iter;
     for (iter = tab.begin(); iter != tab.end(); iter++) {
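For reference, a small stand-alone Python model of the N-block-ahead path added above follows; the block size, degree and page size are assumed example values, and samePage is approximated by comparing page indices, so this is a sketch of the behaviour rather than the gem5 code itself.

# Simplified model of the instruction-fetch path added to
# StridePrefetcher::calculatePrefetch above (assumed example parameters).
def n_block_ahead(data_addr, blk_size=64, degree=4, page_size=4096,
                  page_stop=True):
    """Return the prefetch candidate addresses for an instruction fetch."""
    addresses = []
    for d in range(1, degree + 1):
        new_addr = data_addr + d * blk_size
        # Mirror the pageStop/samePage check: stop once a candidate leaves
        # the page that data_addr belongs to.
        if page_stop and (new_addr // page_size) != (data_addr // page_size):
            break
        addresses.append(new_addr)
    return addresses

# A fetch near the end of a 4 KiB page only yields in-page candidates.
print([hex(a) for a in n_block_ahead(0x1f80)])   # ['0x1fc0']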
src/mem/cache/prefetch/stride.hh (4 changes)
@@ -76,10 +76,12 @@ class StridePrefetcher : public BasePrefetcher
 
     std::list<StrideEntry*> table[Max_Contexts];
 
+    bool instTagged;
+
   public:
 
     StridePrefetcher(const Params *p)
-        : BasePrefetcher(p)
+        : BasePrefetcher(p), instTagged(p->inst_tagged)
     {
     }