Force prefetches to check cache and MSHRs immediately prior to issue.
This prevents redundant prefetches from being issued, solving the occasional 'needsExclusive && !blk->isWritable()' assertion failure in cache_impl.hh that several people have run into. Eliminates "prefetch_cache_check_push" flag, neither setting of which really solved the problem.
This commit is contained in:
parent
f28ea7a6c9
commit
72cfed4164
2
src/mem/cache/BaseCache.py
vendored
2
src/mem/cache/BaseCache.py
vendored
|
@@ -68,8 +68,6 @@ class BaseCache(MemObject):
|
|||
"Latency of the prefetcher")
|
||||
prefetch_policy = Param.Prefetch('none',
|
||||
"Type of prefetcher to use")
|
||||
prefetch_cache_check_push = Param.Bool(True,
|
||||
"Check if in cache on push or pop of prefetch queue")
|
||||
prefetch_use_cpu_id = Param.Bool(True,
|
||||
"Use the CPU ID to separate calculations of prefetches")
|
||||
prefetch_data_accesses_only = Param.Bool(False,
|
||||
|
|
13
src/mem/cache/cache_impl.hh
vendored
13
src/mem/cache/cache_impl.hh
vendored
|
@@ -1301,11 +1301,14 @@ Cache<TagStore>::getNextMSHR()
|
|||
// If we have a miss queue slot, we can try a prefetch
|
||||
PacketPtr pkt = prefetcher->getPacket();
|
||||
if (pkt) {
|
||||
// Update statistic on number of prefetches issued
|
||||
// (hwpf_mshr_misses)
|
||||
mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
|
||||
// Don't request bus, since we already have it
|
||||
return allocateMissBuffer(pkt, curTick, false);
|
||||
Addr pf_addr = blockAlign(pkt->getAddr());
|
||||
if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr)) {
|
||||
// Update statistic on number of prefetches issued
|
||||
// (hwpf_mshr_misses)
|
||||
mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
|
||||
// Don't request bus, since we already have it
|
||||
return allocateMissBuffer(pkt, curTick, false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
16
src/mem/cache/prefetch/base.cc
vendored
16
src/mem/cache/prefetch/base.cc
vendored
|
@@ -45,7 +45,6 @@
|
|||
BasePrefetcher::BasePrefetcher(const BaseCacheParams *p)
|
||||
: size(p->prefetcher_size), pageStop(!p->prefetch_past_page),
|
||||
serialSquash(p->prefetch_serial_squash),
|
||||
cacheCheckPush(p->prefetch_cache_check_push),
|
||||
onlyData(p->prefetch_data_accesses_only)
|
||||
{
|
||||
}
|
||||
|
@@ -143,9 +142,6 @@ BasePrefetcher::getPacket()
|
|||
do {
|
||||
pkt = *pf.begin();
|
||||
pf.pop_front();
|
||||
if (!cacheCheckPush) {
|
||||
keep_trying = cache->inCache(pkt->getAddr());
|
||||
}
|
||||
|
||||
if (keep_trying) {
|
||||
DPRINTF(HWPrefetch, "addr 0x%x in cache, skipping\n",
|
||||
|
@@ -226,18 +222,6 @@ BasePrefetcher::notify(PacketPtr &pkt, Tick time)
|
|||
"inserting into prefetch queue with delay %d time %d\n",
|
||||
addr, *delayIter, time);
|
||||
|
||||
// Check if it is already in the cache
|
||||
if (cacheCheckPush && cache->inCache(addr)) {
|
||||
DPRINTF(HWPrefetch, "Prefetch addr already in cache\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if it is already in the miss_queue
|
||||
if (cache->inMissQueue(addr)) {
|
||||
DPRINTF(HWPrefetch, "Prefetch addr already in miss queue\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check if it is already in the pf buffer
|
||||
if (inPrefetch(addr) != pf.end()) {
|
||||
pfBufferHit++;
|
||||
|
|
4
src/mem/cache/prefetch/base.hh
vendored
4
src/mem/cache/prefetch/base.hh
vendored
|
@@ -68,10 +68,6 @@ class BasePrefetcher
|
|||
/** Do we remove prefetches with later times than a new miss.*/
|
||||
bool serialSquash;
|
||||
|
||||
/** Do we check if it is in the cache when inserting into buffer,
|
||||
or removing.*/
|
||||
bool cacheCheckPush;
|
||||
|
||||
/** Do we prefetch on only data reads, or on inst reads as well. */
|
||||
bool onlyData;
|
||||
|
||||
|
|
Loading…
Reference in a new issue