Revamp cache timing access mshr check to make stats sane again.
--HG-- extra : convert_revision : 37009b8ee536807073b5a5ca07ed1d097a496aea
parent bdf3323915
commit e6d6adc731
2 changed files with 45 additions and 21 deletions
src/mem/cache/blk.hh (18 changes)
@@ -51,8 +51,10 @@ enum CacheBlkStatusBits {
     BlkValid = 0x01,
     /** write permission */
     BlkWritable = 0x02,
+    /** read permission (yes, block can be valid but not readable) */
+    BlkReadable = 0x04,
     /** dirty (modified) */
-    BlkDirty = 0x04,
+    BlkDirty = 0x08,
     /** block was referenced */
     BlkReferenced = 0x10,
     /** block was a hardware prefetch yet unaccessed*/
@@ -162,7 +164,19 @@ class CacheBlk
     }

     /**
-     * Checks that a block is valid (readable).
+     * Checks the read permissions of this block. Note that a block
+     * can be valid but not readable if there is an outstanding write
+     * upgrade miss.
+     * @return True if the block is readable.
+     */
+    bool isReadable() const
+    {
+        const int needed_bits = BlkReadable | BlkValid;
+        return (status & needed_bits) == needed_bits;
+    }
+
+    /**
+     * Checks that a block is valid.
      * @return True if the block is valid.
      */
     bool isValid() const
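Aside (not part of the patch): the hunks above split "valid" and "readable" into independent status bits, so a block can keep its tag in the cache while read permission is temporarily revoked during an outstanding write/upgrade miss. A minimal standalone sketch of those bit checks, using a hypothetical DemoBlk stand-in for CacheBlk:

    #include <cassert>
    #include <cstdio>

    // Mirrors the status-bit encoding from blk.hh above.
    enum CacheBlkStatusBits {
        BlkValid      = 0x01,   // block holds a valid tag
        BlkWritable   = 0x02,   // write permission
        BlkReadable   = 0x04,   // read permission (new in this patch)
        BlkDirty      = 0x08,   // modified
        BlkReferenced = 0x10    // referenced since fill
    };

    struct DemoBlk {            // hypothetical stand-in for CacheBlk
        int status = 0;

        bool isValid() const    { return (status & BlkValid) != 0; }
        bool isReadable() const {
            const int needed_bits = BlkReadable | BlkValid;
            return (status & needed_bits) == needed_bits;
        }
    };

    int main()
    {
        DemoBlk blk;
        blk.status = BlkValid | BlkReadable | BlkWritable;  // exclusive fill
        assert(blk.isValid() && blk.isReadable());

        blk.status &= ~BlkReadable;   // outstanding write/upgrade miss
        assert(blk.isValid() && !blk.isReadable());

        std::printf("valid=%d readable=%d\n", blk.isValid(), blk.isReadable());
        return 0;
    }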
src/mem/cache/cache_impl.hh (48 changes)
@@ -292,7 +292,7 @@ Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk, int &lat)
        }
    }

-   if (pkt->needsExclusive() ? blk->isWritable() : blk->isValid()) {
+   if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
        // OK to satisfy access
        hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        satisfyCpuSideRequest(pkt, blk);
@@ -318,7 +318,7 @@ Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk, int &lat)
                incMissCount(pkt);
                return false;
            }
-           blk->status = BlkValid;
+           blk->status = BlkValid | BlkReadable;
        }
        std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
        blk->status |= BlkDirty;
@@ -422,21 +422,8 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
    }

    int lat = hitLatency;
-   bool satisfied = false;
-
-   Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
-   MSHR *mshr = mshrQueue.findMatch(blk_addr);
-
-   if (!mshr) {
-       // no outstanding access to this block, look up in cache
-       // (otherwise if we allow reads while there's an outstanding
-       // write miss, the read could return stale data out of the
-       // cache block... a more aggressive system could detect the
-       // overlap (if any) and forward data out of the MSHRs, but we
-       // don't do that yet)
-       BlkType *blk = NULL;
-       satisfied = access(pkt, blk, lat);
-   }
+   BlkType *blk = NULL;
+   bool satisfied = access(pkt, blk, lat);

 #if 0
    /** @todo make the fast write alloc (wh64) work with coherence. */
@@ -483,6 +470,9 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
        if (prefetchMiss)
            prefetcher->handleMiss(pkt, time);

+       Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
+       MSHR *mshr = mshrQueue.findMatch(blk_addr);
+
        if (mshr) {
            // MSHR hit
            //@todo remove hw_pf here
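Taken together, the two hunks above move the cache lookup (and the hit/miss bookkeeping that access() performs) ahead of the MSHR check, which is what makes the stats sane again: every timing request is now classified by access(), and the MSHR match is consulted afterwards only to decide how a miss is handled. A heavily simplified, hypothetical sketch of the resulting control flow; SimpleCache, Packet, and Mshr are illustrative stand-ins, not the real gem5 types:

    #include <cstdint>
    #include <map>

    struct Packet { uint64_t addr; };   // hypothetical stand-in for PacketPtr
    struct Mshr   { };                  // outstanding-miss bookkeeping (elided)

    struct SimpleCache {
        uint64_t blkSize = 64;
        uint64_t hits = 0, misses = 0;
        std::map<uint64_t, Mshr> mshrs;     // keyed by block address

        bool lookup(const Packet &) { return false; }   // tag check (placeholder)

        // Plays the role of access(): classify the request and update stats.
        bool access(const Packet &pkt) {
            bool hit = lookup(pkt);
            hit ? ++hits : ++misses;
            return hit;
        }

        void timingAccess(const Packet &pkt) {
            // New order: always classify first, so the stats see every request...
            bool satisfied = access(pkt);

            // ...then consult the MSHRs only to decide how to handle a miss.
            uint64_t blk_addr = pkt.addr & ~(blkSize - 1);
            auto it = mshrs.find(blk_addr);
            if (satisfied) {
                // respond to the CPU side
            } else if (it != mshrs.end()) {
                // MSHR hit: coalesce with the outstanding miss
            } else {
                // allocate a new MSHR or write buffer entry
            }
        }
    };

    int main()
    {
        SimpleCache cache;
        cache.timingAccess(Packet{0x1000});
        return 0;
    }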
@@ -508,6 +498,26 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
            if (pkt->cmd == MemCmd::Writeback) {
                allocateWriteBuffer(pkt, time, true);
            } else {
+               if (blk && blk->isValid()) {
+                   // If we have a write miss to a valid block, we
+                   // need to mark the block non-readable. Otherwise
+                   // if we allow reads while there's an outstanding
+                   // write miss, the read could return stale data
+                   // out of the cache block... a more aggressive
+                   // system could detect the overlap (if any) and
+                   // forward data out of the MSHRs, but we don't do
+                   // that yet. Note that we do need to leave the
+                   // block valid so that it stays in the cache, in
+                   // case we get an upgrade response (and hence no
+                   // new data) when the write miss completes.
+                   // As long as CPUs do proper store/load forwarding
+                   // internally, and have a sufficiently weak memory
+                   // model, this is probably unnecessary, but at some
+                   // point it must have seemed like we needed it...
+                   assert(pkt->needsExclusive() && !blk->isWritable());
+                   blk->status &= ~BlkReadable;
+               }
+
                allocateMissBuffer(pkt, time, true);
            }
        }
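The comment added in this hunk states the key invariant: on an exclusive (write) miss to a block that is already valid, the block must stay valid, since an upgrade response carries no data, but must stop being readable until the miss completes. A minimal, self-contained sketch of that transition, again using hypothetical stand-ins for the real cache types:

    #include <cassert>

    // Hypothetical illustration only; mirrors the blk->status logic above.
    enum { BlkValid = 0x01, BlkWritable = 0x02, BlkReadable = 0x04 };

    struct DemoBlk { int status = 0; };      // stand-in for CacheBlk

    // A write miss to a valid, non-writable block keeps the block valid but
    // revokes read permission, so later reads miss and wait on the MSHR
    // instead of returning stale data out of the cache block.
    void onWriteMissToValidBlock(DemoBlk &blk, bool needsExclusive)
    {
        assert(needsExclusive && !(blk.status & BlkWritable));
        blk.status &= ~BlkReadable;          // no longer readable...
        assert(blk.status & BlkValid);       // ...but still resident in the cache
    }

    int main()
    {
        DemoBlk blk;
        blk.status = BlkValid | BlkReadable;     // shared copy in the cache
        onWriteMissToValidBlock(blk, /*needsExclusive=*/true);
        return 0;
    }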
@@ -934,10 +944,10 @@ Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
    }

    if (!pkt->sharedAsserted()) {
-       blk->status = BlkValid | BlkWritable;
+       blk->status = BlkValid | BlkReadable | BlkWritable;
    } else {
        assert(!pkt->needsExclusive());
-       blk->status = BlkValid;
+       blk->status = BlkValid | BlkReadable;
    }

    DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",