/*
 * Copyright (c) 2010-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      tags(p->tags),
      prefetcher(p->prefetcher),
      doFastWrites(true),
      prefetchOnAccess(p->prefetch_on_access),
      clusivity(p->clusivity),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent(this, false,
                                    EventBase::Delayed_Writeback_Pri)
{
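    // allocate a temporary block used internally, for example when a
    // fill does not allocate a block in this cache (see the
    // allocOnFill/clusivity handling)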
    tempBlock = new CacheBlk();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
                                  "CpuSidePort");
    memSidePort = new MemSidePort(p->name + ".mem_side", this,
                                  "MemSidePort");

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

Cache::~Cache()
{
    delete [] tempBlock->data;
    delete tempBlock;

    delete cpuSidePort;
    delete memSidePort;
}

void
Cache::regStats()
{
    BaseCache::regStats();
}
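
// Atomically swap the addressed data in the block with the value carried
// by the packet: the old block contents are returned in the packet and,
// unless a compare-and-swap (CondSwap) condition fails, the new value is
// written to the block and the block is marked dirty.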
void
Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}

void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d (write)\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);

        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                }
                // on ReadExReq we give up our copy unconditionally,
                // even if this cache is mostly inclusive, we may want
                // to revisit this
                invalidateBlock(blk);
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                // - we have a writable copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have set hasSharers flag when
                //   snooping the packet)
                // - the read has explicitly asked for a clean
                //   copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writeable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    } else {
        // Upgrade or Invalidate
        assert(pkt->isUpgrade() || pkt->isInvalidate());

        // for invalidations we could be looking at the temp block
        // (for upgrades we always allocate)
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d (invalidation)\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
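
// Perform the tag lookup for a CPU-side request and try to satisfy it in
// place: returns true if the request was handled here (hit, absorbed
// writeback/eviction, failed store-conditional), false if it has to be
// handled as a miss; any evictions triggered along the way are appended
// to the writebacks list.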
bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    if (pkt->req->isUncacheable()) {
        DPRINTF(Cache, "%s%s addr %#llx uncacheable\n", pkt->cmdString(),
                pkt->req->isInstFetch() ? " (ifetch)" : "",
                pkt->getAddr());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            if (old_blk->isDirty() || writebackClean)
                writebacks.push_back(writebackBlk(old_blk));
            else
                writebacks.push_back(cleanEvictBlk(old_blk));
            tags->invalidate(old_blk);
            old_blk->invalidate();
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    ContextID id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;
    // Here lat is the value passed as parameter to accessBlock() function
    // that can modify its value.
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id);

    DPRINTF(Cache, "%s%s addr %#llx size %d (%s) %s\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr(), pkt->getSize(), pkt->isSecure() ? "s" : "ns",
            blk ? "hit " + blk->print() : "miss");

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible cases can be of a CleanEvict packet coming from above
        // encountering a Writeback generated in this cache peer cache and
        // waiting in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent out
        // by crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoops into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();
                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // Writeback handling is a special case. We can write the block into
    // the cache without having a writeable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());
            return true;
        }

        if (blk == nullptr) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
            if (blk == nullptr) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }
            tags->insertBlock(pkt, blk);

            blk->status = (BlkValid | BlkReadable);
            if (pkt->isSecure()) {
                blk->status |= BlkSecure;
            }
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);
        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        if (blk != nullptr) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);
        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    if (blk == nullptr && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}
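
// Maintain the clusivity of this cache after a request from a cache above
// has been satisfied: a mostly-exclusive cache drops a clean, valid block
// once the cache above holds a copy.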
void
Cache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // write buffer.  Call isCachedAbove for both Writebacks and
        // CleanEvicts. If isCachedAbove returns true we set BLOCK_CACHED flag
        // in Writebacks and discard CleanEvicts.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies packet to set BLOCK_CACHED flag
                memSidePort->sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort->sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}

void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    assert(pkt->isResponse());
    assert(!system->bypassCaches());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay the delay that occurs if the packet comes from the bus,
    // we charge also headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
}
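
// Promote a full-block WriteReq (e.g. from a cache line clearing
// instruction) to a WriteLineReq so that it is handled as a whole-line
// write.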
void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}
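
// Handle a timing-mode request from the CPU side: perform the cache
// access, send out any writebacks it produces, respond on a hit, and hand
// misses over to the MSHRs.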
bool
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());

    assert(pkt->isRequest());

    // Just forward the packet if caches are disabled.
    if (system->bypassCaches()) {
        // @todo This should really enqueue the packet rather
        bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
        assert(success);
        return true;
    }

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %#llx (%s): "
                "not responding\n",
                pkt->getAddr(), pkt->isSecure() ? "s" : "ns");

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return true;
    }

    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() calls accessBlock() which can modify lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // copy writebacks to write buffer here to ensure they logically
        // precede anything happening below
        doWritebacks(writebacks, forward_time);
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just lat, that is, the value of lookupLatency
    // as modified by the access() function, or just lookupLatency otherwise.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    // track time of availability of next prefetch, if any
    Tick next_pf_time = MaxTick;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        // should never be satisfying an uncacheable access as we
        // flush and invalidate any existing block as part of the
        // lookup
        assert(!pkt->req->isUncacheable());

        // hit (for all other request types)

        if (prefetcher && (prefetchOnAccess ||
                           (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;

            // Don't notify on SWPrefetch
            if (!pkt->cmd.isSWPrefetch())
                next_pf_time = prefetcher->notify(pkt);
        }

        if (needsResponse) {
            pkt->makeTimingResponse();
            // @todo: Make someone pay for this
            pkt->headerDelay = pkt->payloadDelay = 0;

            // In this case we are considering request_time that takes
            // into account the delay of the xbar, if any, and just
            // lat, neglecting responseLatency, modelling hit latency
            // just as lookupLatency or the value of lat overridden
            // by access(), which calls the accessBlock() function.
            cpuSidePort->schedTimingResp(pkt, request_time, true);
        } else {
            DPRINTF(Cache, "%s satisfied %s addr %#llx, no response needed\n",
                    __func__, pkt->cmdString(), pkt->getAddr());

            // queue the packet for deletion, as the sending cache is
            // still relying on it; if the block is found in access(),
            // CleanEvict and Writeback messages will be deleted
            // here as well
            pendingDelete.reset(pkt);
        }
    } else {
        // miss

        Addr blk_addr = blockAlign(pkt->getAddr());

        // ignore any existing MSHR if we are dealing with an
        // uncacheable request
        MSHR *mshr = pkt->req->isUncacheable() ? nullptr :
            mshrQueue.findMatch(blk_addr, pkt->isSecure());

        // Software prefetch handling:
        // To keep the core from waiting on data it won't look at
        // anyway, send back a response with dummy data. Miss handling
        // will continue asynchronously. Unfortunately, the core will
        // insist upon freeing original Packet/Request, so we have to
        // create a new pair with a different lifecycle. Note that this
        // processing happens before any MSHR munging on the behalf of
        // this request because this new Request will be the one stored
        // into the MSHRs, not the original.
        if (pkt->cmd.isSWPrefetch()) {
            assert(needsResponse);
            assert(pkt->req->hasPaddr());
            assert(!pkt->req->isUncacheable());

            // There's no reason to add a prefetch as an additional target
            // to an existing MSHR. If an outstanding request is already
            // in progress, there is nothing for the prefetch to do.
            // If this is the case, we don't even create a request at all.
            PacketPtr pf = nullptr;

            if (!mshr) {
                // copy the request and create a new SoftPFReq packet
                RequestPtr req = new Request(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
                pf = new Packet(req, pkt->cmd);
                pf->allocate();
                assert(pf->getAddr() == pkt->getAddr());
                assert(pf->getSize() == pkt->getSize());
            }

            pkt->makeTimingResponse();

            // request_time is used here, taking into account lat and the delay
            // charged if the packet comes from the xbar.
            cpuSidePort->schedTimingResp(pkt, request_time, true);

            // If an outstanding request is in progress (we found an
            // MSHR) this is set to null
            pkt = pf;
        }

        if (mshr) {
            /// MSHR hit
            /// @note writebacks will be checked in getNextMSHR()
            /// for any conflicting requests to the same block

            //@todo remove hw_pf here

            // Coalesce unless it was a software prefetch (see above).
            if (pkt) {
2015-11-06 09:26:43 +01:00
|
|
|
assert(!pkt->isWriteback());
|
|
|
|
// CleanEvicts corresponding to blocks which have
|
|
|
|
// outstanding requests in MSHRs are simply sunk here
|
2015-08-21 13:03:20 +02:00
|
|
|
if (pkt->cmd == MemCmd::CleanEvict) {
|
2015-11-06 09:26:21 +01:00
|
|
|
pendingDelete.reset(pkt);
|
2015-08-21 13:03:20 +02:00
|
|
|
} else {
|
2016-05-26 12:56:24 +02:00
|
|
|
DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx "
|
|
|
|
"size %d\n", __func__, pkt->cmdString(),
|
|
|
|
pkt->getAddr(), pkt->getSize());
|
2015-08-21 13:03:20 +02:00
|
|
|
|
|
|
|
assert(pkt->req->masterId() < system->maxMasters());
|
|
|
|
mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
|
|
|
|
// We use forward_time here because it is the same
|
|
|
|
// considering new targets. We have multiple
|
|
|
|
// requests for the same address here. It
|
|
|
|
// specifies the latency to allocate an internal
|
|
|
|
// buffer and to schedule an event to the queued
|
|
|
|
// port and also takes into account the additional
|
|
|
|
// delay of the xbar.
|
2015-11-06 09:26:41 +01:00
|
|
|
mshr->allocateTarget(pkt, forward_time, order++,
|
|
|
|
allocOnFill(pkt->cmd));
|
2015-08-21 13:03:20 +02:00
|
|
|
if (mshr->getNumTargets() == numTarget) {
|
|
|
|
noTargetMSHR = mshr;
|
|
|
|
setBlocked(Blocked_NoTargets);
|
|
|
|
// need to be careful with this... if this mshr isn't
|
|
|
|
// ready yet (i.e. time > curTick()), we don't want to
|
|
|
|
// move it ahead of mshrs that are ready
|
|
|
|
// mshrQueue.moveToFront(mshr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// We should call the prefetcher regardless of whether the request is
|
2016-05-26 12:56:24 +02:00
|
|
|
// satisfied or not, and regardless of whether the request is in the MSHR
|
|
|
|
// or not. The request could be a ReadReq hit, but still not
|
2015-08-21 13:03:20 +02:00
|
|
|
// satisfied (potentially because of a prior write to the same
|
|
|
|
// cache line). So, even when not satisfied, if there is an MSHR
|
2016-05-26 12:56:24 +02:00
|
|
|
// already allocated for this, we need to let the prefetcher
|
|
|
|
// know about the request
|
2015-08-21 13:03:20 +02:00
|
|
|
if (prefetcher) {
|
|
|
|
// Don't notify on SWPrefetch
|
|
|
|
if (!pkt->cmd.isSWPrefetch())
|
|
|
|
next_pf_time = prefetcher->notify(pkt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// no MSHR
|
|
|
|
assert(pkt->req->masterId() < system->maxMasters());
|
|
|
|
if (pkt->req->isUncacheable()) {
|
|
|
|
mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
|
|
|
|
} else {
|
|
|
|
mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
|
|
|
|
}
|
|
|
|
|
2015-11-06 09:26:43 +01:00
|
|
|
if (pkt->isEviction() ||
|
2015-08-21 13:03:20 +02:00
|
|
|
(pkt->req->isUncacheable() && pkt->isWrite())) {
|
|
|
|
// We use forward_time here because this is an eviction or an
|
|
|
|
// uncacheable write, forwarded to the write buffer.
|
|
|
|
allocateWriteBuffer(pkt, forward_time);
|
|
|
|
} else {
|
|
|
|
if (blk && blk->isValid()) {
|
|
|
|
// should have flushed and have no valid block
|
|
|
|
assert(!pkt->req->isUncacheable());
|
|
|
|
|
|
|
|
// If we have a write miss to a valid block, we
|
|
|
|
// need to mark the block non-readable. Otherwise
|
|
|
|
// if we allow reads while there's an outstanding
|
|
|
|
// write miss, the read could return stale data
|
|
|
|
// out of the cache block... a more aggressive
|
|
|
|
// system could detect the overlap (if any) and
|
|
|
|
// forward data out of the MSHRs, but we don't do
|
|
|
|
// that yet. Note that we do need to leave the
|
|
|
|
// block valid so that it stays in the cache, in
|
|
|
|
// case we get an upgrade response (and hence no
|
|
|
|
// new data) when the write miss completes.
|
|
|
|
// As long as CPUs do proper store/load forwarding
|
|
|
|
// internally, and have a sufficiently weak memory
|
|
|
|
// model, this is probably unnecessary, but at some
|
|
|
|
// point it must have seemed like we needed it...
|
2015-12-31 15:32:58 +01:00
|
|
|
assert(pkt->needsWritable());
|
2015-08-21 13:03:20 +02:00
|
|
|
assert(!blk->isWritable());
|
|
|
|
blk->status &= ~BlkReadable;
|
|
|
|
}
|
|
|
|
// Here we are using forward_time, modelling the latency of
|
|
|
|
// a miss (outbound) just as forwardLatency, neglecting the
|
|
|
|
// lookupLatency component.
|
|
|
|
allocateMissBuffer(pkt, forward_time);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (prefetcher) {
|
|
|
|
// Don't notify on SWPrefetch
|
|
|
|
if (!pkt->cmd.isSWPrefetch())
|
|
|
|
next_pf_time = prefetcher->notify(pkt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (next_pf_time != MaxTick)
|
|
|
|
schedMemSideSendEvent(next_pf_time);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
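// Given a CPU-side packet that missed, work out the packet to send
// towards memory: nullptr if the request should be forwarded as-is
// (uncacheable accesses and upgrades that missed completely),
// otherwise a new block-sized request with the appropriate command.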
PacketPtr
|
2016-04-21 10:48:06 +02:00
|
|
|
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
|
|
|
|
bool needsWritable) const
|
2015-08-21 13:03:20 +02:00
|
|
|
{
|
2016-04-21 10:48:06 +02:00
|
|
|
// should never see evictions here
|
|
|
|
assert(!cpu_pkt->isEviction());
|
2015-08-21 13:03:20 +02:00
|
|
|
|
2016-04-21 10:48:06 +02:00
|
|
|
bool blkValid = blk && blk->isValid();
|
2015-08-21 13:03:20 +02:00
|
|
|
|
2016-04-21 10:48:06 +02:00
|
|
|
if (cpu_pkt->req->isUncacheable() ||
|
|
|
|
(!blkValid && cpu_pkt->isUpgrade())) {
|
|
|
|
// uncacheable requests and upgrades from upper-level caches
|
|
|
|
// that missed completely just go through as is
|
|
|
|
return nullptr;
|
2015-08-21 13:03:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
assert(cpu_pkt->needsResponse());
|
|
|
|
|
|
|
|
MemCmd cmd;
|
|
|
|
// @TODO make useUpgrades a parameter.
|
|
|
|
// Note that ownership protocols require upgrade, otherwise a
|
|
|
|
// write miss on a shared owned block will generate a ReadExcl,
|
|
|
|
// which will clobber the owned copy.
|
|
|
|
const bool useUpgrades = true;
|
|
|
|
if (blkValid && useUpgrades) {
|
2015-12-31 15:32:58 +01:00
|
|
|
// the only reason to be here is that blk is read-only and we need
|
|
|
|
// it to be writable
|
|
|
|
assert(needsWritable);
|
2015-08-21 13:03:20 +02:00
|
|
|
assert(!blk->isWritable());
|
|
|
|
cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
|
|
|
|
} else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
|
|
|
|
cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
|
|
|
|
// Even though this SC will fail, we still need to send out the
|
|
|
|
// request and get the data to supply it to other snoopers in the case
|
|
|
|
// where the determination the StoreCond fails is delayed due to
|
|
|
|
// all caches not being on the same local bus.
|
|
|
|
cmd = MemCmd::SCUpgradeFailReq;
|
2016-02-24 10:16:57 +01:00
|
|
|
} else if (cpu_pkt->cmd == MemCmd::WriteLineReq ||
|
|
|
|
cpu_pkt->cmd == MemCmd::InvalidateReq) {
|
2015-08-21 13:03:20 +02:00
|
|
|
// forward as an invalidate to all other caches; this gives us
|
2015-12-31 15:32:58 +01:00
|
|
|
// the line in Exclusive state, and invalidates all other
|
2015-08-21 13:03:20 +02:00
|
|
|
// copies
|
|
|
|
cmd = MemCmd::InvalidateReq;
|
|
|
|
} else {
|
|
|
|
// block is invalid
|
2015-12-31 15:32:58 +01:00
|
|
|
cmd = needsWritable ? MemCmd::ReadExReq :
|
2015-08-21 13:03:20 +02:00
|
|
|
(isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
|
|
|
|
}
|
|
|
|
PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
|
|
|
|
|
2015-12-31 15:32:58 +01:00
|
|
|
// if there are upstream caches that have already marked the
|
|
|
|
// packet as having sharers (not passing writable), pass that info
|
|
|
|
// downstream
|
|
|
|
if (cpu_pkt->hasSharers()) {
|
2015-08-21 13:03:20 +02:00
|
|
|
// note that cpu_pkt may have spent a considerable time in the
|
|
|
|
// MSHR queue and that the information could possibly be out
|
|
|
|
// of date, however, there is no harm in conservatively
|
2015-12-31 15:32:58 +01:00
|
|
|
// assuming the block has sharers
|
|
|
|
pkt->setHasSharers();
|
|
|
|
DPRINTF(Cache, "%s passing hasSharers from %s to %s addr %#llx "
|
|
|
|
"size %d\n",
|
2015-08-21 13:03:20 +02:00
|
|
|
__func__, cpu_pkt->cmdString(), pkt->cmdString(),
|
|
|
|
pkt->getAddr(), pkt->getSize());
|
|
|
|
}
|
|
|
|
|
|
|
|
// the packet should be block aligned
|
|
|
|
assert(pkt->getAddr() == blockAlign(pkt->getAddr()));
|
|
|
|
|
|
|
|
pkt->allocate();
|
|
|
|
DPRINTF(Cache, "%s created %s from %s for addr %#llx size %d\n",
|
|
|
|
__func__, pkt->cmdString(), cpu_pkt->cmdString(), pkt->getAddr(),
|
|
|
|
pkt->getSize());
|
|
|
|
return pkt;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
Tick
|
|
|
|
Cache::recvAtomic(PacketPtr pkt)
|
|
|
|
{
|
|
|
|
// We are in atomic mode so we pay just for lookupLatency here.
|
|
|
|
Cycles lat = lookupLatency;
|
|
|
|
|
|
|
|
// Forward the request if the system is in cache bypass mode.
|
|
|
|
if (system->bypassCaches())
|
|
|
|
return ticksToCycles(memSidePort->sendAtomic(pkt));
|
|
|
|
|
|
|
|
promoteWholeLineWrites(pkt);
|
|
|
|
|
2016-02-10 10:08:24 +01:00
|
|
|
// follow the same flow as in recvTimingReq, and check if a cache
|
|
|
|
// above us is responding
|
2015-12-31 15:32:58 +01:00
|
|
|
if (pkt->cacheResponding()) {
|
2016-02-10 10:08:24 +01:00
|
|
|
DPRINTF(Cache, "Cache above responding to %#llx (%s): "
|
|
|
|
"not responding\n",
|
|
|
|
pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
|
|
|
|
|
|
|
|
// if a cache is responding, and it had the line in Owned
|
|
|
|
// rather than Modified state, we need to invalidate any
|
|
|
|
// copies that are not on the same path to memory
|
2016-02-10 10:08:25 +01:00
|
|
|
assert(pkt->needsWritable() && !pkt->responderHadWritable());
|
|
|
|
lat += ticksToCycles(memSidePort->sendAtomic(pkt));
|
2015-08-21 13:03:20 +02:00
|
|
|
|
|
|
|
return lat * clockPeriod();
|
|
|
|
}
|
|
|
|
|
|
|
|
// should assert here that there are no outstanding MSHRs or
|
|
|
|
// writebacks... that would mean that someone used an atomic
|
|
|
|
// access in timing mode
|
|
|
|
|
2016-05-26 12:56:24 +02:00
|
|
|
CacheBlk *blk = nullptr;
|
2015-08-21 13:03:20 +02:00
|
|
|
PacketList writebacks;
|
|
|
|
bool satisfied = access(pkt, blk, lat, writebacks);
|
|
|
|
|
|
|
|
// handle writebacks resulting from the access here to ensure they
|
|
|
|
// logically precede anything happening below
|
2015-09-25 13:26:57 +02:00
|
|
|
doWritebacksAtomic(writebacks);
|
2015-08-21 13:03:20 +02:00
|
|
|
|
|
|
|
if (!satisfied) {
|
|
|
|
// MISS
|
|
|
|
|
2016-04-21 10:48:06 +02:00
|
|
|
// deal with the packets that go through the write path of
|
|
|
|
// the cache, i.e. any evictions and uncacheable writes
|
|
|
|
if (pkt->isEviction() ||
|
|
|
|
(pkt->req->isUncacheable() && pkt->isWrite())) {
|
|
|
|
lat += ticksToCycles(memSidePort->sendAtomic(pkt));
|
|
|
|
return lat * clockPeriod();
|
|
|
|
}
|
|
|
|
// only misses left
|
|
|
|
|
|
|
|
PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable());
|
2015-08-21 13:03:20 +02:00
|
|
|
|
2016-05-26 12:56:24 +02:00
|
|
|
bool is_forward = (bus_pkt == nullptr);
|
2015-08-21 13:03:20 +02:00
|
|
|
|
|
|
|
if (is_forward) {
|
|
|
|
// just forwarding the same request to the next level
|
|
|
|
// no local cache operation involved
|
|
|
|
bus_pkt = pkt;
|
|
|
|
}
|
|
|
|
|
|
|
|
DPRINTF(Cache, "Sending an atomic %s for %#llx (%s)\n",
|
|
|
|
bus_pkt->cmdString(), bus_pkt->getAddr(),
|
|
|
|
bus_pkt->isSecure() ? "s" : "ns");
|
|
|
|
|
|
|
|
#if TRACING_ON
|
|
|
|
CacheBlk::State old_state = blk ? blk->status : 0;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));
|
|
|
|
|
2016-04-21 10:48:06 +02:00
|
|
|
bool is_invalidate = bus_pkt->isInvalidate();
|
|
|
|
|
2015-08-21 13:03:20 +02:00
|
|
|
// We are now dealing with the response handling
|
2016-05-26 12:56:24 +02:00
|
|
|
DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in "
|
|
|
|
"state %i\n", bus_pkt->cmdString(), bus_pkt->getAddr(),
|
2015-08-21 13:03:20 +02:00
|
|
|
bus_pkt->isSecure() ? "s" : "ns",
|
|
|
|
old_state);
|
|
|
|
|
|
|
|
// If packet was a forward, the response (if any) is already
|
|
|
|
// in place in the bus_pkt == pkt structure, so we don't need
|
|
|
|
// to do anything. Otherwise, use the separate bus_pkt to
|
|
|
|
// generate response to pkt and then delete it.
|
|
|
|
if (!is_forward) {
|
|
|
|
if (pkt->needsResponse()) {
|
|
|
|
assert(bus_pkt->isResponse());
|
|
|
|
if (bus_pkt->isError()) {
|
|
|
|
pkt->makeAtomicResponse();
|
|
|
|
pkt->copyError(bus_pkt);
|
|
|
|
} else if (pkt->cmd == MemCmd::WriteLineReq) {
|
|
|
|
// note the use of pkt, not bus_pkt here.
|
|
|
|
|
|
|
|
// write-line request to the cache that promoted
|
|
|
|
// the write to a whole line
|
2015-11-06 09:26:41 +01:00
|
|
|
blk = handleFill(pkt, blk, writebacks,
|
|
|
|
allocOnFill(pkt->cmd));
|
2016-04-21 10:48:06 +02:00
|
|
|
assert(blk != nullptr);
|
|
|
|
is_invalidate = false;
|
2016-08-12 15:11:45 +02:00
|
|
|
satisfyRequest(pkt, blk);
|
2015-08-21 13:03:20 +02:00
|
|
|
} else if (bus_pkt->isRead() ||
|
|
|
|
bus_pkt->cmd == MemCmd::UpgradeResp) {
|
|
|
|
// we're updating cache state to allow us to
|
|
|
|
// satisfy the upstream request from the cache
|
2015-11-06 09:26:41 +01:00
|
|
|
blk = handleFill(bus_pkt, blk, writebacks,
|
|
|
|
allocOnFill(pkt->cmd));
|
2016-08-12 15:11:45 +02:00
|
|
|
satisfyRequest(pkt, blk);
|
|
|
|
maintainClusivity(pkt->fromCache(), blk);
|
2015-08-21 13:03:20 +02:00
|
|
|
} else {
|
|
|
|
// we're satisfying the upstream request without
|
|
|
|
// modifying cache state, e.g., a write-through
|
|
|
|
pkt->makeAtomicResponse();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
delete bus_pkt;
|
|
|
|
}
|
2016-04-21 10:48:06 +02:00
|
|
|
|
|
|
|
if (is_invalidate && blk && blk->isValid()) {
|
|
|
|
invalidateBlock(blk);
|
|
|
|
}
|
2015-08-21 13:03:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Note that we don't invoke the prefetcher at all in atomic mode.
|
|
|
|
// It's not clear how to do it properly, particularly for
|
|
|
|
// prefetchers that aggressively generate prefetch candidates and
|
|
|
|
// rely on bandwidth contention to throttle them; these will tend
|
|
|
|
// to pollute the cache in atomic mode since there is no bandwidth
|
|
|
|
// contention. If we ever do want to enable prefetching in atomic
|
|
|
|
// mode, though, this is the place to do it... see timingAccess()
|
|
|
|
// for an example (though we'd want to issue the prefetch(es)
|
|
|
|
// immediately rather than calling requestMemSideBus() as we do
|
|
|
|
// there).
|
|
|
|
|
2015-11-06 09:26:41 +01:00
|
|
|
// do any writebacks resulting from the response handling
|
2015-09-25 13:26:57 +02:00
|
|
|
doWritebacksAtomic(writebacks);
|
2015-08-21 13:03:20 +02:00
|
|
|
|
2015-11-06 09:26:41 +01:00
|
|
|
// if we used the temp block, check to see if it is valid and if so
|
|
|
|
// clear it out, but only do so after the call to recvAtomic is
|
|
|
|
// finished so that any downstream observers (such as a snoop
|
|
|
|
// filter) first see the fill, and only then see the eviction
|
|
|
|
if (blk == tempBlock && tempBlock->isValid()) {
|
|
|
|
// the atomic CPU calls recvAtomic for fetch and load/store
|
|
|
|
// sequentially, and we may already have a tempBlock
|
|
|
|
// writeback from the fetch that we have not yet sent
|
|
|
|
if (tempBlockWriteback) {
|
|
|
|
// if that is the case, write the previous one back, and
|
|
|
|
// do not schedule any new event
|
|
|
|
writebackTempBlockAtomic();
|
|
|
|
} else {
|
|
|
|
// the writeback/clean eviction happens after the call to
|
|
|
|
// recvAtomic has finished (but before any successive
|
|
|
|
// calls), so that the response handling from the fill is
|
|
|
|
// allowed to happen first
|
|
|
|
schedule(writebackTempBlockAtomicEvent, curTick());
|
|
|
|
}
|
|
|
|
|
2015-11-06 09:26:43 +01:00
|
|
|
tempBlockWriteback = (blk->isDirty() || writebackClean) ?
|
|
|
|
writebackBlk(blk) : cleanEvictBlk(blk);
|
mem: Add cache clusivity
This patch adds a parameter to control the cache clusivity, that is if
the cache is mostly inclusive or exclusive. At the moment there is no
intention to support strict policies, and thus the options are: 1)
mostly inclusive, or 2) mostly exclusive.
The choice of policy guides the behaviuor on a cache fill, and a new
helper function, allocOnFill, is created to encapsulate the decision
making process. For the timing mode, the decision is annotated on the
MSHR on sending out the downstream packet, and in atomic we directly
pass the decision to handleFill. We (ab)use the tempBlock in cases
where we are not allocating on fill, leaving the rest of the cache
unaffected. Simple and effective.
This patch also makes it more explicit that multiple caches are
allowed to consider a block writable (this is the case
also before this patch). That is, for a mostly inclusive cache,
multiple caches upstream may also consider the block exclusive. The
caches considering the block writable/exclusive all appear along the
same path to memory, and from a coherency protocol point of view it
works due to the fact that we always snoop upwards in zero time before
querying any downstream cache.
Note that this patch does not introduce clean writebacks. Thus, for
clean lines we are essentially removing a cache level if it is made
mostly exclusive. For example, lines from the read-only L1 instruction
cache or table-walker cache are always clean, and simply get dropped
rather than being passed to the L2. If the L2 is mostly exclusive and
does not allocate on fill it will thus never hold the line. A follow
on patch adds the clean writebacks.
The patch changes the L2 of the O3_ARM_v7a CPU configuration to be
mostly exclusive (and stats are affected accordingly).
2015-11-06 09:26:41 +01:00
|
|
|
blk->invalidate();
|
|
|
|
}
|
|
|
|
|
2015-08-21 13:03:20 +02:00
|
|
|
if (pkt->needsResponse()) {
|
|
|
|
pkt->makeAtomicResponse();
|
|
|
|
}
|
|
|
|
|
|
|
|
return lat * clockPeriod();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
|
|
|
|
{
|
|
|
|
if (system->bypassCaches()) {
|
|
|
|
// Packets from the memory side are snoop requests and
|
|
|
|
// shouldn't happen in bypass mode.
|
|
|
|
assert(fromCpuSide);
|
|
|
|
|
|
|
|
// The cache should be flushed if we are in cache bypass mode,
|
|
|
|
// so we don't need to check if we need to update anything.
|
|
|
|
memSidePort->sendFunctional(pkt);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
Addr blk_addr = blockAlign(pkt->getAddr());
|
|
|
|
bool is_secure = pkt->isSecure();
|
|
|
|
CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
|
|
|
|
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
|
|
|
|
|
|
|
|
pkt->pushLabel(name());
|
|
|
|
|
|
|
|
CacheBlkPrintWrapper cbpw(blk);
|
|
|
|
|
|
|
|
// Note that just because an L2/L3 has valid data doesn't mean an
|
|
|
|
// L1 doesn't have a more up-to-date modified copy that still
|
|
|
|
// needs to be found. As a result we always update the request if
|
|
|
|
// we have it, but only declare it satisfied if we are the owner.
|
|
|
|
|
|
|
|
// see if we have data at all (owned or otherwise)
|
|
|
|
bool have_data = blk && blk->isValid()
|
|
|
|
&& pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
|
|
|
|
blk->data);
|
|
|
|
|
2015-12-31 15:32:58 +01:00
|
|
|
// data we have is dirty if marked as such or if we have an
|
|
|
|
// in-service MSHR that is pending a modified line
|
2015-08-21 13:03:20 +02:00
|
|
|
bool have_dirty =
|
|
|
|
have_data && (blk->isDirty() ||
|
2015-12-31 15:32:58 +01:00
|
|
|
(mshr && mshr->inService && mshr->isPendingModified()));
|
2015-08-21 13:03:20 +02:00
|
|
|
|
|
|
|
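// the access is complete if we found dirty data here, or if it was
// satisfied by a packet queued in the CPU-side port, the MSHRs, the
// write buffer, or the memory-side port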
bool done = have_dirty
|
|
|
|
|| cpuSidePort->checkFunctional(pkt)
|
|
|
|
|| mshrQueue.checkFunctional(pkt, blk_addr)
|
|
|
|
|| writeBuffer.checkFunctional(pkt, blk_addr)
|
|
|
|
|| memSidePort->checkFunctional(pkt);
|
|
|
|
|
2015-12-31 18:32:09 +01:00
|
|
|
DPRINTF(CacheVerbose, "functional %s %#llx (%s) %s%s%s\n",
|
2015-08-21 13:03:20 +02:00
|
|
|
pkt->cmdString(), pkt->getAddr(), is_secure ? "s" : "ns",
|
|
|
|
(blk && blk->isValid()) ? "valid " : "",
|
|
|
|
have_data ? "data " : "", done ? "done " : "");
|
|
|
|
|
|
|
|
// We're leaving the cache, so pop cache->name() label
|
|
|
|
pkt->popLabel();
|
|
|
|
|
|
|
|
if (done) {
|
|
|
|
pkt->makeResponse();
|
|
|
|
} else {
|
|
|
|
// if it came as a request from the CPU side then make sure it
|
|
|
|
// continues towards the memory side
|
|
|
|
if (fromCpuSide) {
|
|
|
|
memSidePort->sendFunctional(pkt);
|
2016-05-26 12:56:24 +02:00
|
|
|
} else if (cpuSidePort->isSnooping()) {
|
2015-08-21 13:03:20 +02:00
|
|
|
// if it came from the memory side, it must be a snoop request
|
|
|
|
// and we should only forward it if we are forwarding snoops
|
|
|
|
cpuSidePort->sendFunctionalSnoop(pkt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// Response handling: responses from the memory side
|
|
|
|
//
|
|
|
|
/////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
|
2016-03-17 14:51:18 +01:00
|
|
|
void
|
|
|
|
Cache::handleUncacheableWriteResp(PacketPtr pkt)
|
|
|
|
{
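// an uncacheable write response is simply turned around to the CPU
// side, charging responseLatency plus any delay accumulated on the
// crossbar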
|
|
|
|
Tick completion_time = clockEdge(responseLatency) +
|
|
|
|
pkt->headerDelay + pkt->payloadDelay;
|
|
|
|
|
2016-04-21 10:48:07 +02:00
|
|
|
// Reset the bus additional time as it is now accounted for
|
|
|
|
pkt->headerDelay = pkt->payloadDelay = 0;
|
2016-03-17 14:51:18 +01:00
|
|
|
|
2016-04-21 10:48:07 +02:00
|
|
|
cpuSidePort->schedTimingResp(pkt, completion_time, true);
|
2016-03-17 14:51:18 +01:00
|
|
|
}
|
|
|
|
|
2015-08-21 13:03:20 +02:00
|
|
|
void
|
|
|
|
Cache::recvTimingResp(PacketPtr pkt)
|
|
|
|
{
|
|
|
|
assert(pkt->isResponse());
|
|
|
|
|
|
|
|
// all header delay should be paid for by the crossbar, unless
|
|
|
|
// this is a prefetch response from above
|
|
|
|
panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
|
|
|
|
"%s saw a non-zero packet delay\n", name());
|
|
|
|
|
|
|
|
bool is_error = pkt->isError();
|
|
|
|
|
|
|
|
if (is_error) {
|
|
|
|
DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), "
|
|
|
|
"cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
|
|
|
|
pkt->cmdString());
|
|
|
|
}
|
|
|
|
|
|
|
|
DPRINTF(Cache, "Handling response %s for addr %#llx size %d (%s)\n",
|
|
|
|
pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
|
|
|
|
pkt->isSecure() ? "s" : "ns");
|
|
|
|
|
2016-03-17 14:51:18 +01:00
|
|
|
// if this is a write, we should be looking at an uncacheable
|
|
|
|
// write
|
|
|
|
if (pkt->isWrite()) {
|
|
|
|
assert(pkt->req->isUncacheable());
|
|
|
|
handleUncacheableWriteResp(pkt);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// we have dealt with any (uncacheable) writes above, from here on
|
|
|
|
// we know we are dealing with an MSHR due to a miss or a prefetch
|
2016-04-21 10:48:07 +02:00
|
|
|
MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
|
2016-03-17 14:51:18 +01:00
|
|
|
assert(mshr);
|
2015-08-21 13:03:20 +02:00
|
|
|
|
|
|
|
if (mshr == noTargetMSHR) {
|
|
|
|
// we always clear at least one target
|
|
|
|
clearBlocked(Blocked_NoTargets);
|
2016-05-26 12:56:24 +02:00
|
|
|
noTargetMSHR = nullptr;
|
2015-08-21 13:03:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Initial target is used just for stats
|
|
|
|
MSHR::Target *initial_tgt = mshr->getTarget();
|
|
|
|
int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
|
|
|
|
Tick miss_latency = curTick() - initial_tgt->recvTime;
|
|
|
|
|
|
|
|
if (pkt->req->isUncacheable()) {
|
|
|
|
assert(pkt->req->masterId() < system->maxMasters());
|
|
|
|
mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
|
|
|
|
miss_latency;
|
|
|
|
} else {
|
|
|
|
assert(pkt->req->masterId() < system->maxMasters());
|
|
|
|
mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
|
|
|
|
miss_latency;
|
|
|
|
}
|
|
|
|
|
2016-03-17 14:51:18 +01:00
|
|
|
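// remember whether the MSHR queue was full so that we can unblock
// the cache if deallocating this MSHR frees up an entry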
bool wasFull = mshrQueue.isFull();
|
|
|
|
|
|
|
|
PacketList writebacks;
|
|
|
|
|
|
|
|
Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
|
|
|
|
|
2015-12-31 15:32:58 +01:00
|
|
|
// upgrade deferred targets if the response has no sharers, and is
|
|
|
|
// thus passing writable
|
|
|
|
if (!pkt->hasSharers()) {
|
|
|
|
mshr->promoteWritable();
|
2015-10-29 13:48:20 +01:00
|
|
|
}
|
|
|
|
|
2015-08-21 13:03:20 +02:00
|
|
|
bool is_fill = !mshr->isForward &&
|
|
|
|
(pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
|
|
|
|
|
2015-10-29 13:48:20 +01:00
|
|
|
CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
|
|
|
|
|
2015-08-21 13:03:20 +02:00
|
|
|
if (is_fill && !is_error) {
|
|
|
|
DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
|
|
|
|
pkt->getAddr());
|
|
|
|
|
2015-11-06 09:26:41 +01:00
|
|
|
blk = handleFill(pkt, blk, writebacks, mshr->allocOnFill);
|
2016-05-26 12:56:24 +02:00
|
|
|
assert(blk != nullptr);
|
2015-08-21 13:03:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// allow invalidation responses originating from write-line
|
|
|
|
// requests to be discarded
|
2015-09-25 13:26:58 +02:00
|
|
|
bool is_invalidate = pkt->isInvalidate();
|
2015-08-21 13:03:20 +02:00
|
|
|
|
|
|
|
// First offset for critical word first calculations
|
|
|
|
int initial_offset = initial_tgt->pkt->getOffset(blkSize);
|
|
|
|
|
2016-08-12 15:11:45 +02:00
|
|
|
bool from_cache = false;
|
|
|
|
|
2015-08-21 13:03:20 +02:00
|
|
|
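// service all targets recorded on this MSHR, in order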
while (mshr->hasTargets()) {
|
|
|
|
MSHR::Target *target = mshr->getTarget();
|
|
|
|
Packet *tgt_pkt = target->pkt;
|
|
|
|
|
|
|
|
switch (target->source) {
|
|
|
|
case MSHR::Target::FromCPU:
|
|
|
|
Tick completion_time;
|
|
|
|
// Here we charge completion_time with the delay of the xbar if the
|
|
|
|
// packet comes from it, as accounted for in headerDelay.
|
|
|
|
completion_time = pkt->headerDelay;
|
|
|
|
|
|
|
|
// Software prefetch handling for cache closest to core
|
|
|
|
if (tgt_pkt->cmd.isSWPrefetch()) {
|
2016-05-26 12:56:24 +02:00
|
|
|
// a software prefetch would have already been ack'd
|
|
|
|
// immediately with dummy data so the core would be able to
|
|
|
|
// retire it. This request completes right here, so we
|
|
|
|
// deallocate it.
|
2015-08-21 13:03:20 +02:00
|
|
|
delete tgt_pkt->req;
|
|
|
|
delete tgt_pkt;
|
|
|
|
break; // skip response
|
|
|
|
}
|
|
|
|
|
2016-08-12 15:11:45 +02:00
|
|
|
// keep track of whether we have responded to another
|
|
|
|
// cache
|
|
|
|
from_cache = from_cache || tgt_pkt->fromCache();
|
|
|
|
|
2015-08-21 13:03:20 +02:00
|
|
|
// unlike the other packet flows, where data is found in other
|
|
|
|
// caches or memory and brought back, write-line requests always
|
|
|
|
// have the data right away, so the above check for "is fill?"
|
|
|
|
// cannot actually be determined until examining the stored MSHR
|
|
|
|
// state. We "catch up" with that logic here, which is duplicated
|
|
|
|
// from above.
|
|
|
|
if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
|
|
|
|
assert(!is_error);
|
2015-12-31 15:32:58 +01:00
|
|
|
// we got the block in a writable state, so promote
|
|
|
|
// any deferred targets if possible
|
|
|
|
mshr->promoteWritable();
|
2015-08-21 13:03:20 +02:00
|
|
|
// NB: we use the original packet here and not the response!
|
2015-11-06 09:26:41 +01:00
|
|
|
blk = handleFill(tgt_pkt, blk, writebacks, mshr->allocOnFill);
|
2016-05-26 12:56:24 +02:00
|
|
|
assert(blk != nullptr);
|
2015-08-21 13:03:20 +02:00
|
|
|
|
|
|
|
// treat as a fill, and discard the invalidation
|
|
|
|
// response
|
|
|
|
is_fill = true;
|
2015-09-25 13:26:58 +02:00
|
|
|
is_invalidate = false;
|
2015-08-21 13:03:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (is_fill) {
|
2016-08-12 15:11:45 +02:00
|
|
|
satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());
|
2015-08-21 13:03:20 +02:00
|
|
|
|
|
|
|
// How many bytes past the first request is this one
|
|
|
|
int transfer_offset =
|
|
|
|
tgt_pkt->getOffset(blkSize) - initial_offset;
|
|
|
|
if (transfer_offset < 0) {
|
|
|
|
transfer_offset += blkSize;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If not critical word (offset) return payloadDelay.
|
|
|
|
// responseLatency is the latency of the return path
|
|
|
|
// from lower level caches/memory to an upper level cache or
|
|
|
|
// the core.
|
|
|
|
completion_time += clockEdge(responseLatency) +
|
|
|
|
(transfer_offset ? pkt->payloadDelay : 0);
|
|
|
|
|
|
|
|
assert(!tgt_pkt->req->isUncacheable());
|
|
|
|
|
|
|
|
assert(tgt_pkt->req->masterId() < system->maxMasters());
|
|
|
|
missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
|
|
|
|
completion_time - target->recvTime;
|
|
|
|
} else if (pkt->cmd == MemCmd::UpgradeFailResp) {
|
|
|
|
// failed StoreCond upgrade
|
|
|
|
assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
|
|
|
|
tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
|
|
|
|
tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
|
|
|
|
// responseLatency is the latency of the return path
|
|
|
|
// from lower level caches/memory to an upper level cache or
|
|
|
|
// the core.
|
|
|
|
completion_time += clockEdge(responseLatency) +
|
|
|
|
pkt->payloadDelay;
|
|
|
|
tgt_pkt->req->setExtraData(0);
|
|
|
|
} else {
|
|
|
|
// not a cache fill, just forwarding response
|
|
|
|
// responseLatency is the latency of the return path
|
|
|
|
// from lower level caches/memory to the core.
|
|
|
|
completion_time += clockEdge(responseLatency) +
|
|
|
|
pkt->payloadDelay;
|
|
|
|
if (pkt->isRead() && !is_error) {
|
|
|
|
// sanity check
|
|
|
|
assert(pkt->getAddr() == tgt_pkt->getAddr());
|
|
|
|
assert(pkt->getSize() >= tgt_pkt->getSize());
|
|
|
|
|
|
|
|
tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
tgt_pkt->makeTimingResponse();
|
|
|
|
// if this packet is an error copy that to the new packet
|
|
|
|
if (is_error)
|
|
|
|
tgt_pkt->copyError(pkt);
|
|
|
|
if (tgt_pkt->cmd == MemCmd::ReadResp &&
|
2015-09-25 13:26:58 +02:00
|
|
|
(is_invalidate || mshr->hasPostInvalidate())) {
|
2015-08-21 13:03:20 +02:00
|
|
|
// If intermediate cache got ReadRespWithInvalidate,
|
|
|
|
// propagate that. Response should not have
|
|
|
|
// isInvalidate() set otherwise.
|
|
|
|
tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
|
|
|
|
DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n",
|
|
|
|
__func__, tgt_pkt->cmdString(), tgt_pkt->getAddr());
|
|
|
|
}
|
|
|
|
// Reset the bus additional time as it is now accounted for
|
|
|
|
tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
|
2015-11-06 09:26:37 +01:00
|
|
|
cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
|
2015-08-21 13:03:20 +02:00
|
|
|
break;
|
|
|
|
|
|
|
|
case MSHR::Target::FromPrefetcher:
|
|
|
|
assert(tgt_pkt->cmd == MemCmd::HardPFReq);
|
|
|
|
if (blk)
|
|
|
|
blk->status |= BlkHWPrefetched;
|
|
|
|
delete tgt_pkt->req;
|
|
|
|
delete tgt_pkt;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MSHR::Target::FromSnoop:
|
|
|
|
// I don't believe that a snoop can be in an error state
|
|
|
|
assert(!is_error);
|
|
|
|
// response to snoop request
|
|
|
|
DPRINTF(Cache, "processing deferred snoop...\n");
|
2015-09-25 13:26:58 +02:00
|
|
|
assert(!(is_invalidate && !mshr->hasPostInvalidate()));
|
2015-08-21 13:03:20 +02:00
|
|
|
handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
panic("Illegal target->source enum %d\n", target->source);
|
|
|
|
}
|
|
|
|
|
|
|
|
mshr->popTarget();
|
|
|
|
}
|
|
|
|
|
2016-08-12 15:11:45 +02:00
|
|
|
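// if we have responded to another cache, a mostly-exclusive cache
// drops its (clean) copy of the block at this point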
maintainClusivity(from_cache, blk);
|
|
|
|
|
2015-08-21 13:03:20 +02:00
|
|
|
if (blk && blk->isValid()) {
|
|
|
|
// an invalidate response stemming from a write line request
|
|
|
|
// should not invalidate the block, so check if the
|
|
|
|
// invalidation should be discarded
|
2015-09-25 13:26:58 +02:00
|
|
|
if (is_invalidate || mshr->hasPostInvalidate()) {
|
2015-11-06 09:26:41 +01:00
|
|
|
invalidateBlock(blk);
|
2015-08-21 13:03:20 +02:00
|
|
|
} else if (mshr->hasPostDowngrade()) {
|
|
|
|
blk->status &= ~BlkWritable;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mshr->promoteDeferredTargets()) {
|
|
|
|
// avoid later read getting stale data while write miss is
|
|
|
|
// outstanding.. see comment in timingAccess()
|
|
|
|
if (blk) {
|
|
|
|
blk->status &= ~BlkReadable;
|
|
|
|
}
|
2016-03-17 14:51:18 +01:00
|
|
|
mshrQueue.markPending(mshr);
|
2015-08-21 13:03:20 +02:00
|
|
|
schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
|
|
|
|
} else {
|
2016-03-17 14:51:18 +01:00
|
|
|
mshrQueue.deallocate(mshr);
|
|
|
|
if (wasFull && !mshrQueue.isFull()) {
|
|
|
|
clearBlocked(Blocked_NoMSHRs);
|
2015-08-21 13:03:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Request the bus for a prefetch if this deallocation freed enough
|
|
|
|
// MSHRs for a prefetch to take place
|
2016-03-17 14:51:18 +01:00
|
|
|
if (prefetcher && mshrQueue.canPrefetch()) {
|
2015-08-21 13:03:20 +02:00
|
|
|
Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
|
|
|
|
clockEdge());
|
|
|
|
if (next_pf_time != MaxTick)
|
|
|
|
schedMemSideSendEvent(next_pf_time);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// reset the xbar additional timing as it is now accounted for
|
|
|
|
pkt->headerDelay = pkt->payloadDelay = 0;
|
|
|
|
|
|
|
|
// copy writebacks to write buffer
|
|
|
|
doWritebacks(writebacks, forward_time);
|
|
|
|
|
|
|
|
// if we used the temp block, check to see if it is valid and then clear it out
|
|
|
|
if (blk == tempBlock && tempBlock->isValid()) {
|
|
|
|
// We use forwardLatency here because we are copying
|
|
|
|
// Writebacks/CleanEvicts to write buffer. It specifies the latency to
|
|
|
|
// allocate an internal buffer and to schedule an event to the
|
|
|
|
// queued port.
|
2015-11-06 09:26:43 +01:00
|
|
|
if (blk->isDirty() || writebackClean) {
|
2015-08-21 13:03:20 +02:00
|
|
|
PacketPtr wbPkt = writebackBlk(blk);
|
|
|
|
allocateWriteBuffer(wbPkt, forward_time);
|
|
|
|
// Set BLOCK_CACHED flag if cached above.
|
|
|
|
if (isCachedAbove(wbPkt))
|
|
|
|
wbPkt->setBlockCached();
|
|
|
|
} else {
|
|
|
|
PacketPtr wcPkt = cleanEvictBlk(blk);
|
|
|
|
// Check to see if the block is cached above. If not, allocate
|
|
|
|
// a write buffer entry
|
|
|
|
if (isCachedAbove(wcPkt))
|
|
|
|
delete wcPkt;
|
|
|
|
else
|
|
|
|
allocateWriteBuffer(wcPkt, forward_time);
|
|
|
|
}
|
|
|
|
blk->invalidate();
|
|
|
|
}
|
|
|
|
|
2015-12-31 18:32:09 +01:00
|
|
|
DPRINTF(CacheVerbose, "Leaving %s with %s for addr %#llx\n", __func__,
|
2015-08-21 13:03:20 +02:00
|
|
|
pkt->cmdString(), pkt->getAddr());
|
|
|
|
delete pkt;
|
|
|
|
}
|
|
|
|
|
|
|
|
PacketPtr
|
|
|
|
Cache::writebackBlk(CacheBlk *blk)
|
|
|
|
{
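// create a writeback packet (WritebackDirty or WritebackClean) for
// the given block; the caller is responsible for invalidating the
// block afterwards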
|
2015-11-06 09:26:43 +01:00
|
|
|
chatty_assert(!isReadOnly || writebackClean,
|
|
|
|
"Writeback from read-only cache");
|
|
|
|
assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
|
2015-08-21 13:03:20 +02:00
|
|
|
|
|
|
|
writebacks[Request::wbMasterId]++;
|
|
|
|
|
mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to
allow clean writebacks. This functionality is crucial, especially when
having exclusive (victim) caches. For example, if read-only L1
instruction caches are not sending clean writebacks, there will never
be any spills from the L1 to the L2. At the moment the cache model
defaults to not sending clean writebacks, and this should possibly be
re-evaluated.
The implementation of clean writebacks relies on a new packet command
WritebackClean, which acts much like a Writeback (renamed
WritebackDirty), and also much like a CleanEvict. On eviction of a
clean block the cache either sends a clean evict, or a clean
writeback, and if any copies are still cached upstream the clean
evict/writeback is dropped. Similarly, if a clean evict/writeback
reaches a cache where there are outstanding MSHRs for the block, the
packet is dropped. In the typical case though, the clean writeback
allocates a block in the downstream cache, and marks it writable if
the evicted block was writable.
The patch changes the O3_ARM_v7a L1 cache configuration and the
default L1 caches in config/common/Caches.py
2015-11-06 09:26:43 +01:00
|
|
|
Request *req = new Request(tags->regenerateBlkAddr(blk->tag, blk->set),
|
|
|
|
blkSize, 0, Request::wbMasterId);
|
2015-08-21 13:03:20 +02:00
|
|
|
if (blk->isSecure())
|
mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to
allow clean writebacks. This functionality is crucial, especially when
having exclusive (victim) caches. For example, if read-only L1
instruction caches are not sending clean writebacks, there will never
be any spills from the L1 to the L2. At the moment the cache model
defaults to not sending clean writebacks, and this should possibly be
re-evaluated.
The implementation of clean writebacks relies on a new packet command
WritebackClean, which acts much like a Writeback (renamed
WritebackDirty), and also much like a CleanEvict. On eviction of a
clean block the cache either sends a clean evict, or a clean
writeback, and if any copies are still cached upstream the clean
evict/writeback is dropped. Similarly, if a clean evict/writeback
reaches a cache where there are outstanding MSHRs for the block, the
packet is dropped. In the typical case though, the clean writeback
allocates a block in the downstream cache, and marks it writable if
the evicted block was writable.
The patch changes the O3_ARM_v7a L1 cache configuration and the
default L1 caches in config/common/Caches.py
2015-11-06 09:26:43 +01:00
|
|
|
req->setFlags(Request::SECURE);
|
2015-08-21 13:03:20 +02:00
|
|
|
|
mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to
allow clean writebacks. This functionality is crucial, especially when
having exclusive (victim) caches. For example, if read-only L1
instruction caches are not sending clean writebacks, there will never
be any spills from the L1 to the L2. At the moment the cache model
defaults to not sending clean writebacks, and this should possibly be
re-evaluated.
The implementation of clean writebacks relies on a new packet command
WritebackClean, which acts much like a Writeback (renamed
WritebackDirty), and also much like a CleanEvict. On eviction of a
clean block the cache either sends a clean evict, or a clean
writeback, and if any copies are still cached upstream the clean
evict/writeback is dropped. Similarly, if a clean evict/writeback
reaches a cache where there are outstanding MSHRs for the block, the
packet is dropped. In the typical case though, the clean writeback
allocates a block in the downstream cache, and marks it writable if
the evicted block was writable.
The patch changes the O3_ARM_v7a L1 cache configuration and the
default L1 caches in config/common/Caches.py
2015-11-06 09:26:43 +01:00
|
|
|
req->taskId(blk->task_id);
|
2015-08-21 13:03:20 +02:00
|
|
|
blk->task_id= ContextSwitchTaskId::Unknown;
|
|
|
|
blk->tickInserted = curTick();
|
|
|
|
|
mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to
allow clean writebacks. This functionality is crucial, especially when
having exclusive (victim) caches. For example, if read-only L1
instruction caches are not sending clean writebacks, there will never
be any spills from the L1 to the L2. At the moment the cache model
defaults to not sending clean writebacks, and this should possibly be
re-evaluated.
The implementation of clean writebacks relies on a new packet command
WritebackClean, which acts much like a Writeback (renamed
WritebackDirty), and also much like a CleanEvict. On eviction of a
clean block the cache either sends a clean evict, or a clean
writeback, and if any copies are still cached upstream the clean
evict/writeback is dropped. Similarly, if a clean evict/writeback
reaches a cache where there are outstanding MSHRs for the block, the
packet is dropped. In the typical case though, the clean writeback
allocates a block in the downstream cache, and marks it writable if
the evicted block was writable.
The patch changes the O3_ARM_v7a L1 cache configuration and the
default L1 caches in config/common/Caches.py
2015-11-06 09:26:43 +01:00
|
|
|
PacketPtr pkt =
|
|
|
|
new Packet(req, blk->isDirty() ?
|
|
|
|
MemCmd::WritebackDirty : MemCmd::WritebackClean);
|
|
|
|
|
|
|
|
DPRINTF(Cache, "Create Writeback %#llx writable: %d, dirty: %d\n",
|
|
|
|
pkt->getAddr(), blk->isWritable(), blk->isDirty());
|
|
|
|
|
2015-08-21 13:03:20 +02:00
|
|
|
if (blk->isWritable()) {
|
|
|
|
// not asserting shared means we pass the block in modified
|
|
|
|
// state, mark our own block non-writeable
|
|
|
|
blk->status &= ~BlkWritable;
|
|
|
|
} else {
|
2015-12-31 15:32:58 +01:00
|
|
|
// we are in the Owned state, tell the receiver
|
|
|
|
pkt->setHasSharers();
|
2015-08-21 13:03:20 +02:00
|
|
|
}
|
|
|
|
|
mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to
allow clean writebacks. This functionality is crucial, especially when
having exclusive (victim) caches. For example, if read-only L1
instruction caches are not sending clean writebacks, there will never
be any spills from the L1 to the L2. At the moment the cache model
defaults to not sending clean writebacks, and this should possibly be
re-evaluated.
The implementation of clean writebacks relies on a new packet command
WritebackClean, which acts much like a Writeback (renamed
WritebackDirty), and also much like a CleanEvict. On eviction of a
clean block the cache either sends a clean evict, or a clean
writeback, and if any copies are still cached upstream the clean
evict/writeback is dropped. Similarly, if a clean evict/writeback
reaches a cache where there are outstanding MSHRs for the block, the
packet is dropped. In the typical case though, the clean writeback
allocates a block in the downstream cache, and marks it writable if
the evicted block was writable.
The patch changes the O3_ARM_v7a L1 cache configuration and the
default L1 caches in config/common/Caches.py
2015-11-06 09:26:43 +01:00
|
|
|
// make sure the block is not marked dirty
|
2015-08-21 13:03:20 +02:00
|
|
|
blk->status &= ~BlkDirty;
|
mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to
allow clean writebacks. This functionality is crucial, especially when
having exclusive (victim) caches. For example, if read-only L1
instruction caches are not sending clean writebacks, there will never
be any spills from the L1 to the L2. At the moment the cache model
defaults to not sending clean writebacks, and this should possibly be
re-evaluated.
The implementation of clean writebacks relies on a new packet command
WritebackClean, which acts much like a Writeback (renamed
WritebackDirty), and also much like a CleanEvict. On eviction of a
clean block the cache either sends a clean evict, or a clean
writeback, and if any copies are still cached upstream the clean
evict/writeback is dropped. Similarly, if a clean evict/writeback
reaches a cache where there are outstanding MSHRs for the block, the
packet is dropped. In the typical case though, the clean writeback
allocates a block in the downstream cache, and marks it writable if
the evicted block was writable.
The patch changes the O3_ARM_v7a L1 cache configuration and the
default L1 caches in config/common/Caches.py
2015-11-06 09:26:43 +01:00
|
|
|
|
|
|
|
pkt->allocate();
|
|
|
|
std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
|
|
|
|
|
|
|
|
return pkt;
|
2015-08-21 13:03:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
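// In effect, the logic above maps the victim's coherence state onto the
// outgoing writeback roughly as follows (a summary, not additional
// behaviour):
//   Modified  -> WritebackDirty, no sharers signalled
//   Owned     -> WritebackDirty, hasSharers set
//   Exclusive -> WritebackClean, no sharers signalled
//   Shared    -> WritebackClean, hasSharers set
// and it always leaves the local copy clean and non-writable for the
// caller to invalidate.
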
PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());
    // Creating a zero sized write, a message to the snoop filter
    Request *req =
        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
                    Request::wbMasterId);
    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);
    blk->task_id = ContextSwitchTaskId::Unknown;
    blk->tickInserted = curTick();

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "%s%s %x Create CleanEvict\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr());

    return pkt;
}

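// A CleanEvict is essentially a notification to snoop filters and
// downstream caches that this clean copy is being dropped; it carries no
// dirty data. Callers are expected to drop the packet instead of sending
// it when isCachedAbove() indicates that a copy still exists upstream.
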
void
Cache::memWriteback()
{
    CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
    tags->forEachBlk(visitor);
}

void
Cache::memInvalidate()
{
    CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor);
    tags->forEachBlk(visitor);
}

bool
Cache::isDirty() const
{
    CacheBlkIsDirtyVisitor visitor;
    tags->forEachBlk(visitor);

    return visitor.isDirty();
}

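// Note: memWriteback() and memInvalidate() operate functionally on the
// whole cache via the visitors below; they are presumably intended for
// maintenance operations such as flushing dirty data before a checkpoint
// or a switch of simulation mode, not for the timing-mode request path.
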
bool
Cache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        Request request(tags->regenerateBlkAddr(blk.tag, blk.set),
                        blkSize, 0, Request::funcMasterId);
        request.taskId(blk.task_id);

        Packet packet(&request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort->sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }

    return true;
}

bool
Cache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. Expect things to break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        tags->invalidate(&blk);
        blk.invalidate();
    }

    return true;
}

CacheBlk*
Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
{
    CacheBlk *blk = tags->findVictim(addr);

    // It is valid to return nullptr if there is no victim
    if (!blk)
        return nullptr;

    if (blk->isValid()) {
        Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
        if (repl_mshr) {
            // must be an outstanding upgrade request
            // on a block we're about to replace...
            assert(!blk->isWritable() || blk->isDirty());
            assert(repl_mshr->needsWritable());
            // too hard to replace block with transient state
            // allocation failed, block not inserted
            return nullptr;
        } else {
            DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
                    "(%s): %s\n", repl_addr, blk->isSecure() ? "s" : "ns",
                    addr, is_secure ? "s" : "ns",
                    blk->isDirty() ? "writeback" : "clean");

            if (blk->wasPrefetched()) {
                unusedPrefetches++;
            }
            // Will send up Writeback/CleanEvict snoops via isCachedAbove
            // when pushing this writeback list into the write buffer.
            if (blk->isDirty() || writebackClean) {
                // Save writeback packet for handling by caller
                writebacks.push_back(writebackBlk(blk));
            } else {
                writebacks.push_back(cleanEvictBlk(blk));
            }
        }
    }

    return blk;
}

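// allocateBlock() thus has three outcomes: no victim available (nullptr),
// a victim with an outstanding upgrade MSHR that is too hard to replace
// (also nullptr), or a victim handed back to the caller after the
// corresponding WritebackDirty/WritebackClean/CleanEvict packet has been
// queued on the caller-supplied writebacks list.
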
void
Cache::invalidateBlock(CacheBlk *blk)
{
    if (blk != tempBlock)
        tags->invalidate(blk);
    blk->invalidate();
}

// Note that the reason we return a list of writebacks rather than
// inserting them directly in the write buffer is that this function
// is called by both atomic and timing-mode accesses, and in atomic
// mode we don't mess with the write buffer (we just perform the
// writebacks atomically once the original request is complete).
CacheBlk*
Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                  bool allocate)
{
    assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == blockAlign(addr));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (blk == nullptr) {
        // better have read new data...
        assert(pkt->hasData());

        // only read responses and write-line requests have data;
        // note that we don't write the data here for write-line - that
        // happens in the subsequent call to satisfyRequest
        assert(pkt->isRead() || pkt->cmd == MemCmd::WriteLineReq);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(addr, is_secure, writebacks) : nullptr;

        if (blk == nullptr) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            assert(!tempBlock->isValid());
            blk = tempBlock;
            tempBlock->set = tags->extractSet(addr);
            tempBlock->tag = tags->extractTag(addr);
            // @todo: set security state as well...
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        } else {
            tags->insertBlock(pkt, blk);
        }

        // we should never be overwriting a valid block
        assert(!blk->isValid());
    } else {
        // existing block... probably an upgrade
        assert(blk->tag == tags->extractTag(addr));
        // either we're getting new data or the block should already be valid
        assert(pkt->hasData() || blk->isValid());
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    if (is_secure)
        blk->status |= BlkSecure;
    blk->status |= BlkValid | BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::WriteLineReq) {
        assert(!pkt->hasSharers());
        // at the moment other caches do not respond to the
        // invalidation requests corresponding to a whole-line write
        assert(!pkt->cacheResponding());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owner's copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
    }
    // We pay for fillLatency here.
    blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
        pkt->payloadDelay;

    return blk;
}

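// Summarising the fill-state logic in handleFill(): ignoring the temporary
// block case, the response flags translate roughly as
//   hasSharers set                          -> Shared (valid, readable)
//   hasSharers clear, cacheResponding clear -> Exclusive (writable, clean)
//   hasSharers clear, cacheResponding set   -> Modified (writable, dirty)
// so a fill never leaves the block in Owned state (see
// Packet::setCacheResponding).
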
/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
            req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just forward latency and
    // also charging the delay provided by the xbar.
    // forward_time is used as send_time in next allocateWriteBuffer().
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose,
            "%s created response: %s addr %#llx size %d tick: %lu\n",
            __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
            forward_time);
    memSidePort->schedTimingSnoopResp(pkt, forward_time, true);
}

uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_writable = pkt->needsWritable();

    // at the moment we could get an uncacheable write which does not
    // have the invalidate flag, and we need a suitable way of dealing
    // with this case
    panic_if(invalidate && pkt->req->isUncacheable(),
             "%s got an invalidating uncacheable snoop request %s to %#llx",
             name(), pkt->cmdString(), pkt->getAddr());

    uint32_t snoop_delay = 0;

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it. save & restore packet src since it will get
        // rewritten to be relative to cpu-side bus (if any)
        bool alreadyResponded = pkt->cacheResponding();
        if (is_timing) {
            // copy the packet so that we can clear any flags before
            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort->sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

            if (snoopPkt.cacheResponding()) {
                // cache-to-cache response from some upper cache
                assert(!alreadyResponded);
                pkt->setCacheResponding();
            }
            // upstream cache has the block, or has an outstanding
            // MSHR, pass the flag on
            if (snoopPkt.hasSharers()) {
                pkt->setHasSharers();
            }
            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requester.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
        } else {
            cpuSidePort->sendAtomicSnoop(pkt);
            if (!alreadyResponded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }

    if (!blk || !blk->isValid()) {
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a
            // packet was allocated in MSHR::handleSnoop and we have
            // to delete it
            assert(pkt->needsResponse());

            // we have passed the block to a cache upstream, that
            // cache should be responding
            assert(pkt->cacheResponding());

            delete pkt;
        }

        DPRINTF(CacheVerbose, "%s snoop miss for %s addr %#llx size %d\n",
                __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s snoop hit for %s addr %#llx size %d, "
                "old state is %s\n", __func__, pkt->cmdString(),
                pkt->getAddr(), pkt->getSize(), blk->print());
    }

    chatty_assert(!(isReadOnly && blk->isDirty()),
                  "Should never have a dirty block in a read-only cache %s\n",
                  name());

    // We may end up modifying both the block state and the packet (if
    // we respond in atomic mode), so just figure out what to do now
    // and then do it later. If we find dirty data while snooping for
    // an invalidate, we don't need to send a response. The
    // invalidation itself is taken care of below.
    bool respond = blk->isDirty() && pkt->needsResponse() &&
        pkt->cmd != MemCmd::InvalidateReq;
    bool have_writable = blk->isWritable();

    // Invalidate any prefetches from below that would strip write permissions
    // MemCmd::HardPFReq is only observed by upstream caches. After missing
    // above and in its own cache, a new MemCmd::ReadReq is created that
    // downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->cmdString());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state
        assert(!needs_writable);
        pkt->setHasSharers();

        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (have_writable) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below and all other copies will be invalidated
            // through express snoops, and if needsWritable is not set
            // we already called setHasSharers above
        }

        // if we are returning a writable and dirty (Modified) line,
        // we should be invalidating the line
        panic_if(!invalidate && !pkt->hasSharers(),
                 "%s is passing a Modified line through %s to %#llx, "
                 "but keeping the block",
                 name(), pkt->cmdString(), pkt->getAddr());

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            // packets such as upgrades do not actually have any data
            // payload
            if (pkt->hasData())
                pkt->setDataFromBlock(blk->data, blkSize);
        }
    }

    if (!respond && is_timing && is_deferred) {
        // if it's a deferred timing snoop to which we are not
        // responding, then we've made a copy of both the request and
        // the packet, delete them here
        assert(pkt->needsResponse());
        assert(!pkt->cacheResponding());
        delete pkt->req;
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (invalidate) {
        invalidateBlock(blk);
    }

    DPRINTF(Cache, "new state is %s\n", blk->print());

    return snoop_delay;
}

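// Broadly, handleSnoop() supplies data only when the block is dirty, the
// snoop needs a response and is not a plain InvalidateReq;
// responderHadWritable is additionally signalled when the copy was
// writable (Modified). Non-invalidating reads always respond with
// hasSharers set and keep a copy, while invalidating snoops drop the block
// at the end via invalidateBlock().
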
void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // Snoops shouldn't happen when bypassing caches
    assert(!system->bypassCaches());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = blockAlign(pkt->getAddr());
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it. Do not overwrite what other neighbouring caches
    // have already done, rather take the maximum. The update is
    // tentative, for cases where we return before an upward snoop
    // happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());

    // Inform requests (Prefetch, CleanEvict or Writeback) from below of
    // an MSHR hit by setting the BLOCK_CACHED flag.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from "
                "lower cache on mshr hit %#x\n",
                pkt->cmdString(), pkt->getAddr());
        pkt->setBlockCached();
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s). "
                "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
    if (wb_entry) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction());

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for Writeback/CleanEvict snoop. On return the snoop will
            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
            // any CleanEvicts from travelling down the memory hierarchy.
            pkt->setBlockCached();
            DPRINTF(Cache, "Squashing %s from lower cache on writequeue hit"
                    " %#x\n", pkt->cmdString(), pkt->getAddr());
            return;
        }

        // conceptually writebacks are no different to other blocks in
        // this cache, so the behaviour is modelled after handleSnoop,
        // the difference being that instead of querying the block
        // state to determine if it is dirty and writable, we use the
        // command and fields of the writeback packet
        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
            pkt->needsResponse() && pkt->cmd != MemCmd::InvalidateReq;
        bool have_writable = !wb_pkt->hasSharers();
        bool invalidate = pkt->isInvalidate();

        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
            assert(!pkt->needsWritable());
            pkt->setHasSharers();
            wb_pkt->setHasSharers();
        }

        if (respond) {
            pkt->setCacheResponding();

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }

        if (invalidate) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove entry from writeback buffer.
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}

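// Note the ordering in recvTimingSnoopReq() above: an in-service MSHR gets
// the first chance to absorb or defer the snoop, then the write buffer is
// checked so that in-flight writebacks can supply data or be squashed, and
// only then is the tag array snooped via handleSnoop().
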
bool
Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{
    // Express snoop responses from master to slave, e.g., from L1 to L2
    cache->recvTimingSnoopResp(pkt);
    return true;
}

Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!system->bypassCaches());

    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}

QueueEntry*
Cache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr =
            mshrQueue.findPending(wq_entry->blkAddr,
                                  wq_entry->isSecure);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr =
            writeBuffer.findPending(miss_mshr->blkAddr,
                                    miss_mshr->isSecure);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are doing a write
            // and we didn't have permissions, and then subsequently
            // saw a writeback (the owned copy got evicted). We need
            // to perform the writeback first to preserve the dirty
            // data, and then we can issue the write.

            // should we return wq_entry here instead? I.e. do we
            // have to flush writes in order? I don't think so... not
            // for Alpha anyway. Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests. Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = blockAlign(pkt->getAddr());
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt->req;
                delete pkt;
            }
        }
    }

    return nullptr;
}

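// Snoop the caches above us to determine whether the block referenced by
// pkt is still cached upstream.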
bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction());
        snoop_pkt.senderState = nullptr;
        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort->sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}

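// Earliest tick at which the MSHR queue, the write buffer, or the
// prefetcher will have something ready to send.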
Tick
Cache::nextQueueReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
                              writeBuffer.nextReadyTime());

    // Don't signal prefetch ready time if no MSHRs available
    // Will signal once enough MSHRs are deallocated
    if (prefetcher && mshrQueue.canPrefetch()) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}

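// Send the first target of the given MSHR downstream. Returns true if the
// cache ends up waiting for a retry from the memory-side port.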
bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    DPRINTF(Cache, "%s MSHR %s for addr %#llx size %d\n", __func__,
            tgt_pkt->cmdString(), tgt_pkt->getAddr(),
            tgt_pkt->getSize());

    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        // we should never have hardware prefetches to allocated
        // blocks
        assert(blk == nullptr);

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort->sendTimingSnoopReq(&snoop_pkt);

        // Check to see if the prefetch was squashed by an upper cache
        // (to prevent us from grabbing the line), or if a writeback
        // arrived between the time the prefetch was placed in the
        // MSHRs and when it was selected to be sent.

        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
            return false;
        }

        if (snoop_pkt.isBlockCached()) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache. "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);

            // Deallocate the mshr target
            if (mshrQueue.forceDeallocateTarget(mshr)) {
                // Clear block if this deallocation freed an MSHR
                // when all had previously been utilized
                clearBlocked(Blocked_NoMSHRs);
            }
            return false;
        }
    }

    // either a prefetch that is not present upstream, or a normal
    // MSHR request, proceed to get the packet to send downstream
    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable());

    mshr->isForward = (pkt == nullptr);

    if (mshr->isForward) {
        // not a cache block request, but a response is expected
        // make copy of current packet to forward, keep current
        // copy for response handling
        pkt = new Packet(tgt_pkt, false, true);
        assert(!pkt->isWrite());
    }

    // play it safe and append (rather than set) the sender state,
    // as forwarded packets may already have existing state
    pkt->pushSenderState(mshr);

    if (!memSidePort->sendTimingReq(pkt)) {
        // we are awaiting a retry, but we
        // delete the packet and will be creating a new packet
        // when we get the opportunity
        delete pkt;

        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        // As part of the call to sendTimingReq the packet is
        // forwarded to all neighbouring caches (and any caches
        // above them) as a snoop. Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);
        return false;
    }
}

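// Send the single target of the given write-queue entry downstream.
// Returns true if the cache ends up waiting for a retry.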
bool
Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    assert(wq_entry);

    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s write %s for addr %#llx size %d\n", __func__,
            tgt_pkt->cmdString(), tgt_pkt->getAddr(),
            tgt_pkt->getSize());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort->sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}

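// Checkpointing: the cache contents are not serialized, only a flag
// recording whether dirty data would be lost on restore.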
void
Cache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly and dirty data "
             "in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}

void
Cache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not supported "
              "in the classic memory system. Please remove any caches or "
              "drain them properly before taking checkpoints.\n");
    }
}

///////////////
//
// CpuSidePort
//
///////////////

AddrRangeList
Cache::CpuSidePort::getAddrRanges() const
{
    return cache->getAddrRanges();
}

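// Timing request from the CPU side: express snoops bypass any blocking,
// everything else is subject to the cache's flow control.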
bool
Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
    assert(!cache->system->bypassCaches());

    bool success = false;

    // always let express snoop packets through even if blocked
    if (pkt->isExpressSnoop()) {
        // do not change the current retry state
        bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
        assert(bypass_success);
        return true;
    } else if (blocked || mustSendRetry) {
        // either already committed to send a retry, or blocked
        success = false;
    } else {
        // pass it on to the cache, and let the cache decide if we
        // have to retry or not
        success = cache->recvTimingReq(pkt);
    }

    // remember if we have to retry
    mustSendRetry = !success;
    return success;
}

Tick
Cache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    return cache->recvAtomic(pkt);
}

void
Cache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    // functional request
    cache->functionalAccess(pkt, true);
}

Cache::
CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache,
                         const std::string &_label)
    : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}

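// SimObject factory: construct a Cache from its generated parameter class.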
Cache*
CacheParams::create()
{
    assert(tags);

    return new Cache(this);
}

///////////////
//
// MemSidePort
//
///////////////

bool
Cache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    cache->recvTimingResp(pkt);
    return true;
}

// Express snooping requests to memside port
void
Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // handle snooping requests
    cache->recvTimingSnoopReq(pkt);
}

Tick
Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    return cache->recvAtomicSnoop(pkt);
}

void
Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}

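// Called when the request packet queue wants a packet to send: ask the
// cache for the next MSHR or write-buffer entry and try to send it,
// letting any conflicting snoop responses go first.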
void
Cache::CacheReqPacketQueue::sendDeferredPacket()
{
    // sanity check
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue, instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    QueueEntry* entry = cache.getNextQueueEntry();

    if (!entry) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
    } else {
        // let our snoop responses go first if there are responses to
        // the same addresses
        if (checkConflictingSnoop(entry->blkAddr)) {
            return;
        }
        waitingOnRetry = entry->sendPacket(cache);
    }

    // if we succeeded and are not waiting for a retry, schedule the
    // next send considering when the next queue is ready, note that
    // snoop responses have their own packet queue and thus schedule
    // their own events
    if (!waitingOnRetry) {
        schedSendEvent(cache.nextQueueReadyTime());
    }
}

Cache::
MemSidePort::MemSidePort(const std::string &_name, Cache *_cache,
                         const std::string &_label)
    : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
      _snoopRespQueue(*_cache, *this, _label), cache(_cache)
{
}