# Copyright (c) 2012-2013, 2015 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
#          Andreas Hansson
from m5.params import *
from m5.proxy import *

from MemObject import MemObject
from Prefetcher import BasePrefetcher
from Tags import *
class BaseCache(MemObject):
    """Common configuration parameters shared by all cache models.

    This SimObject is abstract: concrete cache models (e.g. Cache
    below) inherit from it and are the ones actually instantiated in a
    configuration script.
    """

    type = 'BaseCache'
    abstract = True
    cxx_header = "mem/cache/base.hh"

    # Cache geometry.
    size = Param.MemorySize("Capacity")
    assoc = Param.Unsigned("Associativity")

    # Latencies, expressed in cycles of this object's clock domain.
    hit_latency = Param.Cycles("Hit latency")
    response_latency = Param.Cycles("Latency for the return path on a miss")

    # A value of 0 disables the limit (simulation never exits on
    # miss count).
    max_miss_count = Param.Counter(0,
        "Number of misses to handle before calling exit")

    # Miss-handling resources: MSHRs track outstanding misses, each
    # coalescing up to tgts_per_mshr accesses to the same block; a
    # number of MSHRs can be reserved for demand (non-prefetch)
    # accesses.
    mshrs = Param.Unsigned("Number of MSHRs (max outstanding requests)")
    demand_mshr_reserve = Param.Unsigned(1, "MSHRs reserved for demand access")
    tgts_per_mshr = Param.Unsigned("Max number of accesses per MSHR")
    write_buffers = Param.Unsigned(8, "Number of write buffers")

    # A read-only cache (e.g. an instruction cache) never holds dirty
    # data.
    is_read_only = Param.Bool(False, "Is this cache read only (e.g. inst)")

    # Optional hardware prefetcher; disabled (NULL) by default.
    prefetcher = Param.BasePrefetcher(NULL, "Prefetcher attached to cache")
    prefetch_on_access = Param.Bool(False,
         "Notify the hardware prefetcher on every access (not just misses)")

    # Tag store, which also embodies the replacement policy (LRU by
    # default).
    tags = Param.BaseTags(LRU(), "Tag store (replacement policy)")
    sequential_access = Param.Bool(False,
        "Whether to access tags and data sequentially")

    # Ports connecting this cache upstream (towards the CPU) and
    # downstream (towards memory).
    cpu_side = SlavePort("Upstream port closer to the CPU and/or device")
    mem_side = MasterPort("Downstream port closer to memory")

    # By default the cache responds to all of memory; a narrower range
    # allows address-interleaved (striped) cache banks.
    addr_ranges = VectorParam.AddrRange([AllMemory],
         "Address range for the CPU-side port (to allow striping)")

    system = Param.System(Parent.any, "System we belong to")
# Enum for cache clusivity, currently mostly inclusive or mostly
# exclusive; no strict policies are supported.
class Clusivity(Enum):
    vals = ['mostly_incl', 'mostly_excl']
class Cache(BaseCache):
    """Concrete (instantiable) cache model based on BaseCache."""

    type = 'Cache'
    cxx_header = 'mem/cache/cache.hh'

    # Control whether this cache should be mostly inclusive or mostly
    # exclusive with respect to upstream caches. The behaviour on a
    # fill is determined accordingly. For a mostly inclusive cache,
    # blocks are allocated on all fill operations. Thus, L1 caches
    # should be set as mostly inclusive even if they have no upstream
    # caches. In the case of a mostly exclusive cache, fills are not
    # allocating unless they came directly from a non-caching source,
    # e.g. a table walker. Additionally, on a hit from an upstream
    # cache a line is dropped for a mostly exclusive cache.
    clusivity = Param.Clusivity('mostly_incl',
                                "Clusivity with upstream cache")

    # Determine if this cache sends out writebacks for clean lines, or
    # simply clean evicts. In cases where a downstream cache is mostly
    # exclusive with respect to this cache (acting as a victim cache),
    # the clean writebacks are essential for performance. In general
    # this should be set to True for anything but the last-level
    # cache.
    writeback_clean = Param.Bool(False, "Writeback clean lines")