# Copyright (c) 2012 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski

from m5.objects import *

# Simple ALU Instructions have a latency of 1
class O3_ARM_v7a_Simple_Int(FUDesc):
    opList = [ OpDesc(opClass='IntAlu', opLat=1) ]
    count = 2

# Complex ALU instructions have variable latencies
class O3_ARM_v7a_Complex_Int(FUDesc):
    opList = [ OpDesc(opClass='IntMult', opLat=3, pipelined=True),
               OpDesc(opClass='IntDiv', opLat=12, pipelined=False),
               OpDesc(opClass='IprAccess', opLat=3, pipelined=True) ]
    count = 1
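
# Example (illustrative sketch, not part of the Cortex-A9-like model above):
# 'pipelined=False' keeps a unit busy for its full opLat, whereas a pipelined
# unit can accept a new operation every cycle while results still take opLat
# cycles.  The class below is hypothetical and exists only to show the OpDesc
# parameters in isolation; nothing in this file uses it.
class Example_Unpipelined_Mult(FUDesc):
    opList = [ OpDesc(opClass='IntMult', opLat=4, pipelined=False) ]
    count = 1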

# Floating point and SIMD instructions
class O3_ARM_v7a_FP(FUDesc):
    opList = [ OpDesc(opClass='SimdAdd', opLat=4),
               OpDesc(opClass='SimdAddAcc', opLat=4),
               OpDesc(opClass='SimdAlu', opLat=4),
               OpDesc(opClass='SimdCmp', opLat=4),
               OpDesc(opClass='SimdCvt', opLat=3),
               OpDesc(opClass='SimdMisc', opLat=3),
               OpDesc(opClass='SimdMult', opLat=5),
               OpDesc(opClass='SimdMultAcc', opLat=5),
               OpDesc(opClass='SimdShift', opLat=3),
               OpDesc(opClass='SimdShiftAcc', opLat=3),
               OpDesc(opClass='SimdSqrt', opLat=9),
               OpDesc(opClass='SimdFloatAdd', opLat=5),
               OpDesc(opClass='SimdFloatAlu', opLat=5),
               OpDesc(opClass='SimdFloatCmp', opLat=3),
               OpDesc(opClass='SimdFloatCvt', opLat=3),
               OpDesc(opClass='SimdFloatDiv', opLat=3),
               OpDesc(opClass='SimdFloatMisc', opLat=3),
               OpDesc(opClass='SimdFloatMult', opLat=3),
               OpDesc(opClass='SimdFloatMultAcc', opLat=1),
               OpDesc(opClass='SimdFloatSqrt', opLat=9),
               OpDesc(opClass='FloatAdd', opLat=5),
               OpDesc(opClass='FloatCmp', opLat=5),
               OpDesc(opClass='FloatCvt', opLat=5),
               OpDesc(opClass='FloatDiv', opLat=9, pipelined=False),
               OpDesc(opClass='FloatSqrt', opLat=33, pipelined=False),
               OpDesc(opClass='FloatMult', opLat=4) ]
    count = 2

# Load/Store Units
class O3_ARM_v7a_Load(FUDesc):
    opList = [ OpDesc(opClass='MemRead', opLat=2) ]
    count = 1

class O3_ARM_v7a_Store(FUDesc):
    opList = [ OpDesc(opClass='MemWrite', opLat=2) ]
    count = 1

# Functional Units for this CPU
class O3_ARM_v7a_FUP(FUPool):
    FUList = [O3_ARM_v7a_Simple_Int(), O3_ARM_v7a_Complex_Int(),
              O3_ARM_v7a_Load(), O3_ARM_v7a_Store(), O3_ARM_v7a_FP()]
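
# Example (illustrative sketch): the same FUDesc/FUPool pattern can describe
# a wider machine by overriding 'count' when the descriptors are
# instantiated.  The pool below is hypothetical and is not referenced by
# O3_ARM_v7a_3; it only shows how a variant would be expressed.
class Example_Wider_FUP(FUPool):
    FUList = [O3_ARM_v7a_Simple_Int(count=3), O3_ARM_v7a_Complex_Int(),
              O3_ARM_v7a_Load(), O3_ARM_v7a_Store(), O3_ARM_v7a_FP()]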

# Bi-Mode Branch Predictor
class O3_ARM_v7a_BP(BiModeBP):
    globalPredictorSize = 8192
    globalCtrBits = 2
    choicePredictorSize = 8192
    choiceCtrBits = 2
    BTBEntries = 2048
    BTBTagSize = 18
    RASSize = 16
    instShiftAmt = 2
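
# Example (illustrative sketch): predictor parameters can also be overridden
# per instance from a run script instead of per class, e.g. when sweeping
# BTB sizes.  The helper below is hypothetical and unused here; it relies on
# the usual SimObject keyword-argument overrides.
def example_larger_btb_predictor():
    # Same predictor as above, but with a 4096-entry BTB.
    return O3_ARM_v7a_BP(BTBEntries=4096)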

class O3_ARM_v7a_3(DerivO3CPU):
    LQEntries = 16
    SQEntries = 16
    LSQDepCheckShift = 0
    LFSTSize = 1024
    SSITSize = 1024
    decodeToFetchDelay = 1
    renameToFetchDelay = 1
    iewToFetchDelay = 1
    commitToFetchDelay = 1
    renameToDecodeDelay = 1
    iewToDecodeDelay = 1
    commitToDecodeDelay = 1
    iewToRenameDelay = 1
    commitToRenameDelay = 1
    commitToIEWDelay = 1
    fetchWidth = 3
    fetchBufferSize = 16
    fetchToDecodeDelay = 3
    decodeWidth = 3
    decodeToRenameDelay = 2
    renameWidth = 3
    renameToIEWDelay = 1
    issueToExecuteDelay = 1
    dispatchWidth = 6
    issueWidth = 8
    wbWidth = 8
    fuPool = O3_ARM_v7a_FUP()
    iewToCommitDelay = 1
    renameToROBDelay = 1
    commitWidth = 8
    squashWidth = 8
    trapLatency = 13
    backComSize = 5
    forwardComSize = 5
    numPhysIntRegs = 128
    numPhysFloatRegs = 192
    numIQEntries = 32
    numROBEntries = 40

    switched_out = False
    branchPred = O3_ARM_v7a_BP()
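
# Example (illustrative sketch): this CPU model is normally selected by name
# from a run script (e.g. through a --cpu-type style option), but it can
# also be instantiated and tweaked directly.  The helper below is
# hypothetical and unused; clock domains, the interrupt controller and
# memory ports still have to be set up by the caller as usual.
def example_wider_cpu(width=4):
    cpu = O3_ARM_v7a_3(cpu_id=0)
    # Widen the front end for an experiment without touching the class.
    cpu.fetchWidth = width
    cpu.decodeWidth = width
    cpu.renameWidth = width
    return cpu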

# Instruction Cache
class O3_ARM_v7a_ICache(Cache):
    hit_latency = 1
    response_latency = 1
    mshrs = 2
    tgts_per_mshr = 8
    size = '32kB'
    assoc = 2
    is_read_only = True
    # Writeback clean lines as well
    writeback_clean = True

# Data Cache
class O3_ARM_v7a_DCache(Cache):
    hit_latency = 2
    response_latency = 2
    mshrs = 6
    tgts_per_mshr = 8
    size = '32kB'
    assoc = 2
    write_buffers = 16
    # Write back clean lines as well, so the L2 can act as a victim cache
    # for clean data too
    writeback_clean = True

# TLB Cache
# Use a cache as an L2 TLB
class O3_ARM_v7aWalkCache(Cache):
    hit_latency = 4
    response_latency = 4
    mshrs = 6
    tgts_per_mshr = 8
    size = '1kB'
    assoc = 8
    write_buffers = 16
    is_read_only = True
    # Writeback clean lines as well
    writeback_clean = True

# L2 Cache
class O3_ARM_v7aL2(Cache):
    hit_latency = 12
    response_latency = 12
    mshrs = 16
    tgts_per_mshr = 8
    size = '1MB'
    assoc = 16
    write_buffers = 8
    prefetch_on_access = True
    # Mostly exclusive L2: lines are allocated on writeback rather than on
    # fill, so the L2 acts as a victim cache
    clusivity = 'mostly_excl'
    # Simple stride prefetcher
    prefetcher = StridePrefetcher(degree=8, latency=1)
    tags = RandomRepl()
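
# Example (illustrative sketch): the cache classes above are normally wired
# onto the CPU from a run script.  The helper below is hypothetical and
# unused; it assumes the BaseCPU convenience method
# addTwoLevelCacheHierarchy() available in gem5 of this era, whose exact
# name and signature may differ between versions.
def example_add_caches(cpu):
    # Private L1 I/D caches and walker caches, backed by a shared L2.
    cpu.addTwoLevelCacheHierarchy(O3_ARM_v7a_ICache(), O3_ARM_v7a_DCache(),
                                  O3_ARM_v7aL2(),
                                  O3_ARM_v7aWalkCache(), O3_ARM_v7aWalkCache())
    return cpu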