Compare commits


No commits in common. "c7f92c43da5b5d96b4287c418beaab3e56a91137" and "66a1016a3548e244f4d96773bfa8985262e4d4b4" have entirely different histories.

19 changed files with 260 additions and 744 deletions

View file

@ -399,7 +399,6 @@ def install_git_style_hooks():
if not git_hooks.exists():
mkdir(git_hooks.get_abspath())
git_hooks.clear()
abs_symlink_hooks = git_hooks.islink() and \
os.path.isabs(os.readlink(git_hooks.get_abspath()))

View file

@ -1,4 +1,4 @@
# Copyright (c) 2016-2017 ARM Limited
# Copyright (c) 2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@ -44,8 +44,6 @@ m5.util.addToPath('../../')
from common.Caches import *
from common import CpuConfig
have_kvm = "kvm" in CpuConfig.cpu_names()
class L1I(L1_ICache):
tag_latency = 1
data_latency = 1
@ -172,14 +170,6 @@ class AtomicCluster(CpuCluster):
def addL1(self):
pass
class KvmCluster(CpuCluster):
def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"):
cpu_config = [ CpuConfig.get("kvm"), None, None, None, None ]
super(KvmCluster, self).__init__(system, num_cpus, cpu_clock,
cpu_voltage, *cpu_config)
def addL1(self):
pass
class SimpleSystem(LinuxArmSystem):
cache_line_size = 64
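Note: the cluster classes above are meant to be driven from a config script with a parent system, a CPU count and a clock. A minimal, hedged sketch of that usage (the `system` object and the values are illustrative assumptions, not part of this change):

# Sketch only: how a config script uses the cluster classes defined above.
# `system` is assumed to be an already constructed SimpleSystem.
import devices

def add_atomic_cluster(system, num_cpus=2, cpu_clock="1GHz"):
    # AtomicCluster is kept by this change; the removed KvmCluster was
    # instantiated the same way when gem5 was built with KVM support.
    cluster = devices.AtomicCluster(system, num_cpus, cpu_clock)
    cluster.addL1()   # a no-op for atomic CPUs, see addL1() above
    return cluster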

View file

@ -1,4 +1,4 @@
# Copyright (c) 2016-2017 ARM Limited
# Copyright (c) 2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@ -132,7 +132,12 @@ def main():
root = bL.build(options)
addEthernet(root.system, options)
bL.instantiate(options, checkpoint_dir=options.checkpoint_dir)
if options.restore_from:
checkpoint_path = os.path.join(options.checkpoint_dir,
options.restore_from)
else:
checkpoint_path = None
bL.instantiate(checkpoint_path)
bL.run(options.checkpoint_dir)
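The old bL.instantiate(options, checkpoint_dir=...) resolved an absolute --restore-from path by itself; with the new bL.instantiate(checkpoint_path) signature the caller joins the path, as shown above. A hedged sketch of a caller that keeps the absolute-path behaviour (names follow the options used here):

import os

def resolve_checkpoint_path(restore_from, checkpoint_dir):
    # Mirrors the removed logic: an absolute --restore-from wins, otherwise
    # it is taken relative to --checkpoint-dir; None means a cold start.
    if restore_from is None:
        return None
    if checkpoint_dir and not os.path.isabs(restore_from):
        return os.path.join(checkpoint_dir, restore_from)
    return restore_from

# usage (sketch): bL.instantiate(resolve_checkpoint_path(options.restore_from,
#                                                        options.checkpoint_dir))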

View file

@ -1,4 +1,4 @@
# Copyright (c) 2016-2017 ARM Limited
# Copyright (c) 2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@ -44,7 +44,6 @@ import argparse
import os
import sys
import m5
import m5.util
from m5.objects import *
m5.util.addToPath("../../")
@ -53,7 +52,6 @@ from common import SysPaths
from common import CpuConfig
import devices
from devices import AtomicCluster, KvmCluster
default_dtb = 'armv8_gem5_v1_big_little_2_2.dtb'
@ -63,21 +61,6 @@ default_rcs = 'bootscript.rcS'
default_mem_size= "2GB"
def _to_ticks(value):
"""Helper function to convert a latency from string format to Ticks"""
return m5.ticks.fromSeconds(m5.util.convert.anyToLatency(value))
def _using_pdes(root):
"""Determine if the simulator is using multiple parallel event queues"""
for obj in root.descendants():
if not m5.proxy.isproxy(obj.eventq_index) and \
obj.eventq_index != root.eventq_index:
return True
return False
class BigCluster(devices.CpuCluster):
def __init__(self, system, num_cpus, cpu_clock,
@ -124,15 +107,6 @@ def createSystem(caches, kernel, bootscript, disks=[]):
return sys
cpu_types = {
"atomic" : (AtomicCluster, AtomicCluster),
"timing" : (BigCluster, LittleCluster),
}
# Only add the KVM CPU if it has been compiled into gem5
if devices.have_kvm:
cpu_types["kvm"] = (KvmCluster, KvmCluster)
def addOptions(parser):
parser.add_argument("--restore-from", type=str, default=None,
@ -145,9 +119,8 @@ def addOptions(parser):
help="Disks to instantiate")
parser.add_argument("--bootscript", type=str, default=default_rcs,
help="Linux bootscript")
parser.add_argument("--cpu-type", type=str, choices=cpu_types.keys(),
default="timing",
help="CPU simulation mode. Default: %(default)s")
parser.add_argument("--atomic", action="store_true", default=False,
help="Use atomic CPUs")
parser.add_argument("--kernel-init", type=str, default="/sbin/init",
help="Override init")
parser.add_argument("--big-cpus", type=int, default=1,
@ -162,11 +135,9 @@ def addOptions(parser):
help="Big CPU clock frequency")
parser.add_argument("--little-cpu-clock", type=str, default="1GHz",
help="Little CPU clock frequency")
parser.add_argument("--sim-quantum", type=str, default="1ms",
help="Simulation quantum for parallel simulation. " \
"Default: %(default)s")
return parser
def build(options):
m5.ticks.fixGlobalFrequency()
@ -194,31 +165,35 @@ def build(options):
root.system = system
system.boot_osflags = " ".join(kernel_cmd)
AtomicCluster = devices.AtomicCluster
if options.big_cpus + options.little_cpus == 0:
m5.util.panic("Empty CPU clusters")
big_model, little_model = cpu_types[options.cpu_type]
all_cpus = []
# big cluster
if options.big_cpus > 0:
system.bigCluster = big_model(system, options.big_cpus,
options.big_cpu_clock)
system.mem_mode = system.bigCluster.memoryMode()
all_cpus += system.bigCluster.cpus
if options.atomic:
system.bigCluster = AtomicCluster(system, options.big_cpus,
options.big_cpu_clock)
else:
system.bigCluster = BigCluster(system, options.big_cpus,
options.big_cpu_clock)
mem_mode = system.bigCluster.memoryMode()
# little cluster
if options.little_cpus > 0:
system.littleCluster = little_model(system, options.little_cpus,
options.little_cpu_clock)
system.mem_mode = system.littleCluster.memoryMode()
all_cpus += system.littleCluster.cpus
if options.atomic:
system.littleCluster = AtomicCluster(system, options.little_cpus,
options.little_cpu_clock)
else:
system.littleCluster = LittleCluster(system, options.little_cpus,
options.little_cpu_clock)
mem_mode = system.littleCluster.memoryMode()
# Figure out the memory mode
if options.big_cpus > 0 and options.little_cpus > 0 and \
system.littleCluster.memoryMode() != system.littleCluster.memoryMode():
m5.util.panic("Memory mode missmatch among CPU clusters")
if options.big_cpus > 0 and options.little_cpus > 0:
if system.bigCluster.memoryMode() != system.littleCluster.memoryMode():
m5.util.panic("Memory mode missmatch among CPU clusters")
system.mem_mode = mem_mode
# create caches
system.addCaches(options.caches, options.last_cache_level)
@ -228,52 +203,17 @@ def build(options):
if options.little_cpus > 0 and system.littleCluster.requireCaches():
m5.util.panic("Little CPU model requires caches")
# Create a KVM VM and do KVM-specific configuration
if issubclass(big_model, KvmCluster):
_build_kvm(system, all_cpus)
# Linux device tree
system.dtb_filename = SysPaths.binary(options.dtb)
return root
def _build_kvm(system, cpus):
system.kvm_vm = KvmVM()
# Assign KVM CPUs to their own event queues / threads. This
# has to be done after creating caches and other child objects
# since these mustn't inherit the CPU event queue.
if len(cpus) > 1:
device_eq = 0
first_cpu_eq = 1
for idx, cpu in enumerate(cpus):
# Child objects usually inherit the parent's event
# queue. Override that and use the same event queue for
# all devices.
for obj in cpu.descendants():
obj.eventq_index = device_eq
cpu.eventq_index = first_cpu_eq + idx
def instantiate(options, checkpoint_dir=None):
# Setup the simulation quantum if we are running in PDES-mode
# (e.g., when using KVM)
root = Root.getInstance()
if root and _using_pdes(root):
m5.util.inform("Running in PDES mode with a %s simulation quantum.",
options.sim_quantum)
root.sim_quantum = _to_ticks(options.sim_quantum)
def instantiate(checkpoint_path=None):
# Get and load from the chkpt or simpoint checkpoint
if options.restore_from:
if checkpoint_dir and not os.path.isabs(options.restore_from):
cpt = os.path.join(checkpoint_dir, options.restore_from)
else:
cpt = options.restore_from
m5.util.inform("Restoring from checkpoint %s", cpt)
m5.instantiate(cpt)
if checkpoint_path is not None:
m5.util.inform("Restoring from checkpoint %s", checkpoint_path)
m5.instantiate(checkpoint_path)
else:
m5.instantiate()
@ -301,7 +241,7 @@ def main():
addOptions(parser)
options = parser.parse_args()
root = build(options)
instantiate(options)
instantiate(options.restore_from)
run()
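The removed helpers above (_to_ticks, _using_pdes and the quantum setup in the old instantiate()) exist because KVM CPUs run on their own event queues, so the simulation becomes parallel (PDES) and needs a simulation quantum before m5.instantiate(). A hedged sketch of that setup as a standalone helper, usable only inside a gem5 config script; the "1ms" default mirrors the removed --sim-quantum option:

import m5
import m5.util
from m5.objects import Root

def maybe_set_sim_quantum(sim_quantum="1ms"):
    # Detect parallel event queues the same way the removed _using_pdes() did.
    root = Root.getInstance()
    if root is None:
        return
    uses_pdes = any(not m5.proxy.isproxy(obj.eventq_index) and
                    obj.eventq_index != root.eventq_index
                    for obj in root.descendants())
    if uses_pdes:
        m5.util.inform("Running in PDES mode with a %s simulation quantum.",
                       sim_quantum)
        # Same conversion the removed _to_ticks() helper performed.
        root.sim_quantum = m5.ticks.fromSeconds(
            m5.util.convert.anyToLatency(sim_quantum))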

View file

@ -1,6 +1,6 @@
// -*- mode:c++ -*-
// Copyright (c) 2010-2013,2017 ARM Limited
// Copyright (c) 2010-2013 ARM Limited
// All rights reserved
//
// The license below extends only to copyright in the software and shall
@ -814,9 +814,7 @@ let {{
mrc14code = '''
MiscRegIndex miscReg = (MiscRegIndex) xc->tcBase()->flattenMiscIndex(op1);
bool can_read, undefined;
std::tie(can_read, undefined) = canReadCoprocReg(miscReg, Scr, Cpsr);
if (!can_read || undefined) {
if (!canReadCoprocReg(miscReg, Scr, Cpsr, xc->tcBase())) {
return std::make_shared<UndefinedInstruction>(machInst, false,
mnemonic);
}
@ -838,9 +836,7 @@ let {{
mcr14code = '''
MiscRegIndex miscReg = (MiscRegIndex) xc->tcBase()->flattenMiscIndex(dest);
bool can_write, undefined;
std::tie(can_write, undefined) = canWriteCoprocReg(miscReg, Scr, Cpsr);
if (undefined || !can_write) {
if (!canWriteCoprocReg(miscReg, Scr, Cpsr, xc->tcBase())) {
return std::make_shared<UndefinedInstruction>(machInst, false,
mnemonic);
}
@ -865,13 +861,12 @@ let {{
xc->tcBase()->flattenMiscIndex(preFlatOp1);
bool hypTrap = mcrMrc15TrapToHyp(miscReg, Hcr, Cpsr, Scr, Hdcr, Hstr,
Hcptr, imm);
bool can_read, undefined;
std::tie(can_read, undefined) = canReadCoprocReg(miscReg, Scr, Cpsr);
bool canRead = canReadCoprocReg(miscReg, Scr, Cpsr, xc->tcBase());
// if we're in non secure PL1 mode then we can trap regargless of whether
// the register is accessable, in other modes we trap if only if the register
// IS accessable.
if (undefined || (!can_read && !(hypTrap && !inUserMode(Cpsr) &&
!inSecureState(Scr, Cpsr)))) {
if (!canRead && !(hypTrap && !inUserMode(Cpsr) && !inSecureState(Scr, Cpsr))) {
return std::make_shared<UndefinedInstruction>(machInst, false,
mnemonic);
}
@ -896,14 +891,12 @@ let {{
xc->tcBase()->flattenMiscIndex(preFlatDest);
bool hypTrap = mcrMrc15TrapToHyp(miscReg, Hcr, Cpsr, Scr, Hdcr, Hstr,
Hcptr, imm);
bool can_write, undefined;
std::tie(can_write, undefined) = canWriteCoprocReg(miscReg, Scr, Cpsr);
bool canWrite = canWriteCoprocReg(miscReg, Scr, Cpsr, xc->tcBase());
// if we're in non secure PL1 mode then we can trap regargless of whether
// the register is accessable, in other modes we trap if only if the register
// IS accessable.
if (undefined || (!can_write && !(hypTrap && !inUserMode(Cpsr) &&
!inSecureState(Scr, Cpsr)))) {
if (!canWrite & !(hypTrap & !inUserMode(Cpsr) & !inSecureState(Scr, Cpsr))) {
return std::make_shared<UndefinedInstruction>(machInst, false,
mnemonic);
}
@ -927,13 +920,12 @@ let {{
MiscRegIndex miscReg = (MiscRegIndex)
xc->tcBase()->flattenMiscIndex(preFlatOp1);
bool hypTrap = mcrrMrrc15TrapToHyp(miscReg, Cpsr, Scr, Hstr, Hcr, imm);
bool can_read, undefined;
std::tie(can_read, undefined) = canReadCoprocReg(miscReg, Scr, Cpsr);
bool canRead = canReadCoprocReg(miscReg, Scr, Cpsr, xc->tcBase());
// if we're in non secure PL1 mode then we can trap regargless of whether
// the register is accessable, in other modes we trap if only if the register
// IS accessable.
if (undefined || (!can_read && !(hypTrap && !inUserMode(Cpsr) &&
!inSecureState(Scr, Cpsr)))) {
if (!canRead && !(hypTrap && !inUserMode(Cpsr) && !inSecureState(Scr, Cpsr))) {
return std::make_shared<UndefinedInstruction>(machInst, false,
mnemonic);
}
@ -957,14 +949,12 @@ let {{
MiscRegIndex miscReg = (MiscRegIndex)
xc->tcBase()->flattenMiscIndex(preFlatDest);
bool hypTrap = mcrrMrrc15TrapToHyp(miscReg, Cpsr, Scr, Hstr, Hcr, imm);
bool can_write, undefined;
std::tie(can_write, undefined) = canWriteCoprocReg(miscReg, Scr, Cpsr);
bool canWrite = canWriteCoprocReg(miscReg, Scr, Cpsr, xc->tcBase());
// if we're in non secure PL1 mode then we can trap regargless of whether
// the register is accessable, in other modes we trap if only if the register
// IS accessable.
if (undefined || (!can_write && !(hypTrap && !inUserMode(Cpsr) &&
!inSecureState(Scr, Cpsr)))) {
if (!canWrite & !(hypTrap & !inUserMode(Cpsr) & !inSecureState(Scr, Cpsr))) {
return std::make_shared<UndefinedInstruction>(machInst, false,
mnemonic);
}

View file

@ -43,7 +43,6 @@
#include <linux/kvm.h>
#include "arch/arm/kvm/base_cpu.hh"
#include "debug/GIC.hh"
#include "debug/Interrupt.hh"
#include "params/MuxingKvmGic.hh"
@ -105,63 +104,6 @@ KvmKernelGicV2::setIntState(unsigned type, unsigned vcpu, unsigned irq,
vm.setIRQLine(line, high);
}
uint32_t
KvmKernelGicV2::getGicReg(unsigned group, unsigned vcpu, unsigned offset)
{
uint64_t reg;
assert(vcpu <= KVM_ARM_IRQ_VCPU_MASK);
const uint32_t attr(
(vcpu << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
(offset << KVM_DEV_ARM_VGIC_OFFSET_SHIFT));
kdev.getAttrPtr(group, attr, &reg);
return (uint32_t) reg;
}
void
KvmKernelGicV2::setGicReg(unsigned group, unsigned vcpu, unsigned offset,
unsigned value)
{
uint64_t reg = value;
assert(vcpu <= KVM_ARM_IRQ_VCPU_MASK);
const uint32_t attr(
(vcpu << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
(offset << KVM_DEV_ARM_VGIC_OFFSET_SHIFT));
kdev.setAttrPtr(group, attr, &reg);
}
uint32_t
KvmKernelGicV2::readDistributor(ContextID ctx, Addr daddr)
{
auto vcpu = vm.contextIdToVCpuId(ctx);
return getGicReg(KVM_DEV_ARM_VGIC_GRP_DIST_REGS, vcpu, daddr);
}
uint32_t
KvmKernelGicV2::readCpu(ContextID ctx, Addr daddr)
{
auto vcpu = vm.contextIdToVCpuId(ctx);
return getGicReg(KVM_DEV_ARM_VGIC_GRP_CPU_REGS, vcpu, daddr);
}
void
KvmKernelGicV2::writeDistributor(ContextID ctx, Addr daddr, uint32_t data)
{
auto vcpu = vm.contextIdToVCpuId(ctx);
setGicReg(KVM_DEV_ARM_VGIC_GRP_DIST_REGS, vcpu, daddr, data);
}
void
KvmKernelGicV2::writeCpu(ContextID ctx, Addr daddr, uint32_t data)
{
auto vcpu = vm.contextIdToVCpuId(ctx);
setGicReg(KVM_DEV_ARM_VGIC_GRP_CPU_REGS, vcpu, daddr, data);
}
MuxingKvmGic::MuxingKvmGic(const MuxingKvmGicParams *p)
: Pl390(p),
@ -179,39 +121,21 @@ MuxingKvmGic::~MuxingKvmGic()
{
}
void
MuxingKvmGic::loadState(CheckpointIn &cp)
{
Pl390::loadState(cp);
}
void
MuxingKvmGic::startup()
{
Pl390::startup();
usingKvm = (kernelGic != nullptr) && validKvmEnvironment();
if (usingKvm)
fromPl390ToKvm();
}
DrainState
MuxingKvmGic::drain()
{
if (usingKvm)
fromKvmToPl390();
return Pl390::drain();
} }
void
MuxingKvmGic::drainResume()
{
Pl390::drainResume();
bool use_kvm = (kernelGic != nullptr) && validKvmEnvironment();
if (use_kvm != usingKvm) {
// Should only occur due to CPU switches
if (use_kvm) // from simulation to KVM emulation
fromPl390ToKvm();
// otherwise, drain() already sync'd the state back to the Pl390
else // from KVM emulation to simulation
fromKvmToPl390();
usingKvm = use_kvm;
}
@ -220,14 +144,19 @@ MuxingKvmGic::drainResume()
void
MuxingKvmGic::serialize(CheckpointOut &cp) const
{
// drain() already ensured Pl390 updated with KvmGic state if necessary
Pl390::serialize(cp);
if (!usingKvm)
return Pl390::serialize(cp);
panic("Checkpointing unsupported\n");
}
void
MuxingKvmGic::unserialize(CheckpointIn &cp)
{
Pl390::unserialize(cp);
if (!usingKvm)
return Pl390::unserialize(cp);
panic("Checkpointing unsupported\n");
}
Tick
@ -301,150 +230,16 @@ MuxingKvmGic::validKvmEnvironment() const
return true;
}
void
MuxingKvmGic::copyDistRegister(BaseGicRegisters* from, BaseGicRegisters* to,
ContextID ctx, Addr daddr)
{
auto val = from->readDistributor(ctx, daddr);
DPRINTF(GIC, "copy dist 0x%x 0x%08x\n", daddr, val);
to->writeDistributor(ctx, daddr, val);
}
void
MuxingKvmGic::copyCpuRegister(BaseGicRegisters* from, BaseGicRegisters* to,
ContextID ctx, Addr daddr)
{
auto val = from->readCpu(ctx, daddr);
DPRINTF(GIC, "copy cpu 0x%x 0x%08x\n", daddr, val);
to->writeCpu(ctx, daddr, val);
}
void
MuxingKvmGic::copyBankedDistRange(BaseGicRegisters* from, BaseGicRegisters* to,
Addr daddr, size_t size)
{
for (int ctx = 0; ctx < system._numContexts; ++ctx)
for (auto a = daddr; a < daddr + size; a += 4)
copyDistRegister(from, to, ctx, a);
}
void
MuxingKvmGic::clearBankedDistRange(BaseGicRegisters* to,
Addr daddr, size_t size)
{
for (int ctx = 0; ctx < system._numContexts; ++ctx)
for (auto a = daddr; a < daddr + size; a += 4)
to->writeDistributor(ctx, a, 0xFFFFFFFF);
}
void
MuxingKvmGic::copyDistRange(BaseGicRegisters* from, BaseGicRegisters* to,
Addr daddr, size_t size)
{
for (auto a = daddr; a < daddr + size; a += 4)
copyDistRegister(from, to, 0, a);
}
void
MuxingKvmGic::clearDistRange(BaseGicRegisters* to,
Addr daddr, size_t size)
{
for (auto a = daddr; a < daddr + size; a += 4)
to->writeDistributor(0, a, 0xFFFFFFFF);
}
void
MuxingKvmGic::copyGicState(BaseGicRegisters* from, BaseGicRegisters* to)
{
Addr set, clear;
size_t size;
/// CPU state (GICC_*)
// Copy CPU Interface Control Register (CTLR),
// Interrupt Priority Mask Register (PMR), and
// Binary Point Register (BPR)
for (int ctx = 0; ctx < system._numContexts; ++ctx) {
copyCpuRegister(from, to, ctx, GICC_CTLR);
copyCpuRegister(from, to, ctx, GICC_PMR);
copyCpuRegister(from, to, ctx, GICC_BPR);
}
/// Distributor state (GICD_*)
// Copy Distributor Control Register (CTLR)
copyDistRegister(from, to, 0, GICD_CTLR);
// Copy interrupt-enabled statuses (I[CS]ENABLERn; R0 is per-CPU banked)
set = Pl390::GICD_ISENABLER.start();
clear = Pl390::GICD_ICENABLER.start();
size = Pl390::itLines / 8;
clearBankedDistRange(to, clear, 4);
copyBankedDistRange(from, to, set, 4);
set += 4, clear += 4, size -= 4;
clearDistRange(to, clear, size);
copyDistRange(from, to, set, size);
// Copy pending interrupts (I[CS]PENDRn; R0 is per-CPU banked)
set = Pl390::GICD_ISPENDR.start();
clear = Pl390::GICD_ICPENDR.start();
size = Pl390::itLines / 8;
clearBankedDistRange(to, clear, 4);
copyBankedDistRange(from, to, set, 4);
set += 4, clear += 4, size -= 4;
clearDistRange(to, clear, size);
copyDistRange(from, to, set, size);
// Copy active interrupts (I[CS]ACTIVERn; R0 is per-CPU banked)
set = Pl390::GICD_ISACTIVER.start();
clear = Pl390::GICD_ICACTIVER.start();
size = Pl390::itLines / 8;
clearBankedDistRange(to, clear, 4);
copyBankedDistRange(from, to, set, 4);
set += 4, clear += 4, size -= 4;
clearDistRange(to, clear, size);
copyDistRange(from, to, set, size);
// Copy interrupt priorities (IPRIORITYRn; R0-7 are per-CPU banked)
set = Pl390::GICD_IPRIORITYR.start();
copyBankedDistRange(from, to, set, 32);
set += 32;
size = Pl390::itLines - 32;
copyDistRange(from, to, set, size);
// Copy interrupt processor target regs (ITARGETRn; R0-7 are read-only)
set = Pl390::GICD_ITARGETSR.start() + 32;
size = Pl390::itLines - 32;
copyDistRange(from, to, set, size);
// Copy interrupt configuration registers (ICFGRn)
set = Pl390::GICD_ICFGR.start();
size = Pl390::itLines / 4;
copyDistRange(from, to, set, size);
}
void
MuxingKvmGic::fromPl390ToKvm()
{
copyGicState(static_cast<Pl390*>(this), kernelGic);
panic("Gic multiplexing not implemented.\n");
}
void
MuxingKvmGic::fromKvmToPl390()
{
copyGicState(kernelGic, static_cast<Pl390*>(this));
panic("Gic multiplexing not implemented.\n");
// the values read for the Interrupt Priority Mask Register (PMR)
// have been shifted by three bits due to its having been emulated by
// a VGIC with only 5 PMR bits in its VMCR register. Presently the
// Linux kernel does not repair this inaccuracy, so we correct it here.
for (int cpu = 0; cpu < system._numContexts; ++cpu) {
cpuPriority[cpu] <<= 3;
assert((cpuPriority[cpu] & ~0xff) == 0);
}
} }
MuxingKvmGic *
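The removed comment at the end of fromKvmToPl390() documents the one data fix-up in the KVM-to-Pl390 direction: the in-kernel VGIC emulates only 5 PMR bits in its VMCR, so priority-mask values read back from KVM must be shifted left by three to recover the 8-bit value the Pl390 model stores. A small, hedged illustration of that correction (plain Python, example values):

def fix_pmr(kvm_value):
    # Shift the 5-bit VGIC priority mask back into the 8-bit register,
    # exactly as the removed per-CPU loop did.
    value = kvm_value << 3
    assert (value & ~0xFF) == 0   # must still fit in the 8-bit PMR
    return value

assert fix_pmr(0x1F) == 0xF8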

View file

@ -54,7 +54,7 @@
* model. It exposes an API that is similar to that of
* software-emulated GIC models in gem5.
*/
class KvmKernelGicV2 : public BaseGicRegisters
class KvmKernelGicV2
{
public:
/**
@ -117,14 +117,6 @@ class KvmKernelGicV2 : public BaseGicRegisters
/** Address range for the distributor interface */
const AddrRange distRange;
/** BaseGicRegisters interface */
uint32_t readDistributor(ContextID ctx, Addr daddr) override;
uint32_t readCpu(ContextID ctx, Addr daddr) override;
void writeDistributor(ContextID ctx, Addr daddr,
uint32_t data) override;
void writeCpu(ContextID ctx, Addr daddr, uint32_t data) override;
/* @} */
protected:
@ -138,26 +130,6 @@ class KvmKernelGicV2 : public BaseGicRegisters
*/
void setIntState(unsigned type, unsigned vcpu, unsigned irq, bool high);
/**
* Get value of GIC register "from" a cpu
*
* @param group Distributor or CPU (KVM_DEV_ARM_VGIC_GRP_{DIST,CPU}_REGS)
* @param vcpu CPU id within KVM
* @param offset register offset
*/
uint32_t getGicReg(unsigned group, unsigned vcpu, unsigned offset);
/**
* Set value of GIC register "from" a cpu
*
* @param group Distributor or CPU (KVM_DEV_ARM_VGIC_GRP_{DIST,CPU}_REGS)
* @param vcpu CPU id within KVM
* @param offset register offset
* @param value value to set register to
*/
void setGicReg(unsigned group, unsigned vcpu, unsigned offset,
unsigned value);
/** KVM VM in the parent system */
KvmVM &vm;
@ -174,10 +146,7 @@ class MuxingKvmGic : public Pl390
MuxingKvmGic(const MuxingKvmGicParams *p);
~MuxingKvmGic();
void loadState(CheckpointIn &cp) override;
void startup() override;
DrainState drain() override;
void drainResume() override;
void serialize(CheckpointOut &cp) const override;
@ -207,25 +176,9 @@ class MuxingKvmGic : public Pl390
private:
bool usingKvm;
/** Multiplexing implementation */
/** Multiplexing implementation: state transfer functions */
void fromPl390ToKvm();
void fromKvmToPl390();
void copyGicState(BaseGicRegisters* from, BaseGicRegisters* to);
void copyDistRegister(BaseGicRegisters* from, BaseGicRegisters* to,
ContextID ctx, Addr daddr);
void copyCpuRegister(BaseGicRegisters* from, BaseGicRegisters* to,
ContextID ctx, Addr daddr);
void copyBankedDistRange(BaseGicRegisters* from, BaseGicRegisters* to,
Addr daddr, size_t size);
void clearBankedDistRange(BaseGicRegisters* to,
Addr daddr, size_t size);
void copyDistRange(BaseGicRegisters* from, BaseGicRegisters* to,
Addr daddr, size_t size);
void clearDistRange(BaseGicRegisters* to,
Addr daddr, size_t size);
};
#endif // __ARCH_ARM_KVM_GIC_HH__

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010-2013, 2015-2017 ARM Limited
* Copyright (c) 2010-2013, 2015-2016 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@ -41,8 +41,6 @@
#include "arch/arm/miscregs.hh"
#include <tuple>
#include "arch/arm/isa.hh"
#include "base/misc.hh"
#include "cpu/thread_context.hh"
@ -1969,12 +1967,11 @@ decodeCP15Reg64(unsigned crm, unsigned opc1)
return MISCREG_CP15_UNIMPL;
}
std::tuple<bool, bool>
canReadCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr)
bool
canReadCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr, ThreadContext *tc)
{
bool secure = !scr.ns;
bool canRead = false;
bool undefined = false;
bool canRead;
switch (cpsr.mode) {
case MODE_USER:
@ -1998,19 +1995,18 @@ canReadCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr)
canRead = miscRegInfo[reg][MISCREG_HYP_RD];
break;
default:
undefined = true;
panic("Unrecognized mode setting in CPSR.\n");
}
// can't do permissions checkes on the root of a banked pair of regs
assert(!miscRegInfo[reg][MISCREG_BANKED]);
return std::make_tuple(canRead, undefined);
return canRead;
}
std::tuple<bool, bool>
canWriteCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr)
bool
canWriteCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr, ThreadContext *tc)
{
bool secure = !scr.ns;
bool canWrite = false;
bool undefined = false;
bool canWrite;
switch (cpsr.mode) {
case MODE_USER:
@ -2034,11 +2030,11 @@ canWriteCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr)
canWrite = miscRegInfo[reg][MISCREG_HYP_WR];
break;
default:
undefined = true;
panic("Unrecognized mode setting in CPSR.\n");
}
// can't do permissions checkes on the root of a banked pair of regs
assert(!miscRegInfo[reg][MISCREG_BANKED]);
return std::make_tuple(canWrite, undefined);
return canWrite;
}
int

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010-2017 ARM Limited
* Copyright (c) 2010-2016 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@ -44,7 +44,6 @@
#define __ARCH_ARM_MISCREGS_HH__
#include <bitset>
#include <tuple>
#include "base/bitunion.hh"
#include "base/compiler.hh"
@ -1848,37 +1847,13 @@ namespace ArmISA
EndBitUnion(CPTR)
/**
* Check for permission to read coprocessor registers.
*
* Checks whether an instruction at the current program mode has
* permissions to read the coprocessor registers. This function
* returns whether the check is undefined and if not whether the
* read access is permitted.
*
* @param the misc reg indicating the coprocessor
* @param the SCR
* @param the CPSR
* @return a tuple of booleans: can_read, undefined
*/
std::tuple<bool, bool> canReadCoprocReg(MiscRegIndex reg, SCR scr,
CPSR cpsr);
// Checks read access permissions to coproc. registers
bool canReadCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr,
ThreadContext *tc);
/**
* Check for permission to write coprocessor registers.
*
* Checks whether an instruction at the current program mode has
* permissions to write the coprocessor registers. This function
* returns whether the check is undefined and if not whether the
* write access is permitted.
*
* @param the misc reg indicating the coprocessor
* @param the SCR
* @param the CPSR
* @return a tuple of booleans: can_write, undefined
*/
std::tuple<bool, bool> canWriteCoprocReg(MiscRegIndex reg, SCR scr,
CPSR cpsr);
// Checks write access permissions to coproc. registers
bool canWriteCoprocReg(MiscRegIndex reg, SCR scr, CPSR cpsr,
ThreadContext *tc);
// Checks read access permissions to AArch64 system registers
bool canReadAArch64SysReg(MiscRegIndex reg, SCR scr, CPSR cpsr,

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2012-2017 ARM Limited
* Copyright (c) 2010, 2012-2016 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@ -1342,10 +1342,7 @@ TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
attr_hi == 2 ? 2 : 1;
te.innerAttrs = attr_lo == 1 ? 0 :
attr_lo == 2 ? 6 : 5;
// Treat write-through memory as uncacheable, this is safe
// but for performance reasons not optimal.
te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
(attr_lo == 1) || (attr_lo == 2);
te.nonCacheable = (attr_hi == 1) || (attr_lo == 1);
}
} else {
uint8_t attrIndx = lDescriptor.attrIndx();
@ -1380,25 +1377,9 @@ TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
// Cacheability
te.nonCacheable = false;
if (te.mtype == TlbEntry::MemoryType::Device) { // Device memory
te.nonCacheable = true;
}
// Treat write-through memory as uncacheable, this is safe
// but for performance reasons not optimal.
switch (attr_hi) {
case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
case 0x4: // Normal memory, Outer Non-cacheable
case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
te.nonCacheable = true;
}
switch (attr_lo) {
case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
warn_if(!attr_hi, "Unpredictable behavior");
case 0x4: // Device-nGnRE memory or
// Normal memory, Inner Non-cacheable
case 0x8: // Device-nGRE memory or
// Normal memory, Inner Write-through non-transient
if (te.mtype == TlbEntry::MemoryType::Device || // Device memory
attr_hi == 0x8 || // Normal memory, Outer Non-cacheable
attr_lo == 0x8) { // Normal memory, Inner Non-cacheable
te.nonCacheable = true;
}
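The removed switch statements above spell out the long-descriptor MAIR decoding: Device memory, Non-cacheable attributes and, conservatively, all Write-through attributes are treated as uncacheable, while the replacement keeps only the Device and Non-cacheable cases. A hedged Python sketch of the removed decision, using the same attr_hi/attr_lo nibble encoding (illustration only):

def non_cacheable(mtype_is_device, attr_hi, attr_lo):
    # True if the translation entry must bypass the caches (old policy).
    if mtype_is_device:
        return True
    # Outer nibble: write-through (transient or not) or non-cacheable.
    if attr_hi in (0x1, 0x2, 0x3, 0x4, 0x8, 0x9, 0xA, 0xB):
        return True
    # Inner nibble: the same classes on the inner side.
    if attr_lo in (0x1, 0x2, 0x3, 0x4, 0x8, 0x9, 0xA, 0xB):
        return True
    return False

# Example: Outer Write-back, Inner Non-cacheable memory is marked uncacheable.
assert non_cacheable(False, 0xF, 0x4)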

View file

@ -106,7 +106,6 @@ class BaseKvmCPU : public BaseCPU
void deallocateContext(ThreadID thread_num);
void haltContext(ThreadID thread_num) override;
long getVCpuID() const { return vcpuID; }
ThreadContext *getContext(int tn) override;
Counter totalInsts() const override;

View file

@ -50,7 +50,6 @@
#include <cerrno>
#include <memory>
#include "cpu/kvm/base.hh"
#include "debug/Kvm.hh"
#include "params/KvmVM.hh"
#include "sim/system.hh"
@ -529,21 +528,12 @@ KvmVM::createDevice(uint32_t type, uint32_t flags)
}
void
KvmVM::setSystem(System *s)
{
KvmVM::setSystem(System *s) {
panic_if(system != nullptr, "setSystem() can only be called once");
panic_if(s == nullptr, "setSystem() called with null System*");
system = s;
}
long
KvmVM::contextIdToVCpuId(ContextID ctx) const
{
assert(system != nullptr);
return dynamic_cast<BaseKvmCPU*>
(system->getThreadContext(ctx)->getCpuPtr())->getVCpuID();
}
int
KvmVM::createVCPU(long vcpuID)
{

View file

@ -48,7 +48,6 @@
// forward declarations
struct KvmVMParams;
class BaseKvmCPU;
class System;
/**
@ -406,11 +405,6 @@ class KvmVM : public SimObject
*/
void setSystem(System *s);
/**
* Get the VCPUID for a given context
*/
long contextIdToVCpuId(ContextID ctx) const;
#if defined(__aarch64__)
public: // ARM-specific
/**

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2013, 2017 ARM Limited
* Copyright (c) 2012-2013 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@ -92,15 +92,4 @@ class BaseGic : public PioDevice
Platform *platform;
};
class BaseGicRegisters
{
public:
virtual uint32_t readDistributor(ContextID ctx, Addr daddr) = 0;
virtual uint32_t readCpu(ContextID ctx, Addr daddr) = 0;
virtual void writeDistributor(ContextID ctx, Addr daddr,
uint32_t data) = 0;
virtual void writeCpu(ContextID ctx, Addr daddr, uint32_t data) = 0;
};
#endif

View file

@ -83,7 +83,7 @@ Pl390::Pl390(const Params *p)
iccrpr[x] = 0xff;
cpuEnabled[x] = false;
cpuPriority[x] = 0xff;
cpuBpr[x] = GICC_BPR_MINIMUM;
cpuBpr[x] = 0;
// Initialize cpu highest int
cpuHighestInt[x] = SPURIOUS_INT;
postIntEvent[x] = new PostIntEvent(*this, x);
@ -129,64 +129,46 @@ Pl390::readDistributor(PacketPtr pkt)
DPRINTF(GIC, "gic distributor read register %#x\n", daddr); DPRINTF(GIC, "gic distributor read register %#x\n", daddr);
const uint32_t resp = readDistributor(ctx, daddr, pkt->getSize());
switch (pkt->getSize()) {
case 1:
pkt->set<uint8_t>(resp);
break;
case 2:
pkt->set<uint16_t>(resp);
break;
case 4:
pkt->set<uint32_t>(resp);
break;
default:
panic("Invalid size while reading Distributor regs in GIC: %d\n",
pkt->getSize());
}
pkt->makeAtomicResponse();
return distPioDelay;
}
uint32_t
Pl390::readDistributor(ContextID ctx, Addr daddr, size_t resp_sz)
{
if (GICD_ISENABLER.contains(daddr)) { if (GICD_ISENABLER.contains(daddr)) {
uint32_t ix = (daddr - GICD_ISENABLER.start()) >> 2; uint32_t ix = (daddr - GICD_ISENABLER.start()) >> 2;
assert(ix < 32); assert(ix < 32);
return getIntEnabled(ctx, ix); pkt->set<uint32_t>(getIntEnabled(ctx, ix));
goto done;
} }
if (GICD_ICENABLER.contains(daddr)) { if (GICD_ICENABLER.contains(daddr)) {
uint32_t ix = (daddr - GICD_ICENABLER.start()) >> 2; uint32_t ix = (daddr - GICD_ICENABLER.start()) >> 2;
assert(ix < 32); assert(ix < 32);
return getIntEnabled(ctx, ix); pkt->set<uint32_t>(getIntEnabled(ctx, ix));
goto done;
} }
if (GICD_ISPENDR.contains(daddr)) { if (GICD_ISPENDR.contains(daddr)) {
uint32_t ix = (daddr - GICD_ISPENDR.start()) >> 2; uint32_t ix = (daddr - GICD_ISPENDR.start()) >> 2;
assert(ix < 32); assert(ix < 32);
return getPendingInt(ctx, ix); pkt->set<uint32_t>(getPendingInt(ctx, ix));
goto done;
} }
if (GICD_ICPENDR.contains(daddr)) { if (GICD_ICPENDR.contains(daddr)) {
uint32_t ix = (daddr - GICD_ICPENDR.start()) >> 2; uint32_t ix = (daddr - GICD_ICPENDR.start()) >> 2;
assert(ix < 32); assert(ix < 32);
return getPendingInt(ctx, ix); pkt->set<uint32_t>(getPendingInt(ctx, ix));
goto done;
} }
if (GICD_ISACTIVER.contains(daddr)) { if (GICD_ISACTIVER.contains(daddr)) {
uint32_t ix = (daddr - GICD_ISACTIVER.start()) >> 2; uint32_t ix = (daddr - GICD_ISACTIVER.start()) >> 2;
assert(ix < 32); assert(ix < 32);
return getActiveInt(ctx, ix); pkt->set<uint32_t>(getActiveInt(ctx, ix));
goto done;
} }
if (GICD_ICACTIVER.contains(daddr)) { if (GICD_ICACTIVER.contains(daddr)) {
uint32_t ix = (daddr - GICD_ICACTIVER.start()) >> 2; uint32_t ix = (daddr - GICD_ICACTIVER.start()) >> 2;
assert(ix < 32); assert(ix < 32);
return getActiveInt(ctx, ix); pkt->set<uint32_t>(getActiveInt(ctx, ix));
goto done;
} }
if (GICD_IPRIORITYR.contains(daddr)) { if (GICD_IPRIORITYR.contains(daddr)) {
@ -194,21 +176,27 @@ Pl390::readDistributor(ContextID ctx, Addr daddr, size_t resp_sz)
assert(int_num < INT_LINES_MAX); assert(int_num < INT_LINES_MAX);
DPRINTF(Interrupt, "Reading interrupt priority at int# %#x \n",int_num); DPRINTF(Interrupt, "Reading interrupt priority at int# %#x \n",int_num);
switch (resp_sz) { switch (pkt->getSize()) {
default: // will panic() after return to caller anyway
case 1: case 1:
return getIntPriority(ctx, int_num); pkt->set<uint8_t>(getIntPriority(ctx, int_num));
break;
case 2: case 2:
assert((int_num + 1) < INT_LINES_MAX); assert((int_num + 1) < INT_LINES_MAX);
return (getIntPriority(ctx, int_num) | pkt->set<uint16_t>(getIntPriority(ctx, int_num) |
getIntPriority(ctx, int_num+1) << 8); getIntPriority(ctx, int_num+1) << 8);
break;
case 4: case 4:
assert((int_num + 3) < INT_LINES_MAX); assert((int_num + 3) < INT_LINES_MAX);
return (getIntPriority(ctx, int_num) | pkt->set<uint32_t>(getIntPriority(ctx, int_num) |
getIntPriority(ctx, int_num+1) << 8 | getIntPriority(ctx, int_num+1) << 8 |
getIntPriority(ctx, int_num+2) << 16 | getIntPriority(ctx, int_num+2) << 16 |
getIntPriority(ctx, int_num+3) << 24); getIntPriority(ctx, int_num+3) << 24);
break;
default:
panic("Invalid size while reading priority regs in GIC: %d\n",
pkt->getSize());
} }
goto done;
} }
if (GICD_ITARGETSR.contains(daddr)) { if (GICD_ITARGETSR.contains(daddr)) {
@ -217,16 +205,33 @@ Pl390::readDistributor(ContextID ctx, Addr daddr, size_t resp_sz)
int_num); int_num);
assert(int_num < INT_LINES_MAX); assert(int_num < INT_LINES_MAX);
if (resp_sz == 1) { // First 31 interrupts only target single processor (SGI)
return getCpuTarget(ctx, int_num); if (int_num > 31) {
if (pkt->getSize() == 1) {
pkt->set<uint8_t>(cpuTarget[int_num]);
} else {
assert(pkt->getSize() == 4);
int_num = mbits(int_num, 31, 2);
pkt->set<uint32_t>(cpuTarget[int_num] |
cpuTarget[int_num+1] << 8 |
cpuTarget[int_num+2] << 16 |
cpuTarget[int_num+3] << 24) ;
}
} else { } else {
assert(resp_sz == 4); assert(ctx < sys->numRunningContexts());
int_num = mbits(int_num, 31, 2); uint32_t ctx_mask;
return (getCpuTarget(ctx, int_num) | if (gem5ExtensionsEnabled) {
getCpuTarget(ctx, int_num+1) << 8 | ctx_mask = ctx;
getCpuTarget(ctx, int_num+2) << 16 | } else {
getCpuTarget(ctx, int_num+3) << 24) ; // convert the CPU id number into a bit mask
ctx_mask = power(2, ctx);
}
// replicate the 8-bit mask 4 times in a 32-bit word
ctx_mask |= ctx_mask << 8;
ctx_mask |= ctx_mask << 16;
pkt->set<uint32_t>(ctx_mask);
} }
goto done;
} }
if (GICD_ICFGR.contains(daddr)) { if (GICD_ICFGR.contains(daddr)) {
@ -234,23 +239,30 @@ Pl390::readDistributor(ContextID ctx, Addr daddr, size_t resp_sz)
assert(ix < 64); assert(ix < 64);
/** @todo software generated interrupts and PPIs /** @todo software generated interrupts and PPIs
* can't be configured in some ways */ * can't be configured in some ways */
return intConfig[ix]; pkt->set<uint32_t>(intConfig[ix]);
goto done;
} }
switch(daddr) { switch(daddr) {
case GICD_CTLR: case GICD_CTLR:
return enabled; pkt->set<uint32_t>(enabled);
case GICD_TYPER: break;
case GICD_TYPER: {
/* The 0x100 is a made-up flag to show that gem5 extensions /* The 0x100 is a made-up flag to show that gem5 extensions
* are available, * are available,
* write 0x200 to this register to enable it. */ * write 0x200 to this register to enable it. */
return (((sys->numRunningContexts() - 1) << 5) | uint32_t tmp = ((sys->numRunningContexts() - 1) << 5) |
(itLines/INT_BITS_MAX -1) | (itLines/INT_BITS_MAX -1) |
(haveGem5Extensions ? 0x100 : 0x0)); (haveGem5Extensions ? 0x100 : 0x0);
pkt->set<uint32_t>(tmp);
} break;
default: default:
panic("Tried to read Gic distributor at offset %#x\n", daddr); panic("Tried to read Gic distributor at offset %#x\n", daddr);
break; break;
} }
done:
pkt->makeAtomicResponse();
return distPioDelay;
} }
Tick Tick
@ -265,24 +277,19 @@ Pl390::readCpu(PacketPtr pkt)
DPRINTF(GIC, "gic cpu read register %#x cpu context: %d\n", daddr, DPRINTF(GIC, "gic cpu read register %#x cpu context: %d\n", daddr,
ctx); ctx);
pkt->set<uint32_t>(readCpu(ctx, daddr));
pkt->makeAtomicResponse();
return cpuPioDelay;
}
uint32_t
Pl390::readCpu(ContextID ctx, Addr daddr)
{
switch(daddr) { switch(daddr) {
case GICC_IIDR: case GICC_IIDR:
return 0; pkt->set<uint32_t>(0);
break;
case GICC_CTLR: case GICC_CTLR:
return cpuEnabled[ctx]; pkt->set<uint32_t>(cpuEnabled[ctx]);
break;
case GICC_PMR: case GICC_PMR:
return cpuPriority[ctx]; pkt->set<uint32_t>(cpuPriority[ctx]);
break;
case GICC_BPR: case GICC_BPR:
return cpuBpr[ctx]; pkt->set<uint32_t>(cpuBpr[ctx]);
break;
case GICC_IAR: case GICC_IAR:
if (enabled && cpuEnabled[ctx]) { if (enabled && cpuEnabled[ctx]) {
int active_int = cpuHighestInt[ctx]; int active_int = cpuHighestInt[ctx];
@ -330,22 +337,26 @@ Pl390::readCpu(ContextID ctx, Addr daddr)
ctx, iar.ack_id, iar.cpu_id, iar); ctx, iar.ack_id, iar.cpu_id, iar);
cpuHighestInt[ctx] = SPURIOUS_INT; cpuHighestInt[ctx] = SPURIOUS_INT;
updateIntState(-1); updateIntState(-1);
pkt->set<uint32_t>(iar);
platform->intrctrl->clear(ctx, ArmISA::INT_IRQ, 0); platform->intrctrl->clear(ctx, ArmISA::INT_IRQ, 0);
return iar;
} else { } else {
return SPURIOUS_INT; pkt->set<uint32_t>(SPURIOUS_INT);
} }
break; break;
case GICC_RPR: case GICC_RPR:
return iccrpr[0]; pkt->set<uint32_t>(iccrpr[0]);
break;
case GICC_HPPIR: case GICC_HPPIR:
pkt->set<uint32_t>(0);
panic("Need to implement HPIR"); panic("Need to implement HPIR");
break; break;
default: default:
panic("Tried to read Gic cpu at offset %#x\n", daddr); panic("Tried to read Gic cpu at offset %#x\n", daddr);
break; break;
} }
pkt->makeAtomicResponse();
return cpuPioDelay;
} }
Tick Tick
@ -355,10 +366,9 @@ Pl390::writeDistributor(PacketPtr pkt)
assert(pkt->req->hasContextId()); assert(pkt->req->hasContextId());
const ContextID ctx = pkt->req->contextId(); const ContextID ctx = pkt->req->contextId();
const size_t data_sz = pkt->getSize();
uint32_t pkt_data M5_VAR_USED; uint32_t pkt_data M5_VAR_USED;
switch (data_sz) switch (pkt->getSize())
{ {
case 1: case 1:
pkt_data = pkt->get<uint8_t>(); pkt_data = pkt->get<uint8_t>();
@ -371,143 +381,141 @@ Pl390::writeDistributor(PacketPtr pkt)
break; break;
default: default:
panic("Invalid size when writing to priority regs in Gic: %d\n", panic("Invalid size when writing to priority regs in Gic: %d\n",
data_sz); pkt->getSize());
} }
DPRINTF(GIC, "gic distributor write register %#x size %#x value %#x \n", DPRINTF(GIC, "gic distributor write register %#x size %#x value %#x \n",
daddr, data_sz, pkt_data); daddr, pkt->getSize(), pkt_data);
writeDistributor(ctx, daddr, pkt_data, data_sz);
pkt->makeAtomicResponse();
return distPioDelay;
}
void
Pl390::writeDistributor(ContextID ctx, Addr daddr, uint32_t data,
size_t data_sz)
{
if (GICD_ISENABLER.contains(daddr)) { if (GICD_ISENABLER.contains(daddr)) {
uint32_t ix = (daddr - GICD_ISENABLER.start()) >> 2; uint32_t ix = (daddr - GICD_ISENABLER.start()) >> 2;
assert(ix < 32); assert(ix < 32);
getIntEnabled(ctx, ix) |= data; getIntEnabled(ctx, ix) |= pkt->get<uint32_t>();
return; goto done;
} }
if (GICD_ICENABLER.contains(daddr)) { if (GICD_ICENABLER.contains(daddr)) {
uint32_t ix = (daddr - GICD_ICENABLER.start()) >> 2; uint32_t ix = (daddr - GICD_ICENABLER.start()) >> 2;
assert(ix < 32); assert(ix < 32);
getIntEnabled(ctx, ix) &= ~data; getIntEnabled(ctx, ix) &= ~pkt->get<uint32_t>();
return; goto done;
} }
if (GICD_ISPENDR.contains(daddr)) { if (GICD_ISPENDR.contains(daddr)) {
uint32_t ix = (daddr - GICD_ISPENDR.start()) >> 2; uint32_t ix = (daddr - GICD_ISPENDR.start()) >> 2;
auto mask = data; auto mask = pkt->get<uint32_t>();
if (ix == 0) mask &= SGI_MASK; // Don't allow SGIs to be changed if (ix == 0) mask &= SGI_MASK; // Don't allow SGIs to be changed
getPendingInt(ctx, ix) |= mask; getPendingInt(ctx, ix) |= mask;
updateIntState(ix); updateIntState(ix);
return; goto done;
} }
if (GICD_ICPENDR.contains(daddr)) { if (GICD_ICPENDR.contains(daddr)) {
uint32_t ix = (daddr - GICD_ICPENDR.start()) >> 2; uint32_t ix = (daddr - GICD_ICPENDR.start()) >> 2;
auto mask = data; auto mask = pkt->get<uint32_t>();
if (ix == 0) mask &= SGI_MASK; // Don't allow SGIs to be changed if (ix == 0) mask &= SGI_MASK; // Don't allow SGIs to be changed
getPendingInt(ctx, ix) &= ~mask; getPendingInt(ctx, ix) &= ~mask;
updateIntState(ix); updateIntState(ix);
return; goto done;
} }
if (GICD_ISACTIVER.contains(daddr)) { if (GICD_ISACTIVER.contains(daddr)) {
uint32_t ix = (daddr - GICD_ISACTIVER.start()) >> 2; uint32_t ix = (daddr - GICD_ISACTIVER.start()) >> 2;
getActiveInt(ctx, ix) |= data; getActiveInt(ctx, ix) |= pkt->get<uint32_t>();
return; goto done;
} }
if (GICD_ICACTIVER.contains(daddr)) { if (GICD_ICACTIVER.contains(daddr)) {
uint32_t ix = (daddr - GICD_ICACTIVER.start()) >> 2; uint32_t ix = (daddr - GICD_ICACTIVER.start()) >> 2;
getActiveInt(ctx, ix) &= ~data; getActiveInt(ctx, ix) &= ~pkt->get<uint32_t>();
return; goto done;
} }
if (GICD_IPRIORITYR.contains(daddr)) { if (GICD_IPRIORITYR.contains(daddr)) {
Addr int_num = daddr - GICD_IPRIORITYR.start(); Addr int_num = daddr - GICD_IPRIORITYR.start();
switch(data_sz) { switch(pkt->getSize()) {
case 1: case 1:
getIntPriority(ctx, int_num) = data; getIntPriority(ctx, int_num) = pkt->get<uint8_t>();
break; break;
case 2: { case 2: {
getIntPriority(ctx, int_num) = bits(data, 7, 0); auto tmp16 = pkt->get<uint16_t>();
getIntPriority(ctx, int_num + 1) = bits(data, 15, 8); getIntPriority(ctx, int_num) = bits(tmp16, 7, 0);
getIntPriority(ctx, int_num + 1) = bits(tmp16, 15, 8);
break; break;
} }
case 4: { case 4: {
getIntPriority(ctx, int_num) = bits(data, 7, 0); auto tmp32 = pkt->get<uint32_t>();
getIntPriority(ctx, int_num + 1) = bits(data, 15, 8); getIntPriority(ctx, int_num) = bits(tmp32, 7, 0);
getIntPriority(ctx, int_num + 2) = bits(data, 23, 16); getIntPriority(ctx, int_num + 1) = bits(tmp32, 15, 8);
getIntPriority(ctx, int_num + 3) = bits(data, 31, 24); getIntPriority(ctx, int_num + 2) = bits(tmp32, 23, 16);
getIntPriority(ctx, int_num + 3) = bits(tmp32, 31, 24);
break; break;
} }
default: default:
panic("Invalid size when writing to priority regs in Gic: %d\n", panic("Invalid size when writing to priority regs in Gic: %d\n",
data_sz); pkt->getSize());
} }
updateIntState(-1); updateIntState(-1);
updateRunPri(); updateRunPri();
return; goto done;
} }
if (GICD_ITARGETSR.contains(daddr)) { if (GICD_ITARGETSR.contains(daddr)) {
Addr int_num = daddr - GICD_ITARGETSR.start(); Addr int_num = daddr - GICD_ITARGETSR.start();
// Interrupts 0-31 are read only // First 31 interrupts only target single processor
unsigned offset = SGI_MAX + PPI_MAX; if (int_num >= SGI_MAX) {
if (int_num >= offset) { if (pkt->getSize() == 1) {
unsigned ix = int_num - offset; // index into cpuTarget array uint8_t tmp = pkt->get<uint8_t>();
if (data_sz == 1) { cpuTarget[int_num] = tmp & 0xff;
cpuTarget[ix] = data & 0xff;
} else { } else {
assert (data_sz == 4); assert (pkt->getSize() == 4);
cpuTarget[ix] = bits(data, 7, 0); int_num = mbits(int_num, 31, 2);
cpuTarget[ix+1] = bits(data, 15, 8); uint32_t tmp = pkt->get<uint32_t>();
cpuTarget[ix+2] = bits(data, 23, 16); cpuTarget[int_num] = bits(tmp, 7, 0);
cpuTarget[ix+3] = bits(data, 31, 24); cpuTarget[int_num+1] = bits(tmp, 15, 8);
cpuTarget[int_num+2] = bits(tmp, 23, 16);
cpuTarget[int_num+3] = bits(tmp, 31, 24);
} }
updateIntState(int_num >> 2); updateIntState(int_num >> 2);
} }
return; goto done;
} }
if (GICD_ICFGR.contains(daddr)) { if (GICD_ICFGR.contains(daddr)) {
uint32_t ix = (daddr - GICD_ICFGR.start()) >> 2; uint32_t ix = (daddr - GICD_ICFGR.start()) >> 2;
assert(ix < INT_BITS_MAX*2); assert(ix < INT_BITS_MAX*2);
intConfig[ix] = data; intConfig[ix] = pkt->get<uint32_t>();
if (data & NN_CONFIG_MASK) if (pkt->get<uint32_t>() & NN_CONFIG_MASK)
warn("GIC N:N mode selected and not supported at this time\n"); warn("GIC N:N mode selected and not supported at this time\n");
return; goto done;
} }
switch(daddr) { switch(daddr) {
case GICD_CTLR: case GICD_CTLR:
enabled = data; enabled = pkt->get<uint32_t>();
DPRINTF(Interrupt, "Distributor enable flag set to = %d\n", enabled); DPRINTF(Interrupt, "Distributor enable flag set to = %d\n", enabled);
break; break;
case GICD_TYPER: case GICD_TYPER:
/* 0x200 is a made-up flag to enable gem5 extension functionality. /* 0x200 is a made-up flag to enable gem5 extension functionality.
* This reg is not normally written. * This reg is not normally written.
*/ */
gem5ExtensionsEnabled = (data & 0x200) && haveGem5Extensions; gem5ExtensionsEnabled = (
(pkt->get<uint32_t>() & 0x200) && haveGem5Extensions);
DPRINTF(GIC, "gem5 extensions %s\n", DPRINTF(GIC, "gem5 extensions %s\n",
gem5ExtensionsEnabled ? "enabled" : "disabled"); gem5ExtensionsEnabled ? "enabled" : "disabled");
break; break;
case GICD_SGIR: case GICD_SGIR:
softInt(ctx, data); softInt(ctx, pkt->get<uint32_t>());
break; break;
default: default:
panic("Tried to write Gic distributor at offset %#x\n", daddr); panic("Tried to write Gic distributor at offset %#x\n", daddr);
break; break;
} }
done:
pkt->makeAtomicResponse();
return distPioDelay;
} }
Tick Tick
@ -517,36 +525,23 @@ Pl390::writeCpu(PacketPtr pkt)
assert(pkt->req->hasContextId()); assert(pkt->req->hasContextId());
const ContextID ctx = pkt->req->contextId(); const ContextID ctx = pkt->req->contextId();
const uint32_t data = pkt->get<uint32_t>(); IAR iar;
DPRINTF(GIC, "gic cpu write register cpu:%d %#x val: %#x\n", DPRINTF(GIC, "gic cpu write register cpu:%d %#x val: %#x\n",
ctx, daddr, data); ctx, daddr, pkt->get<uint32_t>());
writeCpu(ctx, daddr, data);
pkt->makeAtomicResponse();
return cpuPioDelay;
}
void
Pl390::writeCpu(ContextID ctx, Addr daddr, uint32_t data)
{
switch(daddr) { switch(daddr) {
case GICC_CTLR: case GICC_CTLR:
cpuEnabled[ctx] = data; cpuEnabled[ctx] = pkt->get<uint32_t>();
break; break;
case GICC_PMR: case GICC_PMR:
cpuPriority[ctx] = data; cpuPriority[ctx] = pkt->get<uint32_t>();
break; break;
case GICC_BPR: { case GICC_BPR:
auto bpr = data & 0x7; cpuBpr[ctx] = pkt->get<uint32_t>();
if (bpr < GICC_BPR_MINIMUM)
bpr = GICC_BPR_MINIMUM;
cpuBpr[ctx] = bpr;
break; break;
} case GICC_EOIR:
case GICC_EOIR: { iar = pkt->get<uint32_t>();
const IAR iar = data;
if (iar.ack_id < SGI_MAX) { if (iar.ack_id < SGI_MAX) {
// Clear out the bit that corresponds to the cleared int // Clear out the bit that corresponds to the cleared int
uint64_t clr_int = ULL(1) << (ctx + 8 * iar.cpu_id); uint64_t clr_int = ULL(1) << (ctx + 8 * iar.cpu_id);
@ -574,12 +569,13 @@ Pl390::writeCpu(ContextID ctx, Addr daddr, uint32_t data)
DPRINTF(Interrupt, "CPU %d done handling intr IAR = %d from cpu %d\n", DPRINTF(Interrupt, "CPU %d done handling intr IAR = %d from cpu %d\n",
ctx, iar.ack_id, iar.cpu_id); ctx, iar.ack_id, iar.cpu_id);
break; break;
}
default: default:
panic("Tried to write Gic cpu at offset %#x\n", daddr); panic("Tried to write Gic cpu at offset %#x\n", daddr);
break; break;
} }
if (cpuEnabled[ctx]) updateIntState(-1); if (cpuEnabled[ctx]) updateIntState(-1);
pkt->makeAtomicResponse();
return cpuPioDelay;
}
Pl390::BankedRegs&
@ -670,17 +666,6 @@ Pl390::genSwiMask(int cpu)
return ULL(0x0101010101010101) << cpu;
}
uint8_t
Pl390::getCpuPriority(unsigned cpu)
{
// see Table 3-2 in IHI0048B.b (GICv2)
// mask some low-order priority bits per BPR value
// NB: the GIC prioritization scheme is upside down:
// lower values are higher priority; masking off bits
// actually creates a higher priority, not lower.
return cpuPriority[cpu] & (0xff00 >> (7 - cpuBpr[cpu]));
}
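The removed getCpuPriority() is where the Binary Point Register enters the priority comparison: low-order priority bits are masked off according to GICC_BPR (Table 3-2 in IHI0048B.b), and since lower GIC priority values are more urgent, masking bits can only raise the effective priority. A small, hedged illustration of the masking (plain Python, example values):

def effective_priority(pmr, bpr):
    # Mirrors the removed masking of cpuPriority by the BPR-selected bits.
    return pmr & (0xFF00 >> (7 - bpr)) & 0xFF

# With GICC_BPR_MINIMUM == 2 (the value added for the in-kernel KVM GIC),
# the three sub-priority bits are ignored.
assert effective_priority(0xFF, 2) == 0xF8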
void
Pl390::updateIntState(int hint)
{
@ -691,7 +676,7 @@ Pl390::updateIntState(int hint)
/*@todo use hint to do less work. */
int highest_int = SPURIOUS_INT;
// Priorities below that set in GICC_PMR can be ignored
uint8_t highest_pri = getCpuPriority(cpu);
uint8_t highest_pri = cpuPriority[cpu];
// Check SGIs
for (int swi = 0; swi < SGI_MAX; swi++) {
@ -732,8 +717,8 @@ Pl390::updateIntState(int hint)
(getIntPriority(cpu, int_nm) < highest_pri))
if ((!mp_sys) ||
(gem5ExtensionsEnabled
? (getCpuTarget(cpu, int_nm) == cpu)
: (getCpuTarget(cpu, int_nm) & (1 << cpu)))) {
? (cpuTarget[int_nm] == cpu)
: (cpuTarget[int_nm] & (1 << cpu)))) {
highest_pri = getIntPriority(cpu, int_nm);
highest_int = int_nm;
}
@ -748,8 +733,7 @@ Pl390::updateIntState(int hint)
/* @todo make this work for more than one cpu, need to handle 1:N, N:N
* models */
if (enabled && cpuEnabled[cpu] &&
(highest_pri < getCpuPriority(cpu)) &&
if (enabled && cpuEnabled[cpu] && (highest_pri < cpuPriority[cpu]) &&
!(getActiveInt(cpu, intNumToWord(highest_int))
& (1 << intNumToBit(highest_int)))) {
@ -792,14 +776,13 @@ Pl390::updateRunPri()
void void
Pl390::sendInt(uint32_t num) Pl390::sendInt(uint32_t num)
{ {
uint8_t target = getCpuTarget(0, num);
DPRINTF(Interrupt, "Received Interrupt number %d, cpuTarget %#x: \n", DPRINTF(Interrupt, "Received Interrupt number %d, cpuTarget %#x: \n",
num, target); num, cpuTarget[num]);
if ((target & (target - 1)) && !gem5ExtensionsEnabled) if ((cpuTarget[num] & (cpuTarget[num] - 1)) && !gem5ExtensionsEnabled)
panic("Multiple targets for peripheral interrupts is not supported\n"); panic("Multiple targets for peripheral interrupts is not supported\n");
panic_if(num < SGI_MAX + PPI_MAX, panic_if(num < SGI_MAX + PPI_MAX,
"sentInt() must only be used for interrupts 32 and higher"); "sentInt() must only be used for interrupts 32 and higher");
getPendingInt(target, intNumToWord(num)) |= 1 << intNumToBit(num); getPendingInt(cpuTarget[num], intNumToWord(num)) |= 1 << intNumToBit(num);
updateIntState(intNumToWord(num)); updateIntState(intNumToWord(num));
} }
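The target & (target - 1) test in sendInt() above is the usual check for more than one bit being set: peripheral interrupts aimed at several CPUs are only accepted when the gem5 extensions are enabled. A small standalone illustration:

    #include <cstdint>
    #include <cstdio>

    // x & (x - 1) clears the lowest set bit, so a non-zero result means at
    // least two target bits were set.
    bool multipleTargets(uint8_t target)
    {
        return (target & (target - 1)) != 0;
    }

    int main()
    {
        std::printf("%d %d %d\n",
                    (int)multipleTargets(0x01),    // CPU 0 only    -> 0
                    (int)multipleTargets(0x05),    // CPUs 0 and 2  -> 1
                    (int)multipleTargets(0x00));   // no target     -> 0
        return 0;
    }
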
@@ -897,6 +880,7 @@ Pl390::BankedRegs::serialize(CheckpointOut &cp) const
SERIALIZE_SCALAR(pendingInt); SERIALIZE_SCALAR(pendingInt);
SERIALIZE_SCALAR(activeInt); SERIALIZE_SCALAR(activeInt);
SERIALIZE_ARRAY(intPriority, SGI_MAX + PPI_MAX); SERIALIZE_ARRAY(intPriority, SGI_MAX + PPI_MAX);
SERIALIZE_ARRAY(cpuTarget, SGI_MAX + PPI_MAX);
} }
void void
@@ -955,6 +939,7 @@ Pl390::BankedRegs::unserialize(CheckpointIn &cp)
UNSERIALIZE_SCALAR(pendingInt); UNSERIALIZE_SCALAR(pendingInt);
UNSERIALIZE_SCALAR(activeInt); UNSERIALIZE_SCALAR(activeInt);
UNSERIALIZE_ARRAY(intPriority, SGI_MAX + PPI_MAX); UNSERIALIZE_ARRAY(intPriority, SGI_MAX + PPI_MAX);
UNSERIALIZE_ARRAY(cpuTarget, SGI_MAX + PPI_MAX);
} }
Pl390 * Pl390 *

View file

@@ -58,7 +58,7 @@
#include "dev/platform.hh" #include "dev/platform.hh"
#include "params/Pl390.hh" #include "params/Pl390.hh"
class Pl390 : public BaseGic, public BaseGicRegisters class Pl390 : public BaseGic
{ {
protected: protected:
// distributor memory addresses // distributor memory addresses
@@ -111,10 +111,6 @@ class Pl390 : public BaseGic, public BaseGicRegisters
static const int INT_LINES_MAX = 1020; static const int INT_LINES_MAX = 1020;
static const int GLOBAL_INT_LINES = INT_LINES_MAX - SGI_MAX - PPI_MAX; static const int GLOBAL_INT_LINES = INT_LINES_MAX - SGI_MAX - PPI_MAX;
/** minimum value for Binary Point Register ("IMPLEMENTATION DEFINED");
chosen for consistency with Linux's in-kernel KVM GIC model */
static const int GICC_BPR_MINIMUM = 2;
BitUnion32(SWI) BitUnion32(SWI)
Bitfield<3,0> sgi_id; Bitfield<3,0> sgi_id;
Bitfield<23,16> cpu_list; Bitfield<23,16> cpu_list;
@@ -176,11 +172,16 @@ class Pl390 : public BaseGic, public BaseGicRegisters
* interrupt priority for SGIs and PPIs */ * interrupt priority for SGIs and PPIs */
uint8_t intPriority[SGI_MAX + PPI_MAX]; uint8_t intPriority[SGI_MAX + PPI_MAX];
/** GICD_ITARGETSR{0..7}
* 8b CPU target ID for each SGI and PPI */
uint8_t cpuTarget[SGI_MAX + PPI_MAX];
void serialize(CheckpointOut &cp) const override; void serialize(CheckpointOut &cp) const override;
void unserialize(CheckpointIn &cp) override; void unserialize(CheckpointIn &cp) override;
BankedRegs() : BankedRegs() :
intEnabled(0), pendingInt(0), activeInt(0), intPriority {0} intEnabled(0), pendingInt(0), activeInt(0),
intPriority {0}, cpuTarget {0}
{} {}
}; };
std::vector<BankedRegs*> bankedRegs; std::vector<BankedRegs*> bankedRegs;
@@ -251,23 +252,12 @@ class Pl390 : public BaseGic, public BaseGicRegisters
*/ */
uint8_t cpuTarget[GLOBAL_INT_LINES]; uint8_t cpuTarget[GLOBAL_INT_LINES];
uint8_t getCpuTarget(ContextID ctx, uint32_t ix) { uint8_t& getCpuTarget(ContextID ctx, uint32_t ix) {
assert(ctx < sys->numRunningContexts());
assert(ix < INT_LINES_MAX); assert(ix < INT_LINES_MAX);
if (ix < SGI_MAX + PPI_MAX) { if (ix < SGI_MAX + PPI_MAX) {
// "GICD_ITARGETSR0 to GICD_ITARGETSR7 are read-only, and each return getBankedRegs(ctx).cpuTarget[ix];
// field returns a value that corresponds only to the processor
// reading the register."
uint32_t ctx_mask;
if (gem5ExtensionsEnabled) {
ctx_mask = ctx;
} else {
// convert the CPU id number into a bit mask
ctx_mask = power(2, ctx);
}
return ctx_mask;
} else { } else {
return cpuTarget[ix - 32]; return cpuTarget[ix - (SGI_MAX + PPI_MAX)];
} }
} }
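The spec quote inside getCpuTarget() above, that GICD_ITARGETSR0 to GICD_ITARGETSR7 are read-only and return a value corresponding only to the processor reading the register, is why SGI/PPI targets are derived from the reading context rather than stored. A standalone sketch of that behaviour, not gem5 code; the sizes follow SGI_MAX, PPI_MAX and INT_LINES_MAX above, and the comment notes the gem5-extensions variant that returns the raw CPU number instead of a one-hot mask:

    #include <cstdint>
    #include <cstdio>

    static const unsigned kSgiMax = 16;
    static const unsigned kPpiMax = 16;
    static const unsigned kGlobalLines = 1020 - kSgiMax - kPpiMax;

    uint8_t sharedTargets[kGlobalLines];   // GICD_ITARGETSRn, n >= 8 (SPIs)

    // IDs 0-31: banked and read-only, each CPU reads back a value naming only
    // itself (the gem5-extensions mode above returns the raw CPU number
    // instead of this one-hot mask).
    uint8_t readTargets(unsigned readingCpu, unsigned intId)
    {
        if (intId < kSgiMax + kPpiMax)
            return uint8_t(1u << readingCpu);
        return sharedTargets[intId - (kSgiMax + kPpiMax)];
    }

    int main()
    {
        sharedTargets[34 - 32] = 0x1;      // route SPI 34 to CPU 0 only
        std::printf("%#x %#x\n",
                    (unsigned)readTargets(2, 5),    // SGI 5 read by CPU 2 -> 0x4
                    (unsigned)readTargets(0, 34));  // SPI 34              -> 0x1
        return 0;
    }
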
@@ -280,7 +270,6 @@ class Pl390 : public BaseGic, public BaseGicRegisters
/** CPU priority */ /** CPU priority */
uint8_t cpuPriority[CPU_MAX]; uint8_t cpuPriority[CPU_MAX];
uint8_t getCpuPriority(unsigned cpu); // BPR-adjusted priority value
/** Binary point registers */ /** Binary point registers */
uint8_t cpuBpr[CPU_MAX]; uint8_t cpuBpr[CPU_MAX];
@@ -404,34 +393,21 @@ class Pl390 : public BaseGic, public BaseGicRegisters
* @param pkt packet to respond to * @param pkt packet to respond to
*/ */
Tick readDistributor(PacketPtr pkt); Tick readDistributor(PacketPtr pkt);
uint32_t readDistributor(ContextID ctx, Addr daddr,
size_t resp_sz);
uint32_t readDistributor(ContextID ctx, Addr daddr) override {
return readDistributor(ctx, daddr, 4);
}
/** Handle a read to the cpu portion of the GIC /** Handle a read to the cpu portion of the GIC
* @param pkt packet to respond to * @param pkt packet to respond to
*/ */
Tick readCpu(PacketPtr pkt); Tick readCpu(PacketPtr pkt);
uint32_t readCpu(ContextID ctx, Addr daddr) override;
/** Handle a write to the distributor portion of the GIC /** Handle a write to the distributor portion of the GIC
* @param pkt packet to respond to * @param pkt packet to respond to
*/ */
Tick writeDistributor(PacketPtr pkt); Tick writeDistributor(PacketPtr pkt);
void writeDistributor(ContextID ctx, Addr daddr,
uint32_t data, size_t data_sz);
void writeDistributor(ContextID ctx, Addr daddr,
uint32_t data) override {
return writeDistributor(ctx, daddr, data, 4);
}
/** Handle a write to the cpu portion of the GIC /** Handle a write to the cpu portion of the GIC
* @param pkt packet to respond to * @param pkt packet to respond to
*/ */
Tick writeCpu(PacketPtr pkt); Tick writeCpu(PacketPtr pkt);
void writeCpu(ContextID ctx, Addr daddr, uint32_t data) override;
}; };
#endif //__DEV_ARM_GIC_H__ #endif //__DEV_ARM_GIC_H__

View file

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012, 2015, 2017 ARM Limited * Copyright (c) 2012, 2015 ARM Limited
* All rights reserved * All rights reserved
* *
* The license below extends only to copyright in the software and shall * The license below extends only to copyright in the software and shall
@@ -101,33 +101,14 @@ DrainManager::resume()
"Resuming a system that isn't fully drained, this is untested and " "Resuming a system that isn't fully drained, this is untested and "
"likely to break\n"); "likely to break\n");
panic_if(_state == DrainState::Resuming,
"Resuming a system that is already trying to resume. This should "
"never happen.\n");
panic_if(_count != 0, panic_if(_count != 0,
"Resume called in the middle of a drain cycle. %u objects " "Resume called in the middle of a drain cycle. %u objects "
"left to drain.\n", _count); "left to drain.\n", _count);
// At this point in time the DrainManager and all objects will be DPRINTF(Drain, "Resuming %u objects.\n", drainableCount());
// in the the Drained state. New objects (i.e., objects created
// while resuming) will inherit the Resuming state from the
// DrainManager, which means we have to resume objects until all
// objects are in the Running state.
_state = DrainState::Resuming;
do {
DPRINTF(Drain, "Resuming %u objects.\n", drainableCount());
for (auto *obj : _allDrainable) {
if (obj->drainState() != DrainState::Running) {
assert(obj->drainState() == DrainState::Drained ||
obj->drainState() == DrainState::Resuming);
obj->dmDrainResume();
}
}
} while (!allInState(DrainState::Running));
_state = DrainState::Running; _state = DrainState::Running;
for (auto *obj : _allDrainable)
obj->dmDrainResume();
} }
void void
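The comment block in DrainManager::resume() above explains why resuming has to loop: a Drainable constructed while the simulator is resuming starts out in the Resuming state and only reaches Running once a later sweep visits it. A minimal standalone model of that loop, with simplified types rather than the real Drainable/DrainManager API:

    #include <algorithm>
    #include <vector>

    enum class State { Running, Draining, Drained, Resuming };

    struct Drainee {
        State state = State::Drained;
        void drainResume() { state = State::Running; }
    };

    // Sweep until everything reports Running.  The do/while matters because an
    // object constructed during a sweep would start out Resuming and has to be
    // picked up by a later pass.
    void resumeAll(std::vector<Drainee> &objs)
    {
        do {
            for (auto &o : objs)
                if (o.state != State::Running)
                    o.drainResume();
        } while (std::any_of(objs.begin(), objs.end(),
                             [](const Drainee &o) {
                                 return o.state != State::Running;
                             }));
    }

    int main()
    {
        std::vector<Drainee> objs(3);      // three drained objects
        resumeAll(objs);                   // all Running after one pass
        return 0;
    }
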
@@ -173,17 +154,6 @@ DrainManager::unregisterDrainable(Drainable *obj)
_allDrainable.erase(o); _allDrainable.erase(o);
} }
bool
DrainManager::allInState(DrainState state) const
{
for (const auto *obj : _allDrainable) {
if (obj->drainState() != state)
return false;
}
return true;
}
size_t size_t
DrainManager::drainableCount() const DrainManager::drainableCount() const
{ {
@@ -219,8 +189,7 @@ Drainable::dmDrain()
void void
Drainable::dmDrainResume() Drainable::dmDrainResume()
{ {
panic_if(_drainState != DrainState::Drained && panic_if(_drainState != DrainState::Drained,
_drainState != DrainState::Resuming,
"Trying to resume an object that hasn't been drained\n"); "Trying to resume an object that hasn't been drained\n");
_drainState = DrainState::Running; _drainState = DrainState::Running;

View file

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012, 2015, 2017 ARM Limited * Copyright (c) 2012, 2015 ARM Limited
* All rights reserved * All rights reserved
* *
* The license below extends only to copyright in the software and shall * The license below extends only to copyright in the software and shall
@@ -58,11 +58,7 @@ class Drainable;
* all objects have entered the Drained state. * all objects have entered the Drained state.
* *
* Before resuming simulation, the simulator calls resume() to * Before resuming simulation, the simulator calls resume() to
* transfer the object to the Running state. This in turn results in a * transfer the object to the Running state.
* call to drainResume() for all Drainable objects in the
* simulator. New Drainable objects may be created while resuming. In
* such cases, the new objects will be created in the Resuming state
* and later resumed.
* *
* \note Even though the state of an object (visible to the rest of * \note Even though the state of an object (visible to the rest of
* the world through Drainable::getState()) could be used to determine * the world through Drainable::getState()) could be used to determine
@@ -72,8 +68,7 @@ class Drainable;
enum class DrainState { enum class DrainState {
Running, /** Running normally */ Running, /** Running normally */
Draining, /** Draining buffers pending serialization/handover */ Draining, /** Draining buffers pending serialization/handover */
Drained, /** Buffers drained, ready for serialization/handover */ Drained /** Buffers drained, ready for serialization/handover */
Resuming, /** Transient state while the simulator is resuming */
}; };
#endif #endif
@@ -157,12 +152,6 @@ class DrainManager
void unregisterDrainable(Drainable *obj); void unregisterDrainable(Drainable *obj);
private: private:
/**
* Helper function to check if all Drainable objects are in a
* specific state.
*/
bool allInState(DrainState state) const;
/** /**
* Thread-safe helper function to get the number of Drainable * Thread-safe helper function to get the number of Drainable
* objects in a system. * objects in a system.
@@ -272,7 +261,6 @@ class Drainable
switch (_drainState) { switch (_drainState) {
case DrainState::Running: case DrainState::Running:
case DrainState::Drained: case DrainState::Drained:
case DrainState::Resuming:
return; return;
case DrainState::Draining: case DrainState::Draining:
_drainState = DrainState::Drained; _drainState = DrainState::Drained;

View file

@@ -52,6 +52,7 @@ def upgrader(cpt):
b_intEnabled = intEnabled[0] b_intEnabled = intEnabled[0]
b_pendingInt = pendingInt[0] b_pendingInt = pendingInt[0]
b_activeInt = activeInt[0] b_activeInt = activeInt[0]
b_cpuTarget = cpuTarget[0:32]
del intEnabled[0] del intEnabled[0]
del pendingInt[0] del pendingInt[0]
@@ -77,3 +78,4 @@ def upgrader(cpt):
cpt.set(new_sec, 'pendingInt', b_pendingInt) cpt.set(new_sec, 'pendingInt', b_pendingInt)
cpt.set(new_sec, 'activeInt', b_activeInt) cpt.set(new_sec, 'activeInt', b_activeInt)
cpt.set(new_sec, 'intPriority',' '.join(intPriority)) cpt.set(new_sec, 'intPriority',' '.join(intPriority))
cpt.set(new_sec, 'cpuTarget', ' '.join(b_cpuTarget))