/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmapped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "config/the_isa.hh"
#include "cpu/simple/atomic.hh"
#include "cpu/exetrace.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/AtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/system.hh"
#include "sim/full_system.hh"

using namespace std;
using namespace TheISA;

AtomicSimpleCPU::TickEvent::TickEvent(AtomicSimpleCPU *c)
    : Event(CPU_Tick_Pri), cpu(c)
{
}

void
AtomicSimpleCPU::TickEvent::process()
{
    cpu->tick();
}

const char *
AtomicSimpleCPU::TickEvent::description() const
{
    return "AtomicSimpleCPU tick";
}
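
// Initialise the CPU: set up the thread context's memory proxies and, in
// full-system mode, initialise the ISA state of each context (unless this
// CPU starts switched out).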
void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    // Initialise the ThreadContext's memory proxies
    tcBase()->initMemProxies(tcBase());

    if (FullSystem && !params()->switched_out) {
        ThreadID size = threadContexts.size();
        for (ThreadID i = 0; i < size; ++i) {
            ThreadContext *tc = threadContexts[i];
            // initialize CPU, including PC
            TheISA::initCPU(tc, tc->contextId());
        }
    }

    // Atomic doesn't do MT right now, so contextId == threadId
    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}

AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p), tickEvent(this), width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      drain_manager(NULL),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      fastmem(p->fastmem)
{
    _status = Idle;
}

AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}
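
// Draining notes: the CPU must be on a proper instruction boundary (not in
// the middle of a microcode sequence) when drained, both because gem5
// microcode cannot run on a hardware virtualized CPU and because
// curMacroStaticInst would otherwise dangle across repeated CPU switches.
// The tick event is descheduled here rather than in switchOut() so that
// timing is consistent between just draining and draining plus switching;
// the trade-off is that the latency of the last instruction before a drain
// is always accounted as 1 cycle.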
unsigned int
AtomicSimpleCPU::drain(DrainManager *dm)
{
    assert(!drain_manager);
    if (switchedOut())
        return 0;

    if (!isDrained()) {
        DPRINTF(Drain, "Requesting drain: %s\n", pcState());
        drain_manager = dm;
        return 1;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return 0;
    }
}
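
// Resume execution after a drain: sanity-check the memory mode and restart
// the tick event if the (single) thread is active.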
void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    assert(!drain_manager);
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());
    if (threadContexts.size() > 1)
        fatal("The atomic CPU only supports one thread.\n");

    if (thread->status() == ThreadContext::Active) {
        schedule(tickEvent, nextCycle());
        _status = BaseSimpleCPU::Running;
    } else {
        _status = BaseSimpleCPU::Idle;
    }

    system->totalNumInsts = 0;
}
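
// Try to complete a pending drain request. Returns true if there was a
// pending request and the CPU is now drained (on an instruction boundary),
// false otherwise.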
bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (!drain_manager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    if (!isDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drain_manager->signalDrainDone();
    drain_manager = NULL;

    return true;
}
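
// Switch out this CPU; it must already be drained, i.e. on an instruction
// boundary with the tick event descheduled.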
void
AtomicSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isDrained());
}
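
// Take over state from another CPU; the per-CPU fetch and data request
// structures are re-initialised with this CPU's id.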
void
AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());

    ifetch_req.setThreadContext(_cpuId, 0); // Add thread ID if we add MT
    data_read_req.setThreadContext(_cpuId, 0); // Add thread ID here too
    data_write_req.setThreadContext(_cpuId, 0); // Add thread ID here too
}
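
// The atomic CPU can only run when the memory system is in atomic mode.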
void
AtomicSimpleCPU::verifyMemoryMode() const
{
    if (!system->isAtomicMode()) {
        fatal("The atomic CPU requires the memory system to be in "
              "'atomic' mode.\n");
    }
}
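
// Wake up the (single) thread context and schedule the tick event 'delay'
// cycles from now.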
void
AtomicSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);
    assert(!tickEvent.scheduled());

    notIdleFraction++;
    numCycles += ticksToCycles(thread->lastActivate - thread->lastSuspend);

    //Make sure ticks are still on multiples of cycles
    schedule(tickEvent, clockEdge(delay));
    _status = BaseSimpleCPU::Running;
}
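
// Put the thread context to sleep and deschedule the tick event if it is
// currently scheduled.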
void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // tick event may not be scheduled if this gets called from inside
    // an instruction's execution, e.g. "quiesce"
    if (tickEvent.scheduled())
        deschedule(tickEvent);

    notIdleFraction--;
    _status = Idle;
}
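
// Atomic-mode read. An access that crosses a cache line boundary is split
// into two atomic accesses. For example, assuming a 64-byte block size, a
// 4-byte read at address 0x3e becomes a 2-byte read at 0x3e followed by a
// 2-byte read at 0x40, the start of the second line.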
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t * data,
                         unsigned size, unsigned flags)
{
    // use the CPU's statically allocated read request and packet objects
    Request *req = &data_read_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to read.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Read);

        // Now do the access.
        if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt = Packet(req,
                                req->isLLSC() ? MemCmd::LoadLockedReq :
                                MemCmd::ReadReq);
            pkt.dataStatic(data);

            if (req->isMmappedIpr())
                dcache_latency += TheISA::handleIprRead(thread->getTC(), &pkt);
            else {
                if (fastmem && system->isMemAddr(pkt.getAddr()))
                    system->getPhysMem().access(&pkt);
                else
                    dcache_latency += dcachePort.sendAtomic(&pkt);
            }
            dcache_access = true;

            assert(!pkt.isError());

            if (req->isLLSC()) {
                TheISA::handleLockedRead(thread, req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault) {
            if (req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        //If we don't need to access a second cache line, stop now.
        if (secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're reading into to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
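
// Atomic-mode write. Mirrors readMem: an access that crosses a cache line
// boundary is split into two atomic accesses. Also handles
// store-conditional and swap requests, which may return data to the caller
// through *res.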
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size,
                          Addr addr, unsigned flags, uint64_t *res)
{
    // use the CPU's statically allocated write request and packet objects
    Request *req = &data_write_req;

    if (traceData) {
        traceData->setAddr(addr);
    }

    //The block size of our peer.
    unsigned blockSize = dcachePort.peerBlockSize();
    //The size of the data we're trying to write.
    int fullSize = size;

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);

    if (secondAddr > addr)
        size = secondAddr - addr;

    dcache_latency = 0;

    while (1) {
        req->setVirt(0, addr, size, flags, dataMasterId(),
                     thread->pcState().instAddr());

        // translate to physical address
        Fault fault = thread->dtb->translateAtomic(req, tc, BaseTLB::Write);

        // Now do the access.
        if (fault == NoFault) {
            MemCmd cmd = MemCmd::WriteReq; // default
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                cmd = MemCmd::StoreCondReq;
                do_access = TheISA::handleLockedWrite(thread, req);
            } else if (req->isSwap()) {
                cmd = MemCmd::SwapReq;
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt = Packet(req, cmd);
                pkt.dataStatic(data);

                if (req->isMmappedIpr()) {
                    dcache_latency +=
                        TheISA::handleIprWrite(thread->getTC(), &pkt);
                } else {
                    if (fastmem && system->isMemAddr(pkt.getAddr()))
                        system->getPhysMem().access(&pkt);
                    else
                        dcache_latency += dcachePort.sendAtomic(&pkt);
                }
                dcache_access = true;
                assert(!pkt.isError());

                if (req->isSwap()) {
                    assert(res);
                    memcpy(res, pkt.getPtr<uint8_t>(), fullSize);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || secondAddr <= addr)
        {
            if (req->isLocked() && fault == NoFault) {
                assert(locked);
                locked = false;
            }
            if (fault != NoFault && req->isPrefetch()) {
                return NoFault;
            } else {
                return fault;
            }
        }

        /*
         * Set up for accessing the second cache line.
         */

        //Move the pointer we're writing from to the correct location.
        data += size;
        //Adjust the size to get the remaining bytes.
        size = addr + fullSize - secondAddr;
        //And access the right address.
        addr = secondAddr;
    }
}
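
// Main simulation loop: executes up to 'width' instructions per tick (and,
// per the loop condition, keeps iterating while a locked memory sequence
// is in flight), fetching and executing each instruction atomically.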
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
AtomicSimpleCPU::tick()
|
|
|
|
{
|
2007-08-05 00:56:48 +02:00
|
|
|
DPRINTF(SimpleCPU, "Tick\n");
|
|
|
|
|
2008-06-18 19:15:21 +02:00
|
|
|
Tick latency = 0;
|
2006-05-16 23:36:50 +02:00
|
|
|
|
2009-04-19 13:50:07 +02:00
|
|
|
for (int i = 0; i < width || locked; ++i) {
|
2006-05-16 23:36:50 +02:00
|
|
|
numCycles++;
|
|
|
|
|
2006-10-23 08:39:02 +02:00
|
|
|
if (!curStaticInst || !curStaticInst->isDelayedCommit())
|
|
|
|
checkForInterrupts();
|
2006-05-18 04:08:44 +02:00
|
|
|
|
2008-02-14 22:14:35 +01:00
|
|
|
checkPcEventQueue();
|
2011-03-18 01:20:20 +01:00
|
|
|
// We must have just got suspended by a PC event
|
cpu: Make sure that a drained atomic CPU isn't executing ucode
Currently, the atomic CPU can be in the middle of a microcode sequence
when it is drained. This leads to two problems:
* When switching to a hardware virtualized CPU, we obviously can't
execute gem5 microcode.
* Since curMacroStaticInst is populated when executing microcode,
repeated switching between CPUs executing microcode leads to
incorrect execution.
After applying this patch, the CPU will be on a proper instruction
boundary, which means that it is safe to switch to any CPU model
(including hardware virtualized ones). This changeset fixes a bug
where the multiple switches to the same atomic CPU sometimes corrupts
the target state because of dangling pointers to the currently
executing microinstruction.
Note: This changeset moves tick event descheduling from switchOut() to
drain(), which makes timing consistent between just draining a system
and draining /and/ switching between two atomic CPUs. This makes
debugging quite a lot easier (execution traces get the same timing),
but the latency of the last instruction before a drain will not be
accounted for correctly (it will always be 1 cycle).
Note 2: This changeset removes so_state variable, the locked variable,
and the tickEvent from checkpoints since none of them contain state
that needs to be preserved across checkpoints. The so_state is made
redundant because we don't use the drain state variable anymore, the
lock variable should never be set when the system is drained, and the
tick event isn't scheduled.
2013-01-07 19:05:46 +01:00
|
|
|
if (_status == Idle) {
|
|
|
|
tryCompleteDrain();
|
2011-03-18 01:20:20 +01:00
|
|
|
return;
|
cpu: Make sure that a drained atomic CPU isn't executing ucode
Currently, the atomic CPU can be in the middle of a microcode sequence
when it is drained. This leads to two problems:
* When switching to a hardware virtualized CPU, we obviously can't
execute gem5 microcode.
* Since curMacroStaticInst is populated when executing microcode,
repeated switching between CPUs executing microcode leads to
incorrect execution.
After applying this patch, the CPU will be on a proper instruction
boundary, which means that it is safe to switch to any CPU model
(including hardware virtualized ones). This changeset fixes a bug
where the multiple switches to the same atomic CPU sometimes corrupts
the target state because of dangling pointers to the currently
executing microinstruction.
Note: This changeset moves tick event descheduling from switchOut() to
drain(), which makes timing consistent between just draining a system
and draining /and/ switching between two atomic CPUs. This makes
debugging quite a lot easier (execution traces get the same timing),
but the latency of the last instruction before a drain will not be
accounted for correctly (it will always be 1 cycle).
Note 2: This changeset removes so_state variable, the locked variable,
and the tickEvent from checkpoints since none of them contain state
that needs to be preserved across checkpoints. The so_state is made
redundant because we don't use the drain state variable anymore, the
lock variable should never be set when the system is drained, and the
tick event isn't scheduled.
2013-01-07 19:05:46 +01:00
|
|
|
}
|
2008-02-14 22:14:35 +01:00
|
|
|
|
2008-10-13 04:32:06 +02:00
|
|
|
Fault fault = NoFault;
|
|
|
|
|
ISA,CPU,etc: Create an ISA defined PC type that abstracts out ISA behaviors.
This change is a low level and pervasive reorganization of how PCs are managed
in M5. Back when Alpha was the only ISA, there were only 2 PCs to worry about,
the PC and the NPC, and the lsb of the PC signaled whether or not you were in
PAL mode. As other ISAs were added, we had to add an NNPC, micro PC and next
micropc, x86 and ARM introduced variable length instruction sets, and ARM
started to keep track of mode bits in the PC. Each CPU model handled PCs in
its own custom way that needed to be updated individually to handle the new
dimensions of variability, or, in the case of ARMs mode-bit-in-the-pc hack,
the complexity could be hidden in the ISA at the ISA implementation's expense.
Areas like the branch predictor hadn't been updated to handle branch delay
slots or micropcs, and it turns out that had introduced a significant (10s of
percent) performance bug in SPARC and to a lesser extend MIPS. Rather than
perpetuate the problem by reworking O3 again to handle the PC features needed
by x86, this change was introduced to rework PC handling in a more modular,
transparent, and hopefully efficient way.
PC type:
Rather than having the superset of all possible elements of PC state declared
in each of the CPU models, each ISA defines its own PCState type which has
exactly the elements it needs. A cross product of canned PCState classes are
defined in the new "generic" ISA directory for ISAs with/without delay slots
and microcode. These are either typedef-ed or subclassed by each ISA. To read
or write this structure through a *Context, you use the new pcState() accessor
which reads or writes depending on whether it has an argument. If you just
want the address of the current or next instruction or the current micro PC,
you can get those through read-only accessors on either the PCState type or
the *Contexts. These are instAddr(), nextInstAddr(), and microPC(). Note the
move away from readPC. That name is ambiguous since it's not clear whether or
not it should be the actual address to fetch from, or if it should have extra
bits in it like the PAL mode bit. Each class is free to define its own
functions to get at whatever values it needs however it needs to to be used in
ISA specific code. Eventually Alpha's PAL mode bit could be moved out of the
PC and into a separate field like ARM.
These types can be reset to a particular pc (where npc = pc +
sizeof(MachInst), nnpc = npc + sizeof(MachInst), upc = 0, nupc = 1 as
appropriate), printed, serialized, and compared. There is a branching()
function which encapsulates code in the CPU models that checked if an
instruction branched or not. Exactly what that means in the context of branch
delay slots which can skip an instruction when not taken is ambiguous, and
ideally this function and its uses can be eliminated. PCStates also generally
know how to advance themselves in various ways depending on if they point at
an instruction, a microop, or the last microop of a macroop. More on that
later.
Ideally, accessing all the PCs at once when setting them will improve
performance of M5 even though more data needs to be moved around. This is
because often all the PCs need to be manipulated together, and by getting them
all at once you avoid multiple function calls. Also, the PCs of a particular
thread will have spatial locality in the cache. Previously they were grouped
by element in arrays which spread out accesses.
Advancing the PC:
The PCs were previously managed entirely by the CPU which had to know about PC
semantics, try to figure out which dimension to increment the PC in, what to
set NPC/NNPC, etc. These decisions are best left to the ISA in conjunction
with the PC type itself. Because most of the information about how to
increment the PC (mainly what type of instruction it refers to) is contained
in the instruction object, a new advancePC virtual function was added to the
StaticInst class. Subclasses provide an implementation that moves around the
right element of the PC with a minimal amount of decision making. In ISAs like
Alpha, the instructions always simply assign NPC to PC without having to worry
about micropcs, nnpcs, etc. The added cost of a virtual function call should
be outweighed by not having to figure out as much about what to do with the
PCs and mucking around with the extra elements.
One drawback of making the StaticInsts advance the PC is that you have to
actually have one to advance the PC. This would, superficially, seem to
require decoding an instruction before fetch could advance. This is, as far as
I can tell, realistic. fetch would advance through memory addresses, not PCs,
perhaps predicting new memory addresses using existing ones. More
sophisticated decisions about control flow would be made later on, after the
instruction was decoded, and handed back to fetch. If branching needs to
happen, some amount of decoding needs to happen to see that it's a branch,
what the target is, etc. This could get a little more complicated if that gets
done by the predecoder, but I'm choosing to ignore that for now.
Variable length instructions:
To handle variable length instructions in x86 and ARM, the predecoder now
takes in the current PC by reference to the getExtMachInst function. It can
modify the PC however it needs to (by setting NPC to be the PC + instruction
length, for instance). This could be improved since the CPU doesn't know if
the PC was modified and always has to write it back.
ISA parser:
To support the new API, all PC related operand types were removed from the
parser and replaced with a PCState type. There are two warts on this
implementation. First, as with all the other operand types, the PCState still
has to have a valid operand type even though it doesn't use it. Second, using
syntax like PCS.npc(target) doesn't work for two reasons, this looks like the
syntax for operand type overriding, and the parser can't figure out if you're
reading or writing. Instructions that use the PCS operand (which I've
consistently called it) need to first read it into a local variable,
manipulate it, and then write it back out.
Return address stack:
The return address stack needed a little extra help because, in the presence
of branch delay slots, it has to merge together elements of the return PC and
the call PC. To handle that, a buildRetPC utility function was added. There
are basically only two versions in all the ISAs, but it didn't seem short
enough to put into the generic ISA directory. Also, the branch predictor code
in O3 and InOrder were adjusted so that they always store the PC of the actual
call instruction in the RAS, not the next PC. If the call instruction is a
microop, the next PC refers to the next microop in the same macroop which is
probably not desirable. The buildRetPC function advances the PC intelligently
to the next macroop (in an ISA specific way) so that that case works.
Change in stats:
There were no change in stats except in MIPS and SPARC in the O3 model. MIPS
runs in about 9% fewer ticks. SPARC runs with 30%-50% fewer ticks, which could
likely be improved further by setting call/return instruction flags and taking
advantage of the RAS.
TODO:
Add != operators to the PCState classes, defined trivially to be !(a==b).
Smooth out places where PCs are split apart, passed around, and put back
together later. I think this might happen in SPARC's fault code. Add ISA
specific constructors that allow setting PC elements without calling a bunch
of accessors. Try to eliminate the need for the branching() function. Factor
out Alpha's PAL mode pc bit into a separate flag field, and eliminate places
where it's blindly masked out or tested in the PC.
2010-10-31 08:07:20 +01:00
|
|
|
TheISA::PCState pcState = thread->pcState();
|
|
|
|
|
|
|
|
bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
|
|
|
|
!curMacroStaticInst;
|
|
|
|
if (needToFetch) {
|
2009-02-25 19:16:15 +01:00
|
|
|
setupFetchRequest(&ifetch_req);
|
2009-04-09 07:21:27 +02:00
|
|
|
fault = thread->itb->translateAtomic(&ifetch_req, tc,
|
|
|
|
BaseTLB::Execute);
|
2009-02-25 19:16:15 +01:00
|
|
|
}
|
2006-05-16 23:36:50 +02:00
|
|
|
|
|
|
|
if (fault == NoFault) {
|
2007-03-15 03:47:42 +01:00
|
|
|
Tick icache_latency = 0;
|
|
|
|
bool icache_access = false;
|
|
|
|
dcache_access = false; // assume no dcache access
|
2006-05-31 04:30:42 +02:00
|
|
|
|
            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                    icache_access = true;
MEM: Remove the Broadcast destination from the packet
This patch simplifies the packet by removing the broadcast flag and
instead more firmly relying on (and enforcing) the semantics of
transactions in the classic memory system, i.e. request packets are
routed from a master to a slave based on the address, and when they
are created they have neither a valid source nor a valid destination.
On their way to the slave, the request packet is updated with a source
field by all modules that multiplex packets from multiple masters
(e.g. a bus). When a request packet is turned into a response packet
(at the final slave), it moves the potentially populated source field
to the destination field, and the response packet is routed through
any multiplexing components back to the master based on the
destination field.
Modules that connect multiplexing components, such as caches and
bridges, store any existing source and destination fields in the sender
state as a stack (just as before).
The packet constructor is simplified in that there is no longer a need
to pass Packet::Broadcast as the destination (this was always the
case for the classic memory system). In the case of Ruby, rather than
using the constructor parameter, we now rely on setDest, as there is
already another three-argument constructor in the packet class.
In many places where packet information was printed as part of
DPRINTFs, request packets would be printed with a numeric "dest" that
was always -1 (Broadcast); that field is now removed from the
printing.
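The request/response hand-off described above can be shown in miniature.
This is a deliberately simplified sketch, with a hypothetical ToyPacket type
and plain integer port ids rather than gem5's real Packet class: a
multiplexer stamps the source on the way down, and makeResponse() turns that
source into the routing destination on the way back.

#include <cassert>

struct ToyPacket
{
    static constexpr int InvalidPort = -1;

    int source = InvalidPort; // set by multiplexing components (e.g. a bus)
    int dest = InvalidPort;   // only meaningful on responses
    bool isResponse = false;

    // A bus records the master-side port a request arrived on.
    void setSource(int port) { source = port; }

    // At the final slave the request becomes a response: the recorded
    // source becomes the destination used to route the response back.
    void makeResponse()
    {
        assert(!isResponse);
        dest = source;
        source = InvalidPort;
        isResponse = true;
    }
};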
                    Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
                    ifetch_pkt.dataStatic(&inst);

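                    // With fastmem, an address backed by physical memory
                    // bypasses the cache hierarchy and is satisfied
                    // directly; anything else goes out through the
                    // icache port as an atomic access.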
                    if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
                        system->getPhysMem().access(&ifetch_pkt);
                    else
                        icache_latency = icachePort.sendAtomic(&ifetch_pkt);

                    assert(!ifetch_pkt.isError());

                    // ifetch_req is initialized to read the instruction directly
                    // into the CPU object's inst field.
                //}
            }

            preExecute();

            if (curStaticInst) {
                fault = curStaticInst->execute(this, traceData);

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
                else if (traceData && !DTRACE(ExecFaulting)) {
                    delete traceData;
                    traceData = NULL;
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;

            Tick stall_ticks = 0;
            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
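                // Illustrative numbers, not from the source: with a
                // 500-tick clock period, a 1200-tick stall becomes
                // divCeil(1200, 500) * 500 = 1500 ticks, i.e. it is
                // charged as three full cycles.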
            }

        }
        if (fault != NoFault || !stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        schedule(tickEvent, curTick() + latency);
}
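The final schedule call is what keeps the CPU ticking: each invocation of
tick() charges at least one clock period and then re-schedules itself, unless
the CPU has gone idle. A free-standing sketch of that self-rescheduling
pattern (hypothetical names and constants, not gem5's event API):

#include <cstdint>
#include <iostream>

typedef std::uint64_t Tick;

int main()
{
    const Tick clockPeriod = 500; // stand-in for clockPeriod()
    Tick curTick = 0;

    // Stand-in for the tick event loop: run until a fake idle condition.
    while (curTick < 10 * clockPeriod) {
        // ... execute one instruction, accumulating stall latency ...
        Tick latency = 200;       // pretend the stalls added up to 200

        // instruction takes at least one cycle
        if (latency < clockPeriod)
            latency = clockPeriod;

        // stand-in for schedule(tickEvent, curTick() + latency)
        curTick += latency;
        std::cout << "next tick at " << curTick << "\n";
    }
    return 0;
}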

void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

////////////////////////////////////////////////////////////////////////
//
//  AtomicSimpleCPU Simulation Object
//
AtomicSimpleCPU *
AtomicSimpleCPUParams::create()
{
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new AtomicSimpleCPU(this);
}
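create() is the factory hook through which the generated params object builds
its SimObject. Stripped to its essentials, and with hypothetical Fake* names
rather than gem5's generated params classes, the pattern is:

#include <cstddef>

struct FakeCPU;

// Hypothetical, pared-down params object with a factory hook.
struct FakeCPUParams
{
    std::size_t numThreads = 1;
    FakeCPU *create();
};

struct FakeCPU
{
    const FakeCPUParams *params;
    explicit FakeCPU(const FakeCPUParams *p) : params(p) {}
};

FakeCPU *
FakeCPUParams::create()
{
    // Validate / normalize parameters before construction, as the real
    // create() above does with numThreads and the workload check.
    numThreads = 1;
    return new FakeCPU(this);
}

int main()
{
    FakeCPUParams params;
    FakeCPU *cpu = params.create(); // mirrors AtomicSimpleCPUParams::create()
    delete cpu;
    return 0;
}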