/*
 * Copyright (c) 2012-2013,2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#ifndef __CPU_SIMPLE_TIMING_HH__
#define __CPU_SIMPLE_TIMING_HH__

#include "cpu/simple/base.hh"
#include "cpu/simple/exec_context.hh"
#include "cpu/translation.hh"
#include "params/TimingSimpleCPU.hh"

|
|
class TimingSimpleCPU : public BaseSimpleCPU
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
|
2008-08-11 21:22:16 +02:00
|
|
|
TimingSimpleCPU(TimingSimpleCPUParams * params);
|
2006-05-16 23:36:50 +02:00
|
|
|
virtual ~TimingSimpleCPU();
|
|
|
|
|
2015-10-12 10:08:01 +02:00
|
|
|
void init() override;
|
2006-05-16 23:36:50 +02:00
|
|
|
|
|
|
|
private:
|
|
|
|
|
2008-11-10 06:56:28 +01:00
|
|
|
/*
|
|
|
|
* If an access needs to be broken into fragments, currently at most two,
|
|
|
|
* the the following two classes are used as the sender state of the
|
|
|
|
* packets so the CPU can keep track of everything. In the main packet
|
|
|
|
* sender state, there's an array with a spot for each fragment. If a
|
|
|
|
* fragment has already been accepted by the CPU, aka isn't waiting for
|
|
|
|
* a retry, it's pointer is NULL. After each fragment has successfully
|
|
|
|
* been processed, the "outstanding" counter is decremented. Once the
|
|
|
|
* count is zero, the entire larger access is complete.
|
|
|
|
*/
|
|
|
|
class SplitMainSenderState : public Packet::SenderState
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
int outstanding;
|
|
|
|
PacketPtr fragments[2];
|
|
|
|
|
|
|
|
int
|
|
|
|
getPendingFragment()
|
|
|
|
{
|
|
|
|
if (fragments[0]) {
|
|
|
|
return 0;
|
|
|
|
} else if (fragments[1]) {
|
|
|
|
return 1;
|
|
|
|
} else {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
class SplitFragmentSenderState : public Packet::SenderState
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
SplitFragmentSenderState(PacketPtr _bigPkt, int _index) :
|
|
|
|
bigPkt(_bigPkt), index(_index)
|
|
|
|
{}
|
|
|
|
PacketPtr bigPkt;
|
|
|
|
int index;
|
|
|
|
|
|
|
|
void
|
|
|
|
clearFromParent()
|
|
|
|
{
|
|
|
|
SplitMainSenderState * main_send_state =
|
|
|
|
dynamic_cast<SplitMainSenderState *>(bigPkt->senderState);
|
|
|
|
main_send_state->fragments[index] = NULL;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2009-02-25 19:16:15 +01:00
|
|
|
class FetchTranslation : public BaseTLB::Translation
|
|
|
|
{
|
|
|
|
protected:
|
|
|
|
TimingSimpleCPU *cpu;
|
|
|
|
|
|
|
|
public:
|
2009-04-09 07:21:27 +02:00
|
|
|
FetchTranslation(TimingSimpleCPU *_cpu)
|
|
|
|
: cpu(_cpu)
|
2009-02-25 19:16:15 +01:00
|
|
|
{}
|
|
|
|
|
2011-02-12 01:29:35 +01:00
|
|
|
void
|
|
|
|
markDelayed()
|
2011-02-12 01:29:35 +01:00
|
|
|
{
|
2012-11-02 17:32:01 +01:00
|
|
|
assert(cpu->_status == BaseSimpleCPU::Running);
|
2011-02-12 01:29:35 +01:00
|
|
|
cpu->_status = ITBWaitResponse;
|
|
|
|
}
|
2011-02-12 01:29:35 +01:00
|
|
|
|
2009-04-09 07:21:27 +02:00
|
|
|
void
|
2014-09-19 16:35:18 +02:00
|
|
|
finish(const Fault &fault, RequestPtr req, ThreadContext *tc,
|
2009-04-09 07:21:27 +02:00
|
|
|
BaseTLB::Mode mode)
|
2009-02-25 19:16:15 +01:00
|
|
|
{
|
|
|
|
cpu->sendFetch(fault, req, tc);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
FetchTranslation fetchTranslation;
|
|
|
|
|
2015-09-30 18:14:19 +02:00
|
|
|
void threadSnoop(PacketPtr pkt, ThreadID sender);
|
2010-02-12 20:53:19 +01:00
|
|
|
void sendData(RequestPtr req, uint8_t *data, uint64_t *res, bool read);
|
|
|
|
void sendSplitData(RequestPtr req1, RequestPtr req2, RequestPtr req,
|
|
|
|
uint8_t *data, bool read);
|
2009-02-25 19:16:15 +01:00
|
|
|
|
2014-09-19 16:35:18 +02:00
|
|
|
void translationFault(const Fault &fault);
|
2009-02-25 19:16:15 +01:00
|
|
|
|
2015-01-22 11:00:53 +01:00
|
|
|
PacketPtr buildPacket(RequestPtr req, bool read);
|
2009-02-25 19:16:15 +01:00
|
|
|
void buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
|
|
|
|
RequestPtr req1, RequestPtr req2, RequestPtr req,
|
|
|
|
uint8_t *data, bool read);
|
2008-11-14 08:30:37 +01:00
|
|
|
|
2008-11-10 06:56:28 +01:00
|
|
|
bool handleReadPacket(PacketPtr pkt);
|
|
|
|
// This function always implicitly uses dcache_pkt.
|
|
|
|
bool handleWritePacket();
|
|
|
|
|
2012-01-17 19:55:08 +01:00
|
|
|
/**
|
|
|
|
* A TimingCPUPort overrides the default behaviour of the
|
|
|
|
* recvTiming and recvRetry and implements events for the
|
|
|
|
* scheduling of handling of incoming packets in the following
|
|
|
|
* cycle.
|
|
|
|
*/
|
2013-03-26 19:46:42 +01:00
|
|
|
class TimingCPUPort : public MasterPort
|
2006-05-16 23:36:50 +02:00
|
|
|
{
|
|
|
|
public:
|
|
|
|
|
2012-01-17 19:55:08 +01:00
|
|
|
TimingCPUPort(const std::string& _name, TimingSimpleCPU* _cpu)
|
2015-03-02 10:00:35 +01:00
|
|
|
: MasterPort(_name, _cpu), cpu(_cpu), retryRespEvent(this)
|
2006-05-16 23:36:50 +02:00
|
|
|
{ }
|
|
|
|
|
|
|
|
protected:
|
|
|
|
|
2012-01-17 19:55:08 +01:00
|
|
|
TimingSimpleCPU* cpu;
|
2006-07-21 01:00:40 +02:00
|
|
|
|
|
|
|
struct TickEvent : public Event
|
|
|
|
{
|
2006-10-20 09:10:12 +02:00
|
|
|
PacketPtr pkt;
|
2006-07-21 01:00:40 +02:00
|
|
|
TimingSimpleCPU *cpu;
|
|
|
|
|
2012-01-17 19:55:08 +01:00
|
|
|
TickEvent(TimingSimpleCPU *_cpu) : pkt(NULL), cpu(_cpu) {}
|
2008-02-06 22:32:40 +01:00
|
|
|
const char *description() const { return "Timing CPU tick"; }
|
2006-10-20 09:10:12 +02:00
|
|
|
void schedule(PacketPtr _pkt, Tick t);
|
2006-07-21 01:00:40 +02:00
|
|
|
};
|
|
|
|
|
2015-03-02 10:00:35 +01:00
|
|
|
EventWrapper<MasterPort, &MasterPort::sendRetryResp> retryRespEvent;
|
2006-05-16 23:36:50 +02:00
|
|
|
};
|
|
|
|
|
2012-01-17 19:55:08 +01:00
|
|
|
class IcachePort : public TimingCPUPort
|
2006-05-16 23:36:50 +02:00
|
|
|
{
|
|
|
|
public:
|
|
|
|
|
2012-01-17 19:55:08 +01:00
|
|
|
IcachePort(TimingSimpleCPU *_cpu)
|
2012-07-09 18:35:39 +02:00
|
|
|
: TimingCPUPort(_cpu->name() + ".icache_port", _cpu),
|
2012-01-17 19:55:08 +01:00
|
|
|
tickEvent(_cpu)
|
2006-05-16 23:36:50 +02:00
|
|
|
{ }
|
|
|
|
|
|
|
|
protected:
|
|
|
|
|
MEM: Separate requests and responses for timing accesses
This patch moves send/recvTiming and send/recvTimingSnoop from the
Port base class to the MasterPort and SlavePort, and also splits them
into separate member functions for requests and responses:
send/recvTimingReq, send/recvTimingResp, and send/recvTimingSnoopReq,
send/recvTimingSnoopResp. A master port sends requests and receives
responses, and also receives snoop requests and sends snoop
responses. A slave port has the reciprocal behaviour as it receives
requests and sends responses, and sends snoop requests and receives
snoop responses.
For all MemObjects that have only master ports or slave ports (but not
both), e.g. a CPU, or a PIO device, this patch merely adds more
clarity to what kind of access is taking place. For example, a CPU
port used to call sendTiming, and will now call
sendTimingReq. Similarly, a response previously came back through
recvTiming, which is now recvTimingResp. For the modules that have
both master and slave ports, e.g. the bus, the behaviour was
previously relying on branches based on pkt->isRequest(), and this is
now replaced with a direct call to the apprioriate member function
depending on the type of access. Please note that send/recvRetry is
still shared by all the timing accessors and remains in the Port base
class for now (to maintain the current bus functionality and avoid
changing the statistics of all regressions).
The packet queue is split into a MasterPort and SlavePort version to
facilitate the use of the new timing accessors. All uses of the
PacketQueue are updated accordingly.
With this patch, the type of packet (request or response) is now well
defined for each type of access, and asserts on pkt->isRequest() and
pkt->isResponse() are now moved to the appropriate send member
functions. It is also worth noting that sendTimingSnoopReq no longer
returns a boolean, as the semantics do not alow snoop requests to be
rejected or stalled. All these assumptions are now excplicitly part of
the port interface itself.
2012-05-01 19:40:42 +02:00
|
|
|
virtual bool recvTimingResp(PacketPtr pkt);
|
2006-05-16 23:36:50 +02:00
|
|
|
|
2015-03-02 10:00:35 +01:00
|
|
|
virtual void recvReqRetry();
|
2006-07-21 01:00:40 +02:00
|
|
|
|
|
|
|
struct ITickEvent : public TickEvent
|
|
|
|
{
|
|
|
|
|
|
|
|
ITickEvent(TimingSimpleCPU *_cpu)
|
|
|
|
: TickEvent(_cpu) {}
|
|
|
|
void process();
|
2008-02-06 22:32:40 +01:00
|
|
|
const char *description() const { return "Timing CPU icache tick"; }
|
2006-07-21 01:00:40 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
ITickEvent tickEvent;
|
|
|
|
|
2006-05-16 23:36:50 +02:00
|
|
|
};
|
|
|
|
|
2012-01-17 19:55:08 +01:00
|
|
|
class DcachePort : public TimingCPUPort
|
2006-05-16 23:36:50 +02:00
|
|
|
{
|
|
|
|
public:
|
|
|
|
|
2012-01-17 19:55:08 +01:00
|
|
|
DcachePort(TimingSimpleCPU *_cpu)
|
2012-07-09 18:35:39 +02:00
|
|
|
: TimingCPUPort(_cpu->name() + ".dcache_port", _cpu),
|
|
|
|
tickEvent(_cpu)
|
2014-01-24 22:29:30 +01:00
|
|
|
{
|
|
|
|
cacheBlockMask = ~(cpu->cacheLineSize() - 1);
|
|
|
|
}
|
2006-05-16 23:36:50 +02:00
|
|
|
|
2014-01-24 22:29:30 +01:00
|
|
|
Addr cacheBlockMask;
|
2006-05-16 23:36:50 +02:00
|
|
|
protected:
|
|
|
|
|
2014-01-24 22:29:30 +01:00
|
|
|
/** Snoop a coherence request, we need to check if this causes
|
|
|
|
* a wakeup event on a cpu that is monitoring an address
|
|
|
|
*/
|
|
|
|
virtual void recvTimingSnoopReq(PacketPtr pkt);
|
2014-11-06 12:42:22 +01:00
|
|
|
virtual void recvFunctionalSnoop(PacketPtr pkt);
|
2014-01-24 22:29:30 +01:00
|
|
|
|
MEM: Separate requests and responses for timing accesses
This patch moves send/recvTiming and send/recvTimingSnoop from the
Port base class to the MasterPort and SlavePort, and also splits them
into separate member functions for requests and responses:
send/recvTimingReq, send/recvTimingResp, and send/recvTimingSnoopReq,
send/recvTimingSnoopResp. A master port sends requests and receives
responses, and also receives snoop requests and sends snoop
responses. A slave port has the reciprocal behaviour as it receives
requests and sends responses, and sends snoop requests and receives
snoop responses.
For all MemObjects that have only master ports or slave ports (but not
both), e.g. a CPU, or a PIO device, this patch merely adds more
clarity to what kind of access is taking place. For example, a CPU
port used to call sendTiming, and will now call
sendTimingReq. Similarly, a response previously came back through
recvTiming, which is now recvTimingResp. For the modules that have
both master and slave ports, e.g. the bus, the behaviour was
previously relying on branches based on pkt->isRequest(), and this is
now replaced with a direct call to the apprioriate member function
depending on the type of access. Please note that send/recvRetry is
still shared by all the timing accessors and remains in the Port base
class for now (to maintain the current bus functionality and avoid
changing the statistics of all regressions).
The packet queue is split into a MasterPort and SlavePort version to
facilitate the use of the new timing accessors. All uses of the
PacketQueue are updated accordingly.
With this patch, the type of packet (request or response) is now well
defined for each type of access, and asserts on pkt->isRequest() and
pkt->isResponse() are now moved to the appropriate send member
functions. It is also worth noting that sendTimingSnoopReq no longer
returns a boolean, as the semantics do not alow snoop requests to be
rejected or stalled. All these assumptions are now excplicitly part of
the port interface itself.
2012-05-01 19:40:42 +02:00
|
|
|
virtual bool recvTimingResp(PacketPtr pkt);
|
2006-05-16 23:36:50 +02:00
|
|
|
|
2015-03-02 10:00:35 +01:00
|
|
|
virtual void recvReqRetry();
|
2006-07-21 01:00:40 +02:00
|
|
|
|
2014-11-06 12:42:22 +01:00
|
|
|
virtual bool isSnooping() const {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2006-07-21 01:00:40 +02:00
|
|
|
struct DTickEvent : public TickEvent
|
|
|
|
{
|
|
|
|
DTickEvent(TimingSimpleCPU *_cpu)
|
|
|
|
: TickEvent(_cpu) {}
|
|
|
|
void process();
|
2008-02-06 22:32:40 +01:00
|
|
|
const char *description() const { return "Timing CPU dcache tick"; }
|
2006-07-21 01:00:40 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
DTickEvent tickEvent;
|
|
|
|
|
2006-05-16 23:36:50 +02:00
|
|
|
};
|
|
|
|
|
2014-10-16 11:49:41 +02:00
|
|
|
void updateCycleCounts();
|
|
|
|
|
2006-05-16 23:36:50 +02:00
|
|
|
IcachePort icachePort;
|
|
|
|
DcachePort dcachePort;
|
|
|
|
|
2006-10-20 09:10:12 +02:00
|
|
|
PacketPtr ifetch_pkt;
|
|
|
|
PacketPtr dcache_pkt;
|
2006-05-16 23:36:50 +02:00
|
|
|
|
2014-10-16 11:49:41 +02:00
|
|
|
Cycles previousCycle;
|
2006-10-08 06:55:05 +02:00
|
|
|
|
2012-02-24 17:42:00 +01:00
|
|
|
protected:
|
|
|
|
|
|
|
|
/** Return a reference to the data port. */
|
2015-10-12 10:08:01 +02:00
|
|
|
MasterPort &getDataPort() override { return dcachePort; }
|
2006-05-16 23:36:50 +02:00
|
|
|
|
2012-02-24 17:42:00 +01:00
|
|
|
/** Return a reference to the instruction port. */
|
2015-10-12 10:08:01 +02:00
|
|
|
MasterPort &getInstPort() override { return icachePort; }
|
2012-02-24 17:42:00 +01:00
|
|
|
|
|
|
|
public:
|
2006-07-07 21:15:11 +02:00
|
|
|
|
2015-10-12 10:07:59 +02:00
|
|
|
DrainState drain() override;
|
|
|
|
void drainResume() override;
|
2006-06-30 01:45:24 +02:00
|
|
|
|
2015-10-12 10:08:01 +02:00
|
|
|
void switchOut() override;
|
|
|
|
void takeOverFrom(BaseCPU *oldCPU) override;
|
2006-05-16 23:36:50 +02:00
|
|
|
|
2015-10-12 10:08:01 +02:00
|
|
|
void verifyMemoryMode() const override;
|
2013-02-15 23:40:08 +01:00
|
|
|
|
2015-10-12 10:08:01 +02:00
|
|
|
void activateContext(ThreadID thread_num) override;
|
|
|
|
void suspendContext(ThreadID thread_num) override;
|
2006-05-16 23:36:50 +02:00
|
|
|
|
2015-10-12 10:08:01 +02:00
|
|
|
Fault readMem(Addr addr, uint8_t *data, unsigned size,
|
2016-08-15 13:00:35 +02:00
|
|
|
Request::Flags flags) override;
|
2010-08-13 15:16:02 +02:00
|
|
|
|
2016-08-15 13:00:35 +02:00
|
|
|
Fault initiateMemRead(Addr addr, unsigned size,
|
|
|
|
Request::Flags flags) override;
|
cpu. arch: add initiateMemRead() to ExecContext interface
For historical reasons, the ExecContext interface had a single
function, readMem(), that did two different things depending on
whether the ExecContext supported atomic memory mode (i.e.,
AtomicSimpleCPU) or timing memory mode (all the other models).
In the former case, it actually performed a memory read; in the
latter case, it merely initiated a read access, and the read
completion did not happen until later when a response packet
arrived from the memory system.
This led to some confusing things, including timing accesses
being required to provide a pointer for the return data even
though that pointer was only used in atomic mode.
This patch splits this interface, adding a new initiateMemRead()
function to the ExecContext interface to replace the timing-mode
use of readMem().
For consistency and clarity, the readMemTiming() helper function
in the ISA definitions is renamed to initiateMemRead() as well.
For x86, where the access size is passed in explicitly, we can
also get rid of the data parameter at this level. For other ISAs,
where the access size is determined from the type of the data
parameter, we have to keep the parameter for that purpose.
2016-01-18 03:27:46 +01:00
|
|
|
|
2011-07-03 07:35:04 +02:00
|
|
|
Fault writeMem(uint8_t *data, unsigned size,
|
2016-08-15 13:00:35 +02:00
|
|
|
Addr addr, Request::Flags flags, uint64_t *res) override;
|
2010-08-13 15:16:02 +02:00
|
|
|
|
2006-05-16 23:36:50 +02:00
|
|
|
void fetch();
|
2014-09-19 16:35:18 +02:00
|
|
|
void sendFetch(const Fault &fault, RequestPtr req, ThreadContext *tc);
|
2006-10-20 09:10:12 +02:00
|
|
|
void completeIfetch(PacketPtr );
|
2009-02-25 19:16:15 +01:00
|
|
|
void completeDataAccess(PacketPtr pkt);
|
2014-09-19 16:35:18 +02:00
|
|
|
void advanceInst(const Fault &fault);
|
2007-05-21 06:43:01 +02:00
|
|
|
|
2012-09-25 18:49:40 +02:00
|
|
|
/** This function is used by the page table walker to determine if it could
|
|
|
|
* translate the a pending request or if the underlying request has been
|
|
|
|
* squashed. This always returns false for the simple timing CPU as it never
|
|
|
|
* executes any instructions speculatively.
|
|
|
|
* @ return Is the current instruction squashed?
|
|
|
|
*/
|
|
|
|
bool isSquashed() const { return false; }
|
|
|
|
|
2008-01-02 22:46:22 +01:00
|
|
|
/**
|
|
|
|
* Print state of address in memory system via PrintReq (for
|
|
|
|
* debugging).
|
|
|
|
*/
|
|
|
|
void printAddr(Addr a);
|
|
|
|
|
2010-02-12 20:53:19 +01:00
|
|
|
/**
|
|
|
|
* Finish a DTB translation.
|
|
|
|
* @param state The DTB translation state.
|
|
|
|
*/
|
|
|
|
void finishTranslation(WholeTranslationState *state);
|
|
|
|
|
2006-06-30 01:45:24 +02:00
|
|
|
private:
|
2007-05-21 06:43:01 +02:00
|
|
|
|
|
|
|
typedef EventWrapper<TimingSimpleCPU, &TimingSimpleCPU::fetch> FetchEvent;
|
2008-10-27 23:18:04 +01:00
|
|
|
FetchEvent fetchEvent;
|
2007-05-21 06:43:01 +02:00
|
|
|
|
2007-10-01 08:55:27 +02:00
|
|
|
struct IprEvent : Event {
|
|
|
|
Packet *pkt;
|
|
|
|
TimingSimpleCPU *cpu;
|
|
|
|
IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu, Tick t);
|
|
|
|
virtual void process();
|
2008-02-06 22:32:40 +01:00
|
|
|
virtual const char *description() const;
|
2007-10-01 08:55:27 +02:00
|
|
|
};
|
|
|
|
|
2013-01-07 19:05:46 +01:00
|
|
|
/**
|
|
|
|
* Check if a system is in a drained state.
|
|
|
|
*
|
|
|
|
* We need to drain if:
|
|
|
|
* <ul>
|
|
|
|
* <li>We are in the middle of a microcode sequence as some CPUs
|
|
|
|
* (e.g., HW accelerated CPUs) can't be started in the middle
|
|
|
|
* of a gem5 microcode sequence.
|
|
|
|
*
|
|
|
|
* <li>Stay at PC is true.
|
2013-08-19 09:52:30 +02:00
|
|
|
*
|
|
|
|
* <li>A fetch event is scheduled. Normally this would never be the
|
2013-08-20 17:21:27 +02:00
|
|
|
* case with microPC() == 0, but right after a context is
|
|
|
|
* activated it can happen.
|
2013-01-07 19:05:46 +01:00
|
|
|
* </ul>
|
|
|
|
*/
|
|
|
|
bool isDrained() {
|
2015-09-30 18:14:19 +02:00
|
|
|
SimpleExecContext& t_info = *threadInfo[curThread];
|
|
|
|
SimpleThread* thread = t_info.thread;
|
|
|
|
|
|
|
|
return thread->microPC() == 0 && !t_info.stayAtPC &&
|
|
|
|
!fetchEvent.scheduled();
|
2013-01-07 19:05:46 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Try to complete a drain request.
|
|
|
|
*
|
|
|
|
* @returns true if the CPU is drained, false otherwise.
|
|
|
|
*/
|
|
|
|
bool tryCompleteDrain();
|
2006-05-16 23:36:50 +02:00
|
|
|
};

#endif // __CPU_SIMPLE_TIMING_HH__