2006-01-31 18:12:49 +01:00
|
|
|
/*
|
2016-08-12 15:11:45 +02:00
|
|
|
* Copyright (c) 2012-2016 ARM Limited
|
MEM: Remove the Broadcast destination from the packet
This patch simplifies the packet by removing the broadcast flag and
instead more firmly relying on (and enforcing) the semantics of
transactions in the classic memory system, i.e. request packets are
routed from a master to a slave based on the address, and when they
are created they have neither a valid source, nor destination. On
their way to the slave, the request packet is updated with a source
field for all modules that multiplex packets from multiple master
(e.g. a bus). When a request packet is turned into a response packet
(at the final slave), it moves the potentially populated source field
to the destination field, and the response packet is routed through
any multiplexing components back to the master based on the
destination field.
Modules that connect multiplexing components, such as caches and
bridges store any existing source and destination field in the sender
state as a stack (just as before).
The packet constructor is simplified in that there is no longer a need
to pass the Packet::Broadcast as the destination (this was always the
case for the classic memory system). In the case of Ruby, rather than
using the parameter to the constructor we now rely on setDest, as
there is already another three-argument constructor in the packet
class.
In many places where the packet information was printed as part of
DPRINTFs, request packets would be printed with a numeric "dest" that
would always be -1 (Broadcast) and that field is now removed from the
printing.
2012-04-14 11:45:55 +02:00
|
|
|
* All rights reserved
|
|
|
|
*
|
|
|
|
* The license below extends only to copyright in the software and shall
|
|
|
|
* not be construed as granting a license to any other intellectual
|
|
|
|
* property including but not limited to intellectual property relating
|
|
|
|
* to a hardware implementation of the functionality of the software
|
|
|
|
* licensed hereunder. You may use the software subject to the license
|
|
|
|
* terms below provided that you ensure that this notice is replicated
|
|
|
|
* unmodified and in its entirety in all distributions of the software,
|
|
|
|
* modified or unmodified, in source code or in binary form.
|
|
|
|
*
|
2006-05-02 00:53:28 +02:00
|
|
|
* Copyright (c) 2006 The Regents of The University of Michigan
|
2015-07-20 16:15:18 +02:00
|
|
|
* Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
|
2006-01-31 18:12:49 +01:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions are
|
|
|
|
* met: redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer;
|
|
|
|
* redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution;
|
|
|
|
* neither the name of the copyright holders nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived from
|
|
|
|
* this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
2006-06-01 01:26:56 +02:00
|
|
|
*
|
|
|
|
* Authors: Ron Dreslinski
|
|
|
|
* Steve Reinhardt
|
|
|
|
* Ali Saidi
|
2012-05-30 11:29:42 +02:00
|
|
|
* Andreas Hansson
|
2006-01-31 18:12:49 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @file
|
2006-05-31 04:30:42 +02:00
|
|
|
* Declaration of the Packet class.
|
2006-01-31 18:12:49 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __MEM_PACKET_HH__
|
|
|
|
#define __MEM_PACKET_HH__
|
|
|
|
|
2011-04-15 19:44:06 +02:00
|
|
|
#include <bitset>
|
2006-10-20 08:38:45 +02:00
|
|
|
#include <cassert>
|
|
|
|
#include <list>
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
#include "base/cast.hh"
|
2007-02-07 07:31:15 +01:00
|
|
|
#include "base/compiler.hh"
|
2008-11-10 20:51:17 +01:00
|
|
|
#include "base/flags.hh"
|
2007-01-27 21:38:04 +01:00
|
|
|
#include "base/misc.hh"
|
2008-01-02 21:20:15 +01:00
|
|
|
#include "base/printable.hh"
|
2009-05-17 23:34:50 +02:00
|
|
|
#include "base/types.hh"
|
2009-05-17 23:34:52 +02:00
|
|
|
#include "mem/request.hh"
|
2007-03-06 20:13:43 +01:00
|
|
|
#include "sim/core.hh"
|
2006-02-15 20:21:09 +01:00
|
|
|
|
2012-01-31 18:05:52 +01:00
|
|
|
class Packet;

/// Packets are passed by pointer throughout the memory system.
typedef Packet *PacketPtr;
/// Pointer to a packet's raw payload bytes.
typedef uint8_t* PacketDataPtr;
/// An ordered collection of packet pointers.
typedef std::list<PacketPtr> PacketList;
|
|
|
|
|
2007-02-07 19:53:37 +01:00
|
|
|
class MemCmd
{
    friend class Packet;

  public:
    /**
     * List of all commands associated with a packet.
     *
     * NOTE: the enumerator order is significant -- it is used to
     * index the out-of-line commandInfo[] table, so new commands must
     * be added in step with that table and existing entries must not
     * be reordered.
     */
    enum Command
    {
        InvalidCmd,
        ReadReq,
        ReadResp,
        ReadRespWithInvalidate,
        WriteReq,
        WriteResp,
        WritebackDirty,
        WritebackClean,
        CleanEvict,
        SoftPFReq,
        HardPFReq,
        SoftPFResp,
        HardPFResp,
        WriteLineReq,
        UpgradeReq,
        SCUpgradeReq,           // Special "weak" upgrade for StoreCond
        UpgradeResp,
        SCUpgradeFailReq,       // Failed SCUpgradeReq in MSHR (never sent)
        UpgradeFailResp,        // Valid for SCUpgradeReq only
        ReadExReq,
        ReadExResp,
        ReadCleanReq,
        ReadSharedReq,
        LoadLockedReq,
        StoreCondReq,
        StoreCondFailReq,       // Failed StoreCondReq in MSHR (never sent)
        StoreCondResp,
        SwapReq,
        SwapResp,
        MessageReq,
        MessageResp,
        MemFenceReq,
        MemFenceResp,
        // Error responses
        // @TODO these should be classified as responses rather than
        // requests; coding them as requests initially for backwards
        // compatibility
        InvalidDestError,       // packet dest field invalid
        BadAddressError,        // memory address invalid
        FunctionalReadError,    // unable to fulfill functional read
        FunctionalWriteError,   // unable to fulfill functional write
        // Fake simulator-only commands
        PrintReq,               // Print state matching address
        FlushReq,               // request for a cache flush
        InvalidateReq,          // request for address to be invalidated
        InvalidateResp,
        NUM_MEM_CMDS
    };

  private:
    /**
     * List of command attributes.
     *
     * NOTE: the enumerator order defines the bit positions used by
     * the attribute bitsets in commandInfo[]; do not reorder.
     */
    enum Attribute
    {
        IsRead,         //!< Data flows from responder to requester
        IsWrite,        //!< Data flows from requester to responder
        IsUpgrade,
        IsInvalidate,
        NeedsWritable,  //!< Requires writable copy to complete in-cache
        IsRequest,      //!< Issued by requester
        IsResponse,     //!< Issued by responder
        NeedsResponse,  //!< Requester needs response from target
        IsEviction,
        IsSWPrefetch,
        IsHWPrefetch,
        IsLlsc,         //!< Alpha/MIPS LL or SC access
        HasData,        //!< There is an associated payload
        IsError,        //!< Error response
        IsPrint,        //!< Print state matching address (for debugging)
        IsFlush,        //!< Flush the address from caches
        FromCache,      //!< Request originated from a caching agent
        NUM_COMMAND_ATTRIBUTES
    };

    /**
     * Structure that defines attributes and other data associated
     * with a Command.
     */
    struct CommandInfo
    {
        /// Set of attribute flags.
        const std::bitset<NUM_COMMAND_ATTRIBUTES> attributes;
        /// Corresponding response for requests; InvalidCmd if no
        /// response is applicable.
        const Command response;
        /// String representation (for printing)
        const std::string str;
    };

    /// Array to map Command enum to associated info.
    static const CommandInfo commandInfo[];

  private:
    /// The command held by this instance; indexes commandInfo.
    Command cmd;

    /// Look up whether the given attribute is set for this command.
    bool
    testCmdAttrib(MemCmd::Attribute attrib) const
    {
        return commandInfo[cmd].attributes[attrib] != 0;
    }

  public:

    bool isRead() const            { return testCmdAttrib(IsRead); }
    bool isWrite() const           { return testCmdAttrib(IsWrite); }
    bool isUpgrade() const         { return testCmdAttrib(IsUpgrade); }
    bool isRequest() const         { return testCmdAttrib(IsRequest); }
    bool isResponse() const        { return testCmdAttrib(IsResponse); }
    bool needsWritable() const     { return testCmdAttrib(NeedsWritable); }
    bool needsResponse() const     { return testCmdAttrib(NeedsResponse); }
    bool isInvalidate() const      { return testCmdAttrib(IsInvalidate); }
    bool isEviction() const        { return testCmdAttrib(IsEviction); }
    bool fromCache() const         { return testCmdAttrib(FromCache); }

    /**
     * A writeback is an eviction that carries data.
     */
    bool isWriteback() const       { return testCmdAttrib(IsEviction) &&
                                            testCmdAttrib(HasData); }

    /**
     * Check if this particular packet type carries payload data. Note
     * that this does not reflect if the data pointer of the packet is
     * valid or not.
     */
    bool hasData() const           { return testCmdAttrib(HasData); }
    bool isLLSC() const            { return testCmdAttrib(IsLlsc); }
    bool isSWPrefetch() const      { return testCmdAttrib(IsSWPrefetch); }
    bool isHWPrefetch() const      { return testCmdAttrib(IsHWPrefetch); }
    bool isPrefetch() const        { return testCmdAttrib(IsSWPrefetch) ||
                                            testCmdAttrib(IsHWPrefetch); }
    bool isError() const           { return testCmdAttrib(IsError); }
    bool isPrint() const           { return testCmdAttrib(IsPrint); }
    bool isFlush() const           { return testCmdAttrib(IsFlush); }

    /// Return the matching response command for this (request)
    /// command, or InvalidCmd if no response is applicable.
    Command
    responseCommand() const
    {
        return commandInfo[cmd].response;
    }

    /// Return the string to a cmd given by idx.
    const std::string &toString() const { return commandInfo[cmd].str; }
    int toInt() const { return (int)cmd; }

    MemCmd(Command _cmd) : cmd(_cmd) { }
    MemCmd(int _cmd) : cmd((Command)_cmd) { }
    MemCmd() : cmd(InvalidCmd) { }

    bool operator==(MemCmd c2) const { return (cmd == c2.cmd); }
    bool operator!=(MemCmd c2) const { return (cmd != c2.cmd); }
};
|
|
|
|
|
2006-01-31 18:12:49 +01:00
|
|
|
/**
|
2006-05-31 04:30:42 +02:00
|
|
|
* A Packet is used to encapsulate a transfer between two objects in
|
|
|
|
* the memory system (e.g., the L1 and L2 cache). (In contrast, a
|
|
|
|
* single Request travels all the way from the requester to the
|
|
|
|
* ultimate destination and back, possibly being conveyed by several
|
|
|
|
* different Packets along the way.)
|
2006-01-31 18:12:49 +01:00
|
|
|
*/
|
2012-06-05 07:23:08 +02:00
|
|
|
class Packet : public Printable
|
2006-01-31 18:12:49 +01:00
|
|
|
{
|
2006-06-29 22:07:19 +02:00
|
|
|
public:
|
2008-11-10 20:51:17 +01:00
|
|
|
typedef uint32_t FlagsType;
|
|
|
|
typedef ::Flags<FlagsType> Flags;
|
|
|
|
|
|
|
|
private:
|
|
|
|
|
2015-07-30 09:41:38 +02:00
|
|
|
enum : FlagsType {
|
|
|
|
// Flags to transfer across when copying a packet
|
|
|
|
COPY_FLAGS = 0x0000000F,
|
|
|
|
|
2015-12-31 15:32:58 +01:00
|
|
|
// Does this packet have sharers (which means it should not be
|
|
|
|
// considered writable) or not. See setHasSharers below.
|
|
|
|
HAS_SHARERS = 0x00000001,
|
|
|
|
|
2015-07-30 09:41:38 +02:00
|
|
|
// Special control flags
|
|
|
|
/// Special timing-mode atomic snoop for multi-level coherence.
|
|
|
|
EXPRESS_SNOOP = 0x00000002,
|
2015-12-31 15:32:58 +01:00
|
|
|
|
|
|
|
/// Allow a responding cache to inform the cache hierarchy
|
|
|
|
/// that it had a writable copy before responding. See
|
|
|
|
/// setResponderHadWritable below.
|
|
|
|
RESPONDER_HAD_WRITABLE = 0x00000004,
|
|
|
|
|
|
|
|
// Snoop co-ordination flag to indicate that a cache is
|
|
|
|
// responding to a snoop. See setCacheResponding below.
|
|
|
|
CACHE_RESPONDING = 0x00000008,
|
2015-07-30 09:41:38 +02:00
|
|
|
|
2015-08-24 11:03:45 +02:00
|
|
|
/// Are the 'addr' and 'size' fields valid?
|
|
|
|
VALID_ADDR = 0x00000100,
|
|
|
|
VALID_SIZE = 0x00000200,
|
|
|
|
|
2015-07-30 09:41:38 +02:00
|
|
|
/// Is the data pointer set to a value that shouldn't be freed
|
|
|
|
/// when the packet is destroyed?
|
|
|
|
STATIC_DATA = 0x00001000,
|
|
|
|
/// The data pointer points to a value that should be freed when
|
|
|
|
/// the packet is destroyed. The pointer is assumed to be pointing
|
|
|
|
/// to an array, and delete [] is consequently called
|
|
|
|
DYNAMIC_DATA = 0x00002000,
|
|
|
|
|
|
|
|
/// suppress the error if this packet encounters a functional
|
|
|
|
/// access failure.
|
|
|
|
SUPPRESS_FUNC_ERROR = 0x00008000,
|
|
|
|
|
|
|
|
// Signal block present to squash prefetch and cache evict packets
|
|
|
|
// through express snoop flag
|
|
|
|
BLOCK_CACHED = 0x00010000
|
|
|
|
};
|
2008-11-10 20:51:17 +01:00
|
|
|
|
|
|
|
Flags flags;
|
2007-02-07 19:53:37 +01:00
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
public:
|
2007-02-07 19:53:37 +01:00
|
|
|
typedef MemCmd::Command Command;
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/// The command field of the packet.
|
2007-06-30 19:16:18 +02:00
|
|
|
MemCmd cmd;
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/// A pointer to the original request.
|
2014-12-02 12:07:50 +01:00
|
|
|
const RequestPtr req;
|
2007-06-30 19:16:18 +02:00
|
|
|
|
2006-04-25 01:31:50 +02:00
|
|
|
private:
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
|
|
|
|
* A pointer to the data being transferred. It can be different
|
|
|
|
* sizes at each level of the hierarchy so it belongs in the
|
|
|
|
* packet, not request. This may or may not be populated when a
|
|
|
|
* responder receives the packet. If not populated, memory should
|
|
|
|
* be allocated.
|
2006-04-25 01:31:50 +02:00
|
|
|
*/
|
|
|
|
PacketDataPtr data;
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/// The address of the request. This address could be virtual or
|
|
|
|
/// physical, depending on the system configuration.
|
2006-01-31 18:12:49 +01:00
|
|
|
Addr addr;
|
|
|
|
|
2014-01-24 22:29:30 +01:00
|
|
|
/// True if the request targets the secure memory space.
|
|
|
|
bool _isSecure;
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/// The size of the request or transfer.
|
2009-06-05 08:21:12 +02:00
|
|
|
unsigned size;
|
2006-05-26 20:17:33 +02:00
|
|
|
|
2012-01-10 01:10:05 +01:00
|
|
|
/**
|
2015-03-02 10:00:52 +01:00
|
|
|
* Track the bytes found that satisfy a functional read.
|
2012-01-10 01:10:05 +01:00
|
|
|
*/
|
2015-03-02 10:00:52 +01:00
|
|
|
std::vector<bool> bytesValid;
|
2012-01-10 01:10:05 +01:00
|
|
|
|
2006-05-26 20:17:33 +02:00
|
|
|
public:
|
2006-06-28 20:35:00 +02:00
|
|
|
|
2013-02-19 11:56:06 +01:00
|
|
|
/**
|
2015-02-11 16:23:47 +01:00
|
|
|
* The extra delay from seeing the packet until the header is
|
2014-09-20 23:18:32 +02:00
|
|
|
* transmitted. This delay is used to communicate the crossbar
|
|
|
|
* forwarding latency to the neighbouring object (e.g. a cache)
|
|
|
|
* that actually makes the packet wait. As the delay is relative,
|
|
|
|
* a 32-bit unsigned should be sufficient.
|
2013-02-19 11:56:06 +01:00
|
|
|
*/
|
2015-02-11 16:23:47 +01:00
|
|
|
uint32_t headerDelay;
|
2006-10-10 05:24:21 +02:00
|
|
|
|
2015-09-25 13:13:54 +02:00
|
|
|
/**
|
|
|
|
* Keep track of the extra delay incurred by snooping upwards
|
|
|
|
* before sending a request down the memory system. This is used
|
|
|
|
* by the coherent crossbar to account for the additional request
|
|
|
|
* delay.
|
|
|
|
*/
|
|
|
|
uint32_t snoopDelay;
|
|
|
|
|
2013-02-19 11:56:06 +01:00
|
|
|
/**
|
2015-02-11 16:23:47 +01:00
|
|
|
* The extra pipelining delay from seeing the packet until the end of
|
|
|
|
* payload is transmitted by the component that provided it (if
|
|
|
|
* any). This includes the header delay. Similar to the header
|
|
|
|
* delay, this is used to make up for the fact that the
|
2014-09-20 23:18:32 +02:00
|
|
|
* crossbar does not make the packet wait. As the delay is
|
|
|
|
* relative, a 32-bit unsigned should be sufficient.
|
2013-02-19 11:56:06 +01:00
|
|
|
*/
|
2015-02-11 16:23:47 +01:00
|
|
|
uint32_t payloadDelay;
|
2006-10-10 05:24:21 +02:00
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
|
|
|
|
* A virtual base opaque structure used to hold state associated
|
2013-02-19 11:56:05 +01:00
|
|
|
* with the packet (e.g., an MSHR), specific to a MemObject that
|
|
|
|
* sees the packet. A pointer to this state is returned in the
|
|
|
|
* packet's response so that the MemObject in question can quickly
|
|
|
|
* look up the state needed to process it. A specific subclass
|
|
|
|
* would be derived from this to carry state specific to a
|
|
|
|
* particular sending device.
|
|
|
|
*
|
|
|
|
* As multiple MemObjects may add their SenderState throughout the
|
|
|
|
* memory system, the SenderStates create a stack, where a
|
|
|
|
* MemObject can add a new SenderState, as long as the
|
|
|
|
* preceding SenderState is restored when the response comes
|
|
|
|
* back. For this reason, the predecessor should always be
|
|
|
|
* populated with the current SenderState of a packet before
|
|
|
|
* modifying the senderState field in the request packet.
|
2008-11-10 20:51:17 +01:00
|
|
|
*/
|
|
|
|
struct SenderState
|
|
|
|
{
|
2013-02-19 11:56:05 +01:00
|
|
|
SenderState* predecessor;
|
|
|
|
SenderState() : predecessor(NULL) {}
|
2006-05-26 20:17:33 +02:00
|
|
|
virtual ~SenderState() {}
|
|
|
|
};
|
2006-01-31 18:12:49 +01:00
|
|
|
|
2008-01-02 22:46:22 +01:00
|
|
|
/**
|
|
|
|
* Object used to maintain state of a PrintReq. The senderState
|
|
|
|
* field of a PrintReq should always be of this type.
|
|
|
|
*/
|
2012-06-05 07:23:08 +02:00
|
|
|
class PrintReqState : public SenderState
|
2008-11-10 20:51:17 +01:00
|
|
|
{
|
|
|
|
private:
|
|
|
|
/**
|
|
|
|
* An entry in the label stack.
|
|
|
|
*/
|
|
|
|
struct LabelStackEntry
|
|
|
|
{
|
2008-01-02 21:20:15 +01:00
|
|
|
const std::string label;
|
|
|
|
std::string *prefix;
|
|
|
|
bool labelPrinted;
|
2008-11-10 20:51:17 +01:00
|
|
|
LabelStackEntry(const std::string &_label, std::string *_prefix);
|
2008-01-02 21:20:15 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
typedef std::list<LabelStackEntry> LabelStack;
|
|
|
|
LabelStack labelStack;
|
|
|
|
|
|
|
|
std::string *curPrefixPtr;
|
|
|
|
|
|
|
|
public:
|
|
|
|
std::ostream &os;
|
|
|
|
const int verbosity;
|
|
|
|
|
|
|
|
PrintReqState(std::ostream &os, int verbosity = 0);
|
|
|
|
~PrintReqState();
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
|
|
|
|
* Returns the current line prefix.
|
|
|
|
*/
|
2008-01-02 21:20:15 +01:00
|
|
|
const std::string &curPrefix() { return *curPrefixPtr; }
|
2008-01-02 22:46:22 +01:00
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
|
|
|
|
* Push a label onto the label stack, and prepend the given
|
2008-01-02 22:46:22 +01:00
|
|
|
* prefix string onto the current prefix. Labels will only be
|
2008-11-10 20:51:17 +01:00
|
|
|
* printed if an object within the label's scope is printed.
|
|
|
|
*/
|
2008-01-02 21:20:15 +01:00
|
|
|
void pushLabel(const std::string &lbl,
|
|
|
|
const std::string &prefix = " ");
|
2008-11-10 20:51:17 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Pop a label off the label stack.
|
|
|
|
*/
|
2008-01-02 21:20:15 +01:00
|
|
|
void popLabel();
|
2008-11-10 20:51:17 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Print all of the pending unprinted labels on the
|
2008-01-02 22:46:22 +01:00
|
|
|
* stack. Called by printObj(), so normally not called by
|
2008-11-10 20:51:17 +01:00
|
|
|
* users unless bypassing printObj().
|
|
|
|
*/
|
2008-01-02 21:20:15 +01:00
|
|
|
void printLabels();
|
2008-11-10 20:51:17 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Print a Printable object to os, because it matched the
|
|
|
|
* address on a PrintReq.
|
|
|
|
*/
|
2008-01-02 21:20:15 +01:00
|
|
|
void printObj(Printable *obj);
|
|
|
|
};
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
|
|
|
|
* This packet's sender state. Devices should use dynamic_cast<>
|
|
|
|
* to cast to the state appropriate to the sender. The intent of
|
|
|
|
* this variable is to allow a device to attach extra information
|
2013-02-19 11:56:05 +01:00
|
|
|
* to a request. A response packet must return the sender state
|
2008-11-10 20:51:17 +01:00
|
|
|
* that was attached to the original request (even if a new packet
|
|
|
|
* is created).
|
|
|
|
*/
|
2006-05-31 04:30:42 +02:00
|
|
|
SenderState *senderState;
|
2006-01-31 18:12:49 +01:00
|
|
|
|
2013-02-19 11:56:05 +01:00
|
|
|
/**
|
|
|
|
* Push a new sender state to the packet and make the current
|
|
|
|
* sender state the predecessor of the new one. This should be
|
|
|
|
* preferred over direct manipulation of the senderState member
|
|
|
|
* variable.
|
|
|
|
*
|
|
|
|
* @param sender_state SenderState to push at the top of the stack
|
|
|
|
*/
|
|
|
|
void pushSenderState(SenderState *sender_state);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Pop the top of the state stack and return a pointer to it. This
|
|
|
|
* assumes the current sender state is not NULL. This should be
|
|
|
|
* preferred over direct manipulation of the senderState member
|
|
|
|
* variable.
|
|
|
|
*
|
|
|
|
* @return The current top of the stack
|
|
|
|
*/
|
|
|
|
SenderState *popSenderState();
|
|
|
|
|
2013-02-19 11:56:06 +01:00
|
|
|
/**
|
|
|
|
* Go through the sender state stack and return the first instance
|
|
|
|
* that is of type T (as determined by a dynamic_cast). If there
|
|
|
|
* is no sender state of type T, NULL is returned.
|
|
|
|
*
|
|
|
|
* @return The topmost state of type T
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
T * findNextSenderState() const
|
|
|
|
{
|
|
|
|
T *t = NULL;
|
|
|
|
SenderState* sender_state = senderState;
|
|
|
|
while (t == NULL && sender_state != NULL) {
|
|
|
|
t = dynamic_cast<T*>(sender_state);
|
|
|
|
sender_state = sender_state->predecessor;
|
|
|
|
}
|
|
|
|
return t;
|
|
|
|
}
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/// Return the string name of the cmd field (for debugging and
/// tracing).
const std::string &cmdString() const { return cmd.toString(); }

/// Return the index of this command.
inline int cmdToIndex() const { return cmd.toInt(); }

// Command-property queries, all delegated to the MemCmd attributes.
bool isRead() const { return cmd.isRead(); }
bool isWrite() const { return cmd.isWrite(); }
bool isUpgrade() const { return cmd.isUpgrade(); }
bool isRequest() const { return cmd.isRequest(); }
bool isResponse() const { return cmd.isResponse(); }
|
mem: Do not rely on the NeedsWritable flag for responses
This patch removes the NeedsWritable flag for all responses, as it is
really only the request that needs a writable response. The response,
on the other hand, should in these cases always provide the line in a
writable state, as indicated by the hasSharers flag not being set.
When we send requests that has NeedsWritable set, the response will
always have the hasSharers flag not set. Additionally, there are cases
where the request did not have NeedsWritable set, and we still get a
writable response with the hasSharers flag not set. This never happens
on snoops, but is used by downstream caches to pass ownership
upstream.
As part of this patch, the affected response types are updated, and
the snoop filter is similarly modified to check only the hasSharers
flag (as it should). A sanity check is also added to the packet class,
asserting that we never look at the NeedsWritable flag for responses.
No regressions are affected.
2015-12-31 15:34:18 +01:00
|
|
|
/**
 * Does this packet's request need a writable copy of the line?
 * Only valid for requests; for responses use hasSharers() instead.
 */
bool needsWritable() const
{
    // we should never check if a response needsWritable, the
    // request has this flag, and for a response we should rather
    // look at the hasSharers flag (if not set, the response is to
    // be considered writable)
    assert(isRequest());
    return cmd.needsWritable();
}
|
2014-12-02 12:08:19 +01:00
|
|
|
/// Does the command require the receiver to send a response?
bool needsResponse() const { return cmd.needsResponse(); }
/// Does this packet invalidate other copies of the line?
bool isInvalidate() const { return cmd.isInvalidate(); }
/// Is this an eviction packet?
bool isEviction() const { return cmd.isEviction(); }
/// Did this packet originate in a cache?
bool fromCache() const { return cmd.fromCache(); }
/// Is this a writeback?
bool isWriteback() const { return cmd.isWriteback(); }
/// Does this packet carry a data payload?
bool hasData() const { return cmd.hasData(); }
|
2015-12-31 15:33:39 +01:00
|
|
|
bool hasRespData() const
|
|
|
|
{
|
|
|
|
MemCmd resp_cmd = cmd.responseCommand();
|
|
|
|
return resp_cmd.hasData();
|
|
|
|
}
|
2014-12-02 12:08:19 +01:00
|
|
|
/// Is this a load-locked / store-conditional access?
bool isLLSC() const { return cmd.isLLSC(); }
/// Does the command encode an error condition?
bool isError() const { return cmd.isError(); }
/// Is this a (debug) print request?
bool isPrint() const { return cmd.isPrint(); }
/// Is this a flush request?
bool isFlush() const { return cmd.isFlush(); }
|
2007-06-30 19:16:18 +02:00
|
|
|
|
2015-12-31 15:32:58 +01:00
|
|
|
//@{
|
|
|
|
/// Snoop flags
|
|
|
|
/**
|
|
|
|
* Set the cacheResponding flag. This is used by the caches to
|
|
|
|
* signal another cache that they are responding to a request. A
|
|
|
|
* cache will only respond to snoops if it has the line in either
|
|
|
|
* Modified or Owned state. Note that on snoop hits we always pass
|
|
|
|
* the line as Modified and never Owned. In the case of an Owned
|
|
|
|
* line we proceed to invalidate all other copies.
|
|
|
|
*
|
|
|
|
* On a cache fill (see Cache::handleFill), we check hasSharers
|
|
|
|
* first, ignoring the cacheResponding flag if hasSharers is set.
|
|
|
|
* A line is consequently allocated as:
|
|
|
|
*
|
|
|
|
* hasSharers cacheResponding state
|
|
|
|
* true false Shared
|
|
|
|
* true true Shared
|
|
|
|
* false false Exclusive
|
|
|
|
* false true Modified
|
|
|
|
*/
|
|
|
|
void setCacheResponding()
{
    // only request packets may be marked; the responding cache
    // sets this on the snooped request
    assert(isRequest());
    // the flag is set at most once per packet
    assert(!flags.isSet(CACHE_RESPONDING));
    flags.set(CACHE_RESPONDING);
}
/// Is a cache responding to this packet (see setCacheResponding)?
bool cacheResponding() const { return flags.isSet(CACHE_RESPONDING); }
|
|
|
|
/**
|
|
|
|
* On fills, the hasSharers flag is used by the caches in
|
|
|
|
* combination with the cacheResponding flag, as clarified
|
|
|
|
* above. If the hasSharers flag is not set, the packet is passing
|
|
|
|
* writable. Thus, a response from a memory passes the line as
|
|
|
|
* writable by default.
|
|
|
|
*
|
|
|
|
* The hasSharers flag is also used by upstream caches to inform a
|
|
|
|
* downstream cache that they have the block (by calling
|
|
|
|
* setHasSharers on snoop request packets that hit in upstream
|
|
|
|
* caches' tags or MSHRs). If the snoop packet has sharers, a
|
|
|
|
* downstream cache is prevented from passing a dirty line upwards
|
|
|
|
* if it was not explicitly asked for a writable copy. See
|
|
|
|
* Cache::satisfyCpuSideRequest.
|
|
|
|
*
|
|
|
|
* The hasSharers flag is also used on writebacks, in
|
|
|
|
* combination with the WritebackClean or WritebackDirty commands,
|
|
|
|
* to allocate the block downstream either as:
|
|
|
|
*
|
|
|
|
* command hasSharers state
|
|
|
|
* WritebackDirty false Modified
|
|
|
|
* WritebackDirty true Owned
|
|
|
|
* WritebackClean false Exclusive
|
|
|
|
* WritebackClean true Shared
|
|
|
|
*/
|
|
|
|
/// Mark that shared copies of the line exist (see the fill-state
/// table documented above).
void setHasSharers() { flags.set(HAS_SHARERS); }
bool hasSharers() const { return flags.isSet(HAS_SHARERS); }
//@}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* The express snoop flag is used for two purposes. Firstly, it is
|
|
|
|
* used to bypass flow control for normal (non-snoop) requests
|
|
|
|
* going downstream in the memory system. In cases where a cache
|
|
|
|
* is responding to a snoop from another cache (it had a dirty
|
|
|
|
* line), but the line is not writable (and there are possibly
|
|
|
|
* other copies), the express snoop flag is set by the downstream
|
|
|
|
* cache to invalidate all other copies in zero time. Secondly,
|
|
|
|
* the express snoop flag is also set to be able to distinguish
|
|
|
|
* snoop packets that came from a downstream cache, rather than
|
|
|
|
* snoop packets from neighbouring caches.
|
|
|
|
*/
|
|
|
|
/// Flag this packet as an express snoop (see the comment above for
/// the two uses of the flag).
void setExpressSnoop() { flags.set(EXPRESS_SNOOP); }
bool isExpressSnoop() const { return flags.isSet(EXPRESS_SNOOP); }
|
|
|
|
|
|
|
|
/**
|
|
|
|
* On responding to a snoop request (which only happens for
|
|
|
|
* Modified or Owned lines), make sure that we can transform an
|
|
|
|
* Owned response to a Modified one. If this flag is not set, the
|
|
|
|
* responding cache had the line in the Owned state, and there are
|
|
|
|
* possibly other Shared copies in the memory system. A downstream
|
|
|
|
* cache helps in orchestrating the invalidation of these copies
|
|
|
|
* by sending out the appropriate express snoops.
|
|
|
|
*/
|
|
|
|
void setResponderHadWritable()
{
    // only meaningful when a cache is responding to the snoop
    assert(cacheResponding());
    // the flag is set at most once per packet
    assert(!responderHadWritable());
    flags.set(RESPONDER_HAD_WRITABLE);
}
bool responderHadWritable() const
{ return flags.isSet(RESPONDER_HAD_WRITABLE); }
|
|
|
|
|
2013-10-31 19:41:13 +01:00
|
|
|
/// Suppress functional-access errors on this packet.
void setSuppressFuncError() { flags.set(SUPPRESS_FUNC_ERROR); }
bool suppressFuncError() const { return flags.isSet(SUPPRESS_FUNC_ERROR); }
// Manipulate the BLOCK_CACHED flag.
// NOTE(review): appears to track whether the block is cached
// upstream — confirm against callers.
void setBlockCached() { flags.set(BLOCK_CACHED); }
bool isBlockCached() const { return flags.isSet(BLOCK_CACHED); }
void clearBlockCached() { flags.clear(BLOCK_CACHED); }
|
2007-07-16 05:11:06 +02:00
|
|
|
|
2007-06-30 19:16:18 +02:00
|
|
|
// Network error conditions... encapsulate them as methods since
|
|
|
|
// their encoding keeps changing (from result field to command
|
|
|
|
// field, etc.)
|
2008-11-10 20:51:17 +01:00
|
|
|
void
|
|
|
|
setBadAddress()
|
|
|
|
{
|
|
|
|
assert(isResponse());
|
|
|
|
cmd = MemCmd::BadAddressError;
|
|
|
|
}
|
|
|
|
|
2007-08-27 06:45:40 +02:00
|
|
|
void copyError(Packet *pkt) { assert(pkt->isError()); cmd = pkt->cmd; }
|
2006-06-30 16:25:25 +02:00
|
|
|
|
2015-08-24 11:03:45 +02:00
|
|
|
/// Get the packet address; only valid once an address has been set.
Addr getAddr() const { assert(flags.isSet(VALID_ADDR)); return addr; }

/**
 * Update the address of this packet mid-transaction. This is used
 * by the address mapper to change an already set address to a new
 * one based on the system configuration. It is intended to remap
 * an existing address, so it asserts that the current address is
 * valid.
 */
void setAddr(Addr _addr) { assert(flags.isSet(VALID_ADDR)); addr = _addr; }

/// Get the packet size; only valid once a size has been set.
unsigned getSize() const { assert(flags.isSet(VALID_SIZE)); return size; }
|
2015-07-30 09:41:38 +02:00
|
|
|
|
|
|
|
/// Offset of the packet address within a block of the given size.
Addr getOffset(unsigned int blk_size) const
{
    const Addr mask = Addr(blk_size - 1);
    return getAddr() & mask;
}

/// Packet address aligned down to a block of the given size.
Addr getBlockAddr(unsigned int blk_size) const
{
    const Addr mask = Addr(blk_size - 1);
    return getAddr() & ~mask;
}
|
2008-11-10 20:51:17 +01:00
|
|
|
|
2015-08-24 11:03:45 +02:00
|
|
|
/// Is this packet targeting the secure memory space? Requires a
/// valid address.
bool isSecure() const
{
    assert(flags.isSet(VALID_ADDR));
    return _isSecure;
}
|
2014-01-24 22:29:30 +01:00
|
|
|
|
2016-01-19 19:57:50 +01:00
|
|
|
/**
 * Accessor function to atomic op.
 */
AtomicOpFunctor *getAtomicOp() const { return req->getAtomicOpFunctor(); }
/// Does the underlying request describe an atomic operation?
bool isAtomicOp() const { return req->isAtomic(); }
|
|
|
|
|
2010-08-20 20:46:12 +02:00
|
|
|
/**
 * It has been determined that the SC packet should successfully update
 * memory. Therefore, convert this SC packet to a normal write.
 */
void
convertScToWrite()
{
    // must currently be a store-conditional write
    assert(isLLSC());
    assert(isWrite());
    cmd = MemCmd::WriteReq;
}
|
|
|
|
|
|
|
|
/**
 * When ruby is in use, Ruby will monitor the cache line and the
 * phys memory should treat LL ops as normal reads.
 */
void
convertLlToRead()
{
    // must currently be a load-locked read
    assert(isLLSC());
    assert(isRead());
    cmd = MemCmd::ReadReq;
}
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
 * Constructor. Note that a Request object must be constructed
 * first, but the Request's physical address and size fields need
 * not be valid. The command must be supplied.
 */
Packet(const RequestPtr _req, MemCmd _cmd)
    : cmd(_cmd), req(_req), data(nullptr), addr(0), _isSecure(false),
      size(0), headerDelay(0), snoopDelay(0), payloadDelay(0),
      senderState(NULL)
{
    // copy address/size (and security state) from the request only
    // if they are valid there, and mark them valid here
    if (req->hasPaddr()) {
        addr = req->getPaddr();
        flags.set(VALID_ADDR);
        _isSecure = req->isSecure();
    }
    if (req->hasSize()) {
        size = req->getSize();
        flags.set(VALID_SIZE);
    }
}
|
2006-06-29 22:07:19 +02:00
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
 * Alternate constructor if you are trying to create a packet with
 * a request that is for a whole block, not the address from the
 * req. This allows for overriding the size/addr of the req.
 */
Packet(const RequestPtr _req, MemCmd _cmd, int _blkSize)
    : cmd(_cmd), req(_req), data(nullptr), addr(0), _isSecure(false),
      headerDelay(0), snoopDelay(0), payloadDelay(0),
      senderState(NULL)
{
    // align the request address down to the given block boundary
    if (req->hasPaddr()) {
        addr = req->getPaddr() & ~(_blkSize - 1);
        flags.set(VALID_ADDR);
        _isSecure = req->isSecure();
    }
    // the size is always the supplied block size
    size = _blkSize;
    flags.set(VALID_SIZE);
}
|
2007-06-18 02:27:53 +02:00
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
 * Alternate constructor for copying a packet. Copy all fields
 * *except* if the original packet's data was dynamic, don't copy
 * that, as we can't guarantee that the new packet's lifetime is
 * less than that of the original packet. In this case the new
 * packet should allocate its own data.
 */
Packet(const PacketPtr pkt, bool clear_flags, bool alloc_data)
    : cmd(pkt->cmd), req(pkt->req),
      data(nullptr),
      addr(pkt->addr), _isSecure(pkt->_isSecure), size(pkt->size),
      bytesValid(pkt->bytesValid),
      headerDelay(pkt->headerDelay),
      snoopDelay(0),
      payloadDelay(pkt->payloadDelay),
      senderState(pkt->senderState)
{
    // optionally carry over the copyable flags
    if (!clear_flags)
        flags.set(pkt->flags & COPY_FLAGS);

    // address/size validity is copied regardless of clear_flags
    flags.set(pkt->flags & (VALID_ADDR|VALID_SIZE));

    // should we allocate space for data, or not, the express
    // snoops do not need to carry any data as they only serve to
    // co-ordinate state changes
    if (alloc_data) {
        // even if asked to allocate data, if the original packet
        // holds static data, then the sender will not be doing
        // any memcpy on receiving the response, thus we simply
        // carry the pointer forward
        if (pkt->flags.isSet(STATIC_DATA)) {
            data = pkt->data;
            flags.set(STATIC_DATA);
        } else {
            allocate();
        }
    }
}
|
|
|
|
|
|
|
|
/**
|
2015-02-11 19:48:50 +01:00
|
|
|
* Generate the appropriate read MemCmd based on the Request flags.
|
2014-05-13 19:20:48 +02:00
|
|
|
*/
|
2015-02-11 19:48:50 +01:00
|
|
|
static MemCmd
|
|
|
|
makeReadCmd(const RequestPtr req)
|
2014-05-13 19:20:48 +02:00
|
|
|
{
|
2015-02-11 19:48:50 +01:00
|
|
|
if (req->isLLSC())
|
|
|
|
return MemCmd::LoadLockedReq;
|
|
|
|
else if (req->isPrefetch())
|
|
|
|
return MemCmd::SoftPFReq;
|
|
|
|
else
|
|
|
|
return MemCmd::ReadReq;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Generate the appropriate write MemCmd based on the Request flags.
|
|
|
|
*/
|
|
|
|
static MemCmd
|
|
|
|
makeWriteCmd(const RequestPtr req)
|
|
|
|
{
|
|
|
|
if (req->isLLSC())
|
|
|
|
return MemCmd::StoreCondReq;
|
|
|
|
else if (req->isSwap())
|
|
|
|
return MemCmd::SwapReq;
|
|
|
|
else
|
|
|
|
return MemCmd::WriteReq;
|
2014-05-13 19:20:48 +02:00
|
|
|
}
|
2012-01-10 01:10:05 +01:00
|
|
|
|
2014-05-13 19:20:48 +02:00
|
|
|
/**
 * Constructor-like methods that return Packets based on Request objects.
 * Fine-tune the MemCmd type if it's not a vanilla read or write.
 */
static PacketPtr
createRead(const RequestPtr req)
{
    return new Packet(req, makeReadCmd(req));
}
|
|
|
|
|
|
|
|
/// Allocate a new write packet whose command is derived from the
/// request flags (see makeWriteCmd).
static PacketPtr
createWrite(const RequestPtr req)
{
    return new Packet(req, makeWriteCmd(req));
}
|
2006-04-12 23:46:25 +02:00
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
 * clean up packet variables
 */
~Packet()
{
    // Delete the request object if this is a request packet which
    // does not need a response, because the requester will not get
    // a chance. If the request packet needs a response then the
    // request will be deleted on receipt of the response
    // packet. We also make sure to never delete the request for
    // express snoops, even for cases when responses are not
    // needed (CleanEvict and Writeback), since the snoop packet
    // re-uses the same request.
    if (req && isRequest() && !needsResponse() &&
        !isExpressSnoop()) {
        delete req;
    }
    // release any dynamically allocated payload
    deleteData();
}
|
2006-04-25 01:31:50 +02:00
|
|
|
|
2007-06-18 02:27:53 +02:00
|
|
|
/**
 * Take a request packet and modify it in place to be suitable for
 * returning as a response to that request.
 */
void
makeResponse()
{
    assert(needsResponse());
    assert(isRequest());
    // swap the command for its matching response command
    cmd = cmd.responseCommand();

    // responses are never express, even if the snoop that
    // triggered them was
    flags.clear(EXPRESS_SNOOP);
}
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/// Turn this packet into an atomic-mode response, in place.
void
makeAtomicResponse()
{
    makeResponse();
}
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/// Turn this packet into a timing-mode response, in place.
void
makeTimingResponse()
{
    makeResponse();
}
|
|
|
|
|
2011-07-01 02:49:26 +02:00
|
|
|
void
|
|
|
|
setFunctionalResponseStatus(bool success)
|
|
|
|
{
|
|
|
|
if (!success) {
|
|
|
|
if (isWrite()) {
|
|
|
|
cmd = MemCmd::FunctionalWriteError;
|
|
|
|
} else {
|
|
|
|
cmd = MemCmd::FunctionalReadError;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-08-24 11:03:45 +02:00
|
|
|
/**
 * Set the packet size; may only be done once, while the size is
 * still marked invalid.
 */
void
setSize(unsigned size)
{
    assert(!flags.isSet(VALID_SIZE));

    this->size = size;
    flags.set(VALID_SIZE);
}
|
|
|
|
|
|
|
|
|
2015-08-07 10:59:28 +02:00
|
|
|
public:
|
|
|
|
/**
|
|
|
|
* @{
|
|
|
|
* @name Data accessor methods
|
|
|
|
*/
|
|
|
|
|
2006-10-20 08:38:45 +02:00
|
|
|
/**
 * Set the data pointer to the following value that should not be
 * freed. Static data allows us to do a single memcpy even if
 * multiple packets are required to get from source to destination
 * and back. In essence the pointer is set calling dataStatic on
 * the original packet, and whenever this packet is copied and
 * forwarded the same pointer is passed on. When a packet
 * eventually reaches the destination holding the data, it is
 * copied once into the location originally set. On the way back
 * to the source, no copies are necessary.
 */
template <typename T>
void
dataStatic(T *p)
{
    // the packet must not already have data attached
    assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
    data = (PacketDataPtr)p;
    flags.set(STATIC_DATA);
}
|
2006-04-25 01:31:50 +02:00
|
|
|
|
2014-12-02 12:07:38 +01:00
|
|
|
/**
 * Set the data pointer to the following value that should not be
 * freed. This version of the function allows the pointer passed
 * to us to be const. To avoid issues down the line we cast the
 * constness away, the alternative would be to keep both a const
 * and non-const data pointer and cleverly choose between
 * them. Note that this is only allowed for static data.
 */
template <typename T>
void
dataStaticConst(const T *p)
{
    // the packet must not already have data attached
    assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
    data = const_cast<PacketDataPtr>(p);
    flags.set(STATIC_DATA);
}
|
|
|
|
|
2006-10-20 08:38:45 +02:00
|
|
|
/**
 * Set the data pointer to a value that should have delete []
 * called on it. Dynamic data is local to this packet, and as the
 * packet travels from source to destination, forwarded packets
 * will allocate their own data. When a packet reaches the final
 * destination it will populate the dynamic data of that specific
 * packet, and on the way back towards the source, memcpy will be
 * invoked in every step where a new packet was created e.g. in
 * the caches. Ultimately when the response reaches the source a
 * final memcpy is needed to extract the data from the packet
 * before it is deallocated.
 */
template <typename T>
void
dataDynamic(T *p)
{
    // the packet must not already have data attached
    assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
    data = (PacketDataPtr)p;
    flags.set(DYNAMIC_DATA);
}
|
2006-04-25 01:31:50 +02:00
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
 * get a pointer to the data ptr.
 */
template <typename T>
T*
getPtr()
{
    // data must have been attached, either statically or dynamically
    assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA));
    return (T*)data;
}
|
2006-04-25 01:31:50 +02:00
|
|
|
|
2014-12-02 12:07:36 +01:00
|
|
|
/// Const variant of getPtr(); data must have been attached.
template <typename T>
const T*
getConstPtr() const
{
    assert(flags.isSet(STATIC_DATA|DYNAMIC_DATA));
    return (const T*)data;
}
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
|
2015-08-07 10:59:28 +02:00
|
|
|
* Get the data in the packet byte swapped from big endian to
|
|
|
|
* host endian.
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
T getBE() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Get the data in the packet byte swapped from little endian to
|
|
|
|
* host endian.
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
T getLE() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Get the data in the packet byte swapped from the specified
|
|
|
|
* endianness.
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
T get(ByteOrder endian) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Get the data in the packet byte swapped from guest to host
|
|
|
|
* endian.
|
2008-11-10 20:51:17 +01:00
|
|
|
*/
|
2006-04-25 01:31:50 +02:00
|
|
|
template <typename T>
|
2014-12-02 12:07:36 +01:00
|
|
|
T get() const;
|
2006-04-25 01:31:50 +02:00
|
|
|
|
2015-08-07 10:59:28 +02:00
|
|
|
/** Set the value in the data pointer to v as big endian. */
|
|
|
|
template <typename T>
|
|
|
|
void setBE(T v);
|
|
|
|
|
|
|
|
/** Set the value in the data pointer to v as little endian. */
|
|
|
|
template <typename T>
|
|
|
|
void setLE(T v);
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
|
2015-08-07 10:59:28 +02:00
|
|
|
* Set the value in the data pointer to v using the specified
|
|
|
|
* endianness.
|
2008-11-10 20:51:17 +01:00
|
|
|
*/
|
2006-04-25 01:31:50 +02:00
|
|
|
template <typename T>
|
2015-08-07 10:59:28 +02:00
|
|
|
void set(T v, ByteOrder endian);
|
|
|
|
|
|
|
|
/** Set the value in the data pointer to v as guest endian. */
|
|
|
|
template <typename T>
|
2006-05-02 00:53:28 +02:00
|
|
|
void set(T v);
|
2006-04-25 01:31:50 +02:00
|
|
|
|
2007-06-18 02:27:53 +02:00
|
|
|
/**
 * Copy data into the packet from the provided pointer.
 */
void
setData(const uint8_t *p)
{
    // we should never be copying data onto itself, which means we
    // must identify packets with static data, as they carry the
    // same pointer from source to destination and back
    assert(p != getPtr<uint8_t>() || flags.isSet(STATIC_DATA));

    if (p != getPtr<uint8_t>())
        // for packet with allocated dynamic data, we copy data from
        // one to the other, e.g. a forwarded response to a response
        std::memcpy(getPtr<uint8_t>(), p, getSize());
}
|
|
|
|
|
|
|
|
/**
 * Copy data into the packet from the provided block pointer,
 * which is aligned to the given block size.
 */
void
setDataFromBlock(const uint8_t *blk_data, int blkSize)
{
    // offset into the block is determined by the packet address
    setData(blk_data + getOffset(blkSize));
}
|
|
|
|
|
|
|
|
/**
 * Copy data from the packet to the memory at the provided pointer.
 */
void
writeData(uint8_t *p) const
{
    std::memcpy(p, getConstPtr<uint8_t>(), getSize());
}
|
|
|
|
|
|
|
|
/**
 * Copy data from the packet to the provided block pointer, which
 * is aligned to the given block size.
 */
void
writeDataToBlock(uint8_t *blk_data, int blkSize) const
{
    // offset into the block is determined by the packet address
    writeData(blk_data + getOffset(blkSize));
}
|
|
|
|
|
2006-10-20 08:38:45 +02:00
|
|
|
/**
 * delete the data pointed to in the data pointer. Ok to call no
 * matter how data was allocated.
 */
void
deleteData()
{
    // only dynamically allocated data is owned by the packet
    if (flags.isSet(DYNAMIC_DATA))
        delete [] data;

    flags.clear(STATIC_DATA|DYNAMIC_DATA);
    data = NULL;
}
|
2006-04-25 01:31:50 +02:00
|
|
|
|
2014-12-02 12:07:41 +01:00
|
|
|
/** Allocate memory for the packet. */
void
allocate()
{
    // if either this command or the response command has a data
    // payload, actually allocate space
    if (hasData() || hasRespData()) {
        // must not already have data attached
        assert(flags.noneSet(STATIC_DATA|DYNAMIC_DATA));
        flags.set(DYNAMIC_DATA);
        data = new uint8_t[getSize()];
    }
}
|
|
|
|
|
2015-08-07 10:59:28 +02:00
|
|
|
/** @} */
|
|
|
|
|
|
|
|
private: // Private data accessor methods
|
|
|
|
/** Get the data in the packet without byte swapping. */
|
|
|
|
template <typename T>
|
|
|
|
T getRaw() const;
|
|
|
|
|
|
|
|
/** Set the value in the data pointer to v without byte swapping. */
|
|
|
|
template <typename T>
|
|
|
|
void setRaw(T v);
|
|
|
|
|
|
|
|
public:
|
2007-06-18 02:27:53 +02:00
|
|
|
/**
 * Check a functional request against a memory value stored in
 * another packet (i.e. an in-transit request or
 * response). Returns true if the current packet is a read, and
 * the other packet provides the data, which is then copied to the
 * current packet. If the current packet is a write, and the other
 * packet intersects this one, then we update the data
 * accordingly.
 */
bool
checkFunctional(PacketPtr other)
{
    // all packets that are carrying a payload should have a valid
    // data pointer
    return checkFunctional(other, other->getAddr(), other->isSecure(),
                           other->getSize(),
                           other->hasData() ?
                           other->getPtr<uint8_t>() : NULL);
}
|
2006-01-31 18:12:49 +01:00
|
|
|
|
2015-07-03 16:14:37 +02:00
|
|
|
/**
|
mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to
allow clean writebacks. This functionality is crucial, especially when
having exclusive (victim) caches. For example, if read-only L1
instruction caches are not sending clean writebacks, there will never
be any spills from the L1 to the L2. At the moment the cache model
defaults to not sending clean writebacks, and this should possibly be
re-evaluated.
The implementation of clean writebacks relies on a new packet command
WritebackClean, which acts much like a Writeback (renamed
WritebackDirty), and also much like a CleanEvict. On eviction of a
clean block the cache either sends a clean evict, or a clean
writeback, and if any copies are still cached upstream the clean
evict/writeback is dropped. Similarly, if a clean evict/writeback
reaches a cache where there are outstanding MSHRs for the block, the
packet is dropped. In the typical case though, the clean writeback
allocates a block in the downstream cache, and marks it writable if
the evicted block was writable.
The patch changes the O3_ARM_v7a L1 cache configuration and the
default L1 caches in config/common/Caches.py
2015-11-06 09:26:43 +01:00
|
|
|
* Does the request need to check for cached copies of the same block
|
|
|
|
* in the memory hierarchy above.
|
2015-07-03 16:14:37 +02:00
|
|
|
**/
|
|
|
|
bool
|
mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to
allow clean writebacks. This functionality is crucial, especially when
having exclusive (victim) caches. For example, if read-only L1
instruction caches are not sending clean writebacks, there will never
be any spills from the L1 to the L2. At the moment the cache model
defaults to not sending clean writebacks, and this should possibly be
re-evaluated.
The implementation of clean writebacks relies on a new packet command
WritebackClean, which acts much like a Writeback (renamed
WritebackDirty), and also much like a CleanEvict. On eviction of a
clean block the cache either sends a clean evict, or a clean
writeback, and if any copies are still cached upstream the clean
evict/writeback is dropped. Similarly, if a clean evict/writeback
reaches a cache where there are outstanding MSHRs for the block, the
packet is dropped. In the typical case though, the clean writeback
allocates a block in the downstream cache, and marks it writable if
the evicted block was writable.
The patch changes the O3_ARM_v7a L1 cache configuration and the
default L1 caches in config/common/Caches.py
2015-11-06 09:26:43 +01:00
|
|
|
mustCheckAbove() const
|
2015-07-03 16:14:37 +02:00
|
|
|
{
|
mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to
allow clean writebacks. This functionality is crucial, especially when
having exclusive (victim) caches. For example, if read-only L1
instruction caches are not sending clean writebacks, there will never
be any spills from the L1 to the L2. At the moment the cache model
defaults to not sending clean writebacks, and this should possibly be
re-evaluated.
The implementation of clean writebacks relies on a new packet command
WritebackClean, which acts much like a Writeback (renamed
WritebackDirty), and also much like a CleanEvict. On eviction of a
clean block the cache either sends a clean evict, or a clean
writeback, and if any copies are still cached upstream the clean
evict/writeback is dropped. Similarly, if a clean evict/writeback
reaches a cache where there are outstanding MSHRs for the block, the
packet is dropped. In the typical case though, the clean writeback
allocates a block in the downstream cache, and marks it writable if
the evicted block was writable.
The patch changes the O3_ARM_v7a L1 cache configuration and the
default L1 caches in config/common/Caches.py
2015-11-06 09:26:43 +01:00
|
|
|
return cmd == MemCmd::HardPFReq || isEviction();
|
2015-07-03 16:14:37 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to
allow clean writebacks. This functionality is crucial, especially when
having exclusive (victim) caches. For example, if read-only L1
instruction caches are not sending clean writebacks, there will never
be any spills from the L1 to the L2. At the moment the cache model
defaults to not sending clean writebacks, and this should possibly be
re-evaluated.
The implementation of clean writebacks relies on a new packet command
WritebackClean, which acts much like a Writeback (renamed
WritebackDirty), and also much like a CleanEvict. On eviction of a
clean block the cache either sends a clean evict, or a clean
writeback, and if any copies are still cached upstream the clean
evict/writeback is dropped. Similarly, if a clean evict/writeback
reaches a cache where there are outstanding MSHRs for the block, the
packet is dropped. In the typical case though, the clean writeback
allocates a block in the downstream cache, and marks it writable if
the evicted block was writable.
The patch changes the O3_ARM_v7a L1 cache configuration and the
default L1 caches in config/common/Caches.py
2015-11-06 09:26:43 +01:00
|
|
|
* Is this packet a clean eviction, including both actual clean
|
|
|
|
* evict packets, but also clean writebacks.
|
|
|
|
*/
|
2015-07-03 16:14:37 +02:00
|
|
|
bool
|
mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to
allow clean writebacks. This functionality is crucial, especially when
having exclusive (victim) caches. For example, if read-only L1
instruction caches are not sending clean writebacks, there will never
be any spills from the L1 to the L2. At the moment the cache model
defaults to not sending clean writebacks, and this should possibly be
re-evaluated.
The implementation of clean writebacks relies on a new packet command
WritebackClean, which acts much like a Writeback (renamed
WritebackDirty), and also much like a CleanEvict. On eviction of a
clean block the cache either sends a clean evict, or a clean
writeback, and if any copies are still cached upstream the clean
evict/writeback is dropped. Similarly, if a clean evict/writeback
reaches a cache where there are outstanding MSHRs for the block, the
packet is dropped. In the typical case though, the clean writeback
allocates a block in the downstream cache, and marks it writable if
the evicted block was writable.
The patch changes the O3_ARM_v7a L1 cache configuration and the
default L1 caches in config/common/Caches.py
2015-11-06 09:26:43 +01:00
|
|
|
isCleanEviction() const
|
2015-07-03 16:14:37 +02:00
|
|
|
{
|
mem: Add an option to perform clean writebacks from caches
This patch adds the necessary commands and cache functionality to
allow clean writebacks. This functionality is crucial, especially when
having exclusive (victim) caches. For example, if read-only L1
instruction caches are not sending clean writebacks, there will never
be any spills from the L1 to the L2. At the moment the cache model
defaults to not sending clean writebacks, and this should possibly be
re-evaluated.
The implementation of clean writebacks relies on a new packet command
WritebackClean, which acts much like a Writeback (renamed
WritebackDirty), and also much like a CleanEvict. On eviction of a
clean block the cache either sends a clean evict, or a clean
writeback, and if any copies are still cached upstream the clean
evict/writeback is dropped. Similarly, if a clean evict/writeback
reaches a cache where there are outstanding MSHRs for the block, the
packet is dropped. In the typical case though, the clean writeback
allocates a block in the downstream cache, and marks it writable if
the evicted block was writable.
The patch changes the O3_ARM_v7a L1 cache configuration and the
default L1 caches in config/common/Caches.py
2015-11-06 09:26:43 +01:00
|
|
|
return cmd == MemCmd::CleanEvict || cmd == MemCmd::WritebackClean;
|
2015-07-03 16:14:37 +02:00
|
|
|
}
|
|
|
|
|
2014-12-02 12:07:52 +01:00
|
|
|
/**
|
|
|
|
* Check a functional request against a memory value represented
|
|
|
|
* by a base/size pair and an associated data array. If the
|
|
|
|
* current packet is a read, it may be satisfied by the memory
|
|
|
|
* value. If the current packet is a write, it may update the
|
|
|
|
* memory value.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
checkFunctional(Printable *obj, Addr base, bool is_secure, int size,
|
|
|
|
uint8_t *_data);
|
|
|
|
|
2008-01-02 22:46:22 +01:00
|
|
|
/**
|
|
|
|
* Push label for PrintReq (safe to call unconditionally).
|
|
|
|
*/
|
2008-11-10 20:51:17 +01:00
|
|
|
void
|
|
|
|
pushLabel(const std::string &lbl)
|
|
|
|
{
|
|
|
|
if (isPrint())
|
|
|
|
safe_cast<PrintReqState*>(senderState)->pushLabel(lbl);
|
2008-01-02 21:20:15 +01:00
|
|
|
}
|
|
|
|
|
2008-01-02 22:46:22 +01:00
|
|
|
/**
|
|
|
|
* Pop label for PrintReq (safe to call unconditionally).
|
|
|
|
*/
|
2008-11-10 20:51:17 +01:00
|
|
|
void
|
|
|
|
popLabel()
|
|
|
|
{
|
|
|
|
if (isPrint())
|
|
|
|
safe_cast<PrintReqState*>(senderState)->popLabel();
|
2008-01-02 21:20:15 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void print(std::ostream &o, int verbosity = 0,
|
|
|
|
const std::string &prefix = "") const;
|
2013-04-22 19:20:33 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* A no-args wrapper of print(std::ostream...)
|
|
|
|
* meant to be invoked from DPRINTFs
|
|
|
|
* avoiding string overheads in fast mode
|
|
|
|
* @return string with the request's type and start<->end addresses
|
|
|
|
*/
|
|
|
|
std::string print() const;
|
2008-01-02 21:20:15 +01:00
|
|
|
};
|
2006-10-12 20:15:09 +02:00
|
|
|
|
2006-01-31 18:12:49 +01:00
|
|
|
#endif //__MEM_PACKET_HH
|