2006-01-31 18:12:49 +01:00
|
|
|
/*
|
2014-01-24 22:29:30 +01:00
|
|
|
* Copyright (c) 2012-2013 ARM Limited
|
2012-11-02 17:32:01 +01:00
|
|
|
* All rights reserved
|
|
|
|
*
|
|
|
|
* The license below extends only to copyright in the software and shall
|
|
|
|
* not be construed as granting a license to any other intellectual
|
|
|
|
* property including but not limited to intellectual property relating
|
|
|
|
* to a hardware implementation of the functionality of the software
|
|
|
|
* licensed hereunder. You may use the software subject to the license
|
|
|
|
* terms below provided that you ensure that this notice is replicated
|
|
|
|
* unmodified and in its entirety in all distributions of the software,
|
|
|
|
* modified or unmodified, in source code or in binary form.
|
|
|
|
*
|
2006-01-31 18:12:49 +01:00
|
|
|
* Copyright (c) 2002-2005 The Regents of The University of Michigan
|
2015-07-20 16:15:18 +02:00
|
|
|
* Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
|
2006-01-31 18:12:49 +01:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions are
|
|
|
|
* met: redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer;
|
|
|
|
* redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution;
|
|
|
|
* neither the name of the copyright holders nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived from
|
|
|
|
* this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
2006-06-01 01:26:56 +02:00
|
|
|
*
|
|
|
|
* Authors: Ron Dreslinski
|
|
|
|
* Steve Reinhardt
|
|
|
|
* Ali Saidi
|
2006-01-31 18:12:49 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
2006-08-15 01:25:07 +02:00
|
|
|
* @file
|
|
|
|
* Declaration of a request, the overall memory request consisting of
|
2006-01-31 18:12:49 +01:00
|
|
|
 * the parts of the request that are persistent throughout the transaction.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __MEM_REQUEST_HH__
|
|
|
|
#define __MEM_REQUEST_HH__
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
#include <cassert>
|
2012-02-12 23:07:39 +01:00
|
|
|
#include <climits>
|
2008-11-10 20:51:17 +01:00
|
|
|
|
|
|
|
#include "base/flags.hh"
|
2008-11-14 08:30:37 +01:00
|
|
|
#include "base/misc.hh"
|
2009-05-17 23:34:50 +02:00
|
|
|
#include "base/types.hh"
|
2015-12-07 23:42:15 +01:00
|
|
|
#include "cpu/inst_seq.hh"
|
2007-03-06 20:13:43 +01:00
|
|
|
#include "sim/core.hh"
|
2006-02-15 20:21:09 +01:00
|
|
|
|
2012-11-02 17:32:01 +01:00
|
|
|
/**
|
|
|
|
* Special TaskIds that are used for per-context-switch stats dumps
|
|
|
|
* and Cache Occupancy. Having too many tasks seems to be a problem
|
|
|
|
* with vector stats. 1024 seems to be a reasonable number that
|
|
|
|
 * doesn't cause a problem with stats and is large enough for realistic
|
|
|
|
* benchmarks (Linux/Android boot, BBench, etc.)
|
|
|
|
*/
|
|
|
|
|
|
|
|
namespace ContextSwitchTaskId {
|
|
|
|
enum TaskId {
|
|
|
|
MaxNormalTaskId = 1021, /* Maximum number of normal tasks */
|
|
|
|
Prefetcher = 1022, /* For cache lines brought in by prefetcher */
|
|
|
|
DMA = 1023, /* Mostly Table Walker */
|
|
|
|
Unknown = 1024,
|
|
|
|
NumTaskId
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2006-02-15 20:21:09 +01:00
|
|
|
class Request;
|
|
|
|
|
|
|
|
typedef Request* RequestPtr;
|
2012-02-12 23:07:38 +01:00
|
|
|
typedef uint16_t MasterID;
|
2006-02-15 20:21:09 +01:00
|
|
|
|
2012-06-05 07:23:08 +02:00
|
|
|
class Request
|
2008-11-10 20:51:17 +01:00
|
|
|
{
|
|
|
|
public:
|
2015-08-07 10:55:38 +02:00
|
|
|
typedef uint32_t FlagsType;
|
2013-10-15 13:26:34 +02:00
|
|
|
typedef uint8_t ArchFlagsType;
|
2008-11-10 20:51:17 +01:00
|
|
|
typedef ::Flags<FlagsType> Flags;
|
|
|
|
|
2015-07-03 16:14:36 +02:00
|
|
|
enum : FlagsType {
|
|
|
|
/**
|
|
|
|
* Architecture specific flags.
|
|
|
|
*
|
|
|
|
 * These bits in the flag field are reserved for
|
|
|
|
* architecture-specific code. For example, SPARC uses them to
|
|
|
|
* represent ASIs.
|
|
|
|
*/
|
2015-08-07 10:55:38 +02:00
|
|
|
ARCH_BITS = 0x000000FF,
|
2015-07-03 16:14:36 +02:00
|
|
|
/** The request was an instruction fetch. */
|
2015-08-07 10:55:38 +02:00
|
|
|
INST_FETCH = 0x00000100,
|
2015-07-03 16:14:36 +02:00
|
|
|
/** The virtual address is also the physical address. */
|
2015-08-07 10:55:38 +02:00
|
|
|
PHYSICAL = 0x00000200,
|
2015-07-03 16:14:36 +02:00
|
|
|
/**
|
|
|
|
* The request is to an uncacheable address.
|
|
|
|
*
|
|
|
|
* @note Uncacheable accesses may be reordered by CPU models. The
|
|
|
|
* STRICT_ORDER flag should be set if such reordering is
|
|
|
|
* undesirable.
|
|
|
|
*/
|
2015-08-07 10:55:38 +02:00
|
|
|
UNCACHEABLE = 0x00000400,
|
2015-07-03 16:14:36 +02:00
|
|
|
/**
|
|
|
|
* The request is required to be strictly ordered by <i>CPU
|
|
|
|
* models</i> and is non-speculative.
|
|
|
|
*
|
|
|
|
* A strictly ordered request is guaranteed to never be
|
|
|
|
* re-ordered or executed speculatively by a CPU model. The
|
|
|
|
* memory system may still reorder requests in caches unless
|
|
|
|
* the UNCACHEABLE flag is set as well.
|
|
|
|
*/
|
2015-08-07 10:55:38 +02:00
|
|
|
STRICT_ORDER = 0x00000800,
|
2015-07-03 16:14:36 +02:00
|
|
|
/** This request is to a memory mapped register. */
|
2015-08-07 10:55:38 +02:00
|
|
|
MMAPPED_IPR = 0x00002000,
|
2015-07-03 16:14:36 +02:00
|
|
|
/** This request is made in privileged mode. */
|
2015-08-07 10:55:38 +02:00
|
|
|
PRIVILEGED = 0x00008000,
|
2015-07-03 16:14:36 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* This is a write that is targeted and zeroing an entire
|
|
|
|
* cache block. There is no need for a read/modify/write
|
|
|
|
*/
|
2015-08-07 10:55:38 +02:00
|
|
|
CACHE_BLOCK_ZERO = 0x00010000,
|
2015-07-03 16:14:36 +02:00
|
|
|
|
|
|
|
/** The request should not cause a memory access. */
|
2015-08-07 10:55:38 +02:00
|
|
|
NO_ACCESS = 0x00080000,
|
2015-07-03 16:14:36 +02:00
|
|
|
/**
|
|
|
|
* This request will lock or unlock the accessed memory. When
|
|
|
|
* used with a load, the access locks the particular chunk of
|
|
|
|
* memory. When used with a store, it unlocks. The rule is
|
|
|
|
* that locked accesses have to be made up of a locked load,
|
|
|
|
* some operation on the data, and then a locked store.
|
|
|
|
*/
|
2015-08-07 10:55:38 +02:00
|
|
|
LOCKED_RMW = 0x00100000,
|
2015-07-03 16:14:36 +02:00
|
|
|
/** The request is a Load locked/store conditional. */
|
2015-08-07 10:55:38 +02:00
|
|
|
LLSC = 0x00200000,
|
2015-07-03 16:14:36 +02:00
|
|
|
/** This request is for a memory swap. */
|
2015-08-07 10:55:38 +02:00
|
|
|
MEM_SWAP = 0x00400000,
|
|
|
|
MEM_SWAP_COND = 0x00800000,
|
2015-07-03 16:14:36 +02:00
|
|
|
|
|
|
|
/** The request is a prefetch. */
|
2015-08-07 10:55:38 +02:00
|
|
|
PREFETCH = 0x01000000,
|
2015-07-03 16:14:36 +02:00
|
|
|
/** The request should be prefetched into the exclusive state. */
|
2015-08-07 10:55:38 +02:00
|
|
|
PF_EXCLUSIVE = 0x02000000,
|
2015-07-03 16:14:36 +02:00
|
|
|
/** The request should be marked as LRU. */
|
2015-08-07 10:55:38 +02:00
|
|
|
EVICT_NEXT = 0x04000000,
|
2015-07-20 16:15:18 +02:00
|
|
|
/** The request should be marked with ACQUIRE. */
|
|
|
|
ACQUIRE = 0x00020000,
|
|
|
|
/** The request should be marked with RELEASE. */
|
|
|
|
RELEASE = 0x00040000,
|
2015-07-03 16:14:36 +02:00
|
|
|
|
2016-01-19 19:57:50 +01:00
|
|
|
/** The request is an atomic that returns data. */
|
|
|
|
ATOMIC_RETURN_OP = 0x40000000,
|
|
|
|
/** The request is an atomic that does not return data. */
|
|
|
|
ATOMIC_NO_RETURN_OP = 0x80000000,
|
|
|
|
|
2015-07-20 16:15:18 +02:00
|
|
|
/** The request should be marked with KERNEL.
|
|
|
|
* Used to indicate the synchronization associated with a GPU kernel
|
|
|
|
* launch or completion.
|
|
|
|
*/
|
|
|
|
KERNEL = 0x00001000,
|
|
|
|
|
2015-07-03 16:14:36 +02:00
|
|
|
/**
|
|
|
|
* The request should be handled by the generic IPR code (only
|
|
|
|
* valid together with MMAPPED_IPR)
|
|
|
|
*/
|
2015-08-07 10:55:38 +02:00
|
|
|
GENERIC_IPR = 0x08000000,
|
2015-07-03 16:14:36 +02:00
|
|
|
|
|
|
|
/** The request targets the secure memory space. */
|
2015-08-07 10:55:38 +02:00
|
|
|
SECURE = 0x10000000,
|
2015-07-03 16:14:36 +02:00
|
|
|
/** The request is a page table walk */
|
2015-08-07 10:55:38 +02:00
|
|
|
PT_WALK = 0x20000000,
|
2015-07-03 16:14:36 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* These flags are *not* cleared when a Request object is
|
|
|
|
* reused (assigned a new address).
|
|
|
|
*/
|
|
|
|
STICKY_FLAGS = INST_FETCH
|
|
|
|
};
|
|
|
|
|
|
|
|
/** Master Ids that are statically allocated
|
2012-02-12 23:07:38 +01:00
|
|
|
* @{*/
|
2015-07-03 16:14:36 +02:00
|
|
|
enum : MasterID {
|
|
|
|
/** This master id is used for writeback requests by the caches */
|
|
|
|
wbMasterId = 0,
|
|
|
|
/**
|
|
|
|
* This master id is used for functional requests that
|
|
|
|
* don't come from a particular device
|
|
|
|
*/
|
|
|
|
funcMasterId = 1,
|
|
|
|
/** This master id is used for message signaled interrupts */
|
|
|
|
intMasterId = 2,
|
|
|
|
/**
|
|
|
|
* Invalid master id for assertion checking only. It is
|
|
|
|
* invalid behavior to ever send this id as part of a request.
|
|
|
|
*/
|
|
|
|
invldMasterId = std::numeric_limits<MasterID>::max()
|
|
|
|
};
|
2012-02-12 23:07:38 +01:00
|
|
|
/** @} */
|
|
|
|
|
2015-07-20 16:15:18 +02:00
|
|
|
typedef uint32_t MemSpaceConfigFlagsType;
|
|
|
|
typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags;
|
|
|
|
|
|
|
|
enum : MemSpaceConfigFlagsType {
|
|
|
|
/** Has a synchronization scope been set? */
|
|
|
|
SCOPE_VALID = 0x00000001,
|
|
|
|
/** Access has Wavefront scope visibility */
|
|
|
|
WAVEFRONT_SCOPE = 0x00000002,
|
|
|
|
/** Access has Workgroup scope visibility */
|
|
|
|
WORKGROUP_SCOPE = 0x00000004,
|
|
|
|
/** Access has Device (e.g., GPU) scope visibility */
|
|
|
|
DEVICE_SCOPE = 0x00000008,
|
|
|
|
/** Access has System (e.g., CPU + GPU) scope visibility */
|
|
|
|
SYSTEM_SCOPE = 0x00000010,
|
|
|
|
|
|
|
|
/** Global Segment */
|
|
|
|
GLOBAL_SEGMENT = 0x00000020,
|
|
|
|
/** Group Segment */
|
|
|
|
GROUP_SEGMENT = 0x00000040,
|
|
|
|
/** Private Segment */
|
|
|
|
PRIVATE_SEGMENT = 0x00000080,
|
|
|
|
/** Kernarg Segment */
|
|
|
|
KERNARG_SEGMENT = 0x00000100,
|
|
|
|
/** Readonly Segment */
|
|
|
|
READONLY_SEGMENT = 0x00000200,
|
|
|
|
/** Spill Segment */
|
|
|
|
SPILL_SEGMENT = 0x00000400,
|
|
|
|
/** Arg Segment */
|
|
|
|
ARG_SEGMENT = 0x00000800,
|
|
|
|
};
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
private:
|
2009-04-21 03:40:00 +02:00
|
|
|
typedef uint8_t PrivateFlagsType;
|
|
|
|
typedef ::Flags<PrivateFlagsType> PrivateFlags;
|
2007-02-12 19:06:30 +01:00
|
|
|
|
2015-07-03 16:14:36 +02:00
|
|
|
enum : PrivateFlagsType {
|
|
|
|
/** Whether or not the size is valid. */
|
|
|
|
VALID_SIZE = 0x00000001,
|
|
|
|
/** Whether or not paddr is valid (has been written yet). */
|
|
|
|
VALID_PADDR = 0x00000002,
|
|
|
|
/** Whether or not the vaddr & asid are valid. */
|
|
|
|
VALID_VADDR = 0x00000004,
|
2015-12-07 23:42:15 +01:00
|
|
|
/** Whether or not the instruction sequence number is valid. */
|
|
|
|
VALID_INST_SEQ_NUM = 0x00000008,
|
2015-07-03 16:14:36 +02:00
|
|
|
/** Whether or not the pc is valid. */
|
|
|
|
VALID_PC = 0x00000010,
|
|
|
|
/** Whether or not the context ID is valid. */
|
|
|
|
VALID_CONTEXT_ID = 0x00000020,
|
|
|
|
/** Whether or not the sc result is valid. */
|
|
|
|
VALID_EXTRA_DATA = 0x00000080,
|
|
|
|
/**
|
|
|
|
* These flags are *not* cleared when a Request object is reused
|
|
|
|
* (assigned a new address).
|
|
|
|
*/
|
2016-04-05 19:39:21 +02:00
|
|
|
STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
|
2015-07-03 16:14:36 +02:00
|
|
|
};
|
2006-02-15 20:53:02 +01:00
|
|
|
|
2006-04-07 21:54:48 +02:00
|
|
|
private:
|
2015-01-22 11:00:53 +01:00
|
|
|
|
|
|
|
    /**
     * Set up a physical (e.g. device) request in a previously
     * allocated Request object.
     *
     * Clears every non-sticky flag left over from a previous use of
     * this object, applies the new flags, and marks both the physical
     * address and the size as valid.
     */
    void
    setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
    {
        _paddr = paddr;
        _size = size;
        _time = time;
        _masterId = mid;
        // Only STICKY_FLAGS / STICKY_PRIVATE_FLAGS survive re-use of
        // the Request object.
        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_PADDR|VALID_SIZE);
        depth = 0;
        accessDelta = 0;
        // NOTE(review): unlike setVirt(), translateDelta is left
        // untouched here -- presumably so a translation latency
        // recorded before the physical address is finalized is not
        // lost. TODO: confirm this is intentional.
        //translateDelta = 0;
    }
|
|
|
|
|
2006-05-31 06:12:29 +02:00
|
|
|
/**
|
|
|
|
* The physical address of the request. Valid only if validPaddr
|
2008-11-10 20:51:17 +01:00
|
|
|
* is set.
|
|
|
|
*/
|
2009-08-02 07:50:10 +02:00
|
|
|
Addr _paddr;
|
2006-01-31 18:12:49 +01:00
|
|
|
|
2006-05-31 06:12:29 +02:00
|
|
|
/**
|
|
|
|
* The size of the request. This field must be set when vaddr or
|
|
|
|
* paddr is written via setVirt() or setPhys(), so it is always
|
2008-11-10 20:51:17 +01:00
|
|
|
* valid as long as one of the address fields is valid.
|
|
|
|
*/
|
2015-01-22 11:00:53 +01:00
|
|
|
unsigned _size;
|
2006-02-15 20:53:02 +01:00
|
|
|
|
2012-02-12 23:07:38 +01:00
|
|
|
/** The requestor ID which is unique in the system for all ports
|
|
|
|
* that are capable of issuing a transaction
|
|
|
|
*/
|
|
|
|
MasterID _masterId;
|
|
|
|
|
2006-04-07 21:54:48 +02:00
|
|
|
/** Flag structure for the request. */
|
2009-08-02 07:50:10 +02:00
|
|
|
Flags _flags;
|
2006-01-31 20:20:39 +01:00
|
|
|
|
2015-07-20 16:15:18 +02:00
|
|
|
/** Memory space configuration flag structure for the request. */
|
|
|
|
MemSpaceConfigFlags _memSpaceConfigFlags;
|
|
|
|
|
2009-04-21 03:40:00 +02:00
|
|
|
/** Private flags for field validity checking. */
|
|
|
|
PrivateFlags privateFlags;
|
|
|
|
|
2006-05-31 06:12:29 +02:00
|
|
|
/**
|
|
|
|
* The time this request was started. Used to calculate
|
2011-01-08 06:50:29 +01:00
|
|
|
* latencies. This field is set to curTick() any time paddr or vaddr
|
2008-11-10 20:51:17 +01:00
|
|
|
* is written.
|
|
|
|
*/
|
2009-05-30 00:30:16 +02:00
|
|
|
Tick _time;
|
2006-01-31 20:20:39 +01:00
|
|
|
|
2014-01-24 22:29:30 +01:00
|
|
|
/**
|
|
|
|
* The task id associated with this request
|
|
|
|
*/
|
|
|
|
uint32_t _taskId;
|
|
|
|
|
2006-01-31 20:20:39 +01:00
|
|
|
/** The address space ID. */
|
2009-08-02 07:50:10 +02:00
|
|
|
int _asid;
|
2006-11-29 23:11:10 +01:00
|
|
|
|
2006-05-31 06:12:29 +02:00
|
|
|
/** The virtual address of the request. */
|
2009-08-02 07:50:10 +02:00
|
|
|
Addr _vaddr;
|
2006-01-31 20:20:39 +01:00
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
|
|
|
|
* Extra data for the request, such as the return value of
|
2007-02-12 19:06:30 +01:00
|
|
|
* store conditional or the compare value for a CAS. */
|
2009-08-02 07:50:10 +02:00
|
|
|
uint64_t _extraData;
|
2006-01-31 18:12:49 +01:00
|
|
|
|
2016-04-05 19:39:21 +02:00
|
|
|
/** The context ID (for statistics, locks, and wakeups). */
|
2015-08-07 10:59:13 +02:00
|
|
|
ContextID _contextId;
|
2006-01-31 18:12:49 +01:00
|
|
|
|
|
|
|
/** program counter of initiating access; for tracing/debugging */
|
2009-08-02 07:50:10 +02:00
|
|
|
Addr _pc;
|
2006-05-31 06:12:29 +02:00
|
|
|
|
2015-12-07 23:42:15 +01:00
|
|
|
/** Sequence number of the instruction that creates the request */
|
2015-12-07 23:42:16 +01:00
|
|
|
InstSeqNum _reqInstSeqNum;
|
2015-12-07 23:42:15 +01:00
|
|
|
|
2016-01-19 19:57:50 +01:00
|
|
|
/** A pointer to an atomic operation */
|
|
|
|
AtomicOpFunctor *atomicOpFunctor;
|
|
|
|
|
2006-04-07 21:54:48 +02:00
|
|
|
public:
|
2015-01-22 11:00:53 +01:00
|
|
|
|
|
|
|
    /**
     * Minimal constructor: zero/default-initializes every member.
     * No validity bits are set (_flags and privateFlags are cleared
     * by the Flags default constructor), so accessors with validity
     * asserts cannot be used until setPhys()/setVirt() is called.
     */
    Request()
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {}
|
|
|
|
|
2015-12-07 23:42:15 +01:00
|
|
|
    /**
     * Constructor for physical requests that also carry the sequence
     * number of the initiating instruction (marked valid) and a
     * context id. The timestamp is set to curTick().
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
            InstSeqNum seq_num, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(seq_num), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
        setContext(cid);
        privateFlags.set(VALID_INST_SEQ_NUM);
    }
|
|
|
|
|
2006-05-31 06:12:29 +02:00
|
|
|
    /**
     * Constructor for physical (e.g. device) requests. Initializes
     * just physical address, size, flags, and timestamp (to curTick()).
     * These fields are adequate to perform a request; all other
     * members are zeroed.
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, curTick());
    }
|
2006-05-31 06:12:29 +02:00
|
|
|
|
2015-01-22 11:00:53 +01:00
|
|
|
    /**
     * Constructor for physical requests with a caller-supplied
     * timestamp instead of curTick().
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
    }
|
|
|
|
|
2015-01-22 11:00:53 +01:00
|
|
|
    /**
     * Constructor for physical requests with an explicit timestamp and
     * the PC of the initiating access (which is marked valid).
     */
    Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time,
            Addr pc)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(pc),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setPhys(paddr, size, flags, mid, time);
        privateFlags.set(VALID_PC);
    }
|
|
|
|
|
2015-01-22 11:00:53 +01:00
|
|
|
    /**
     * Constructor for virtual (e.g. CPU) requests: records asid, vaddr,
     * size, flags and PC via setVirt(), then the context id.
     */
    Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc, ContextID cid)
        : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
          _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
          _extraData(0), _contextId(0), _pc(0),
          _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
          accessDelta(0), depth(0)
    {
        setVirt(asid, vaddr, size, flags, mid, pc);
        setContext(cid);
    }
|
|
|
|
|
|
|
|
Request(int asid, Addr vaddr, int size, Flags flags, MasterID mid, Addr pc,
|
|
|
|
int cid, ThreadID tid, AtomicOpFunctor *atomic_op)
|
|
|
|
: atomicOpFunctor(atomic_op)
|
2006-06-03 00:15:20 +02:00
|
|
|
{
|
2012-02-12 23:07:38 +01:00
|
|
|
setVirt(asid, vaddr, size, flags, mid, pc);
|
2016-04-05 19:39:21 +02:00
|
|
|
setContext(cid);
|
2006-06-03 00:15:20 +02:00
|
|
|
}
|
|
|
|
|
2016-01-19 19:57:50 +01:00
|
|
|
    /**
     * Destructor: releases the atomic-op functor if one was attached
     * (the Request owns it; see the atomic-op constructor).
     *
     * NOTE(review): Request is copyable (e.g. splitOnVaddr() does
     * `new Request(*this)`) but no copy constructor duplicates or
     * nulls atomicOpFunctor, so copying a request that carries a
     * functor would lead to a double delete -- confirm callers never
     * copy atomic requests.
     */
    ~Request()
    {
        if (hasAtomicOpFunctor()) {
            delete atomicOpFunctor;
        }
    }
|
2007-06-21 19:50:35 +02:00
|
|
|
|
2006-05-31 06:12:29 +02:00
|
|
|
/**
|
2016-04-05 19:39:21 +02:00
|
|
|
* Set up Context numbers.
|
2008-11-10 20:51:17 +01:00
|
|
|
*/
|
|
|
|
void
|
2016-04-05 19:39:21 +02:00
|
|
|
setContext(ContextID context_id)
|
2006-05-31 06:12:29 +02:00
|
|
|
{
|
2008-11-10 20:51:17 +01:00
|
|
|
_contextId = context_id;
|
2016-04-05 19:39:21 +02:00
|
|
|
privateFlags.set(VALID_CONTEXT_ID);
|
2006-05-31 06:12:29 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
    /**
     * Set up a virtual (e.g., CPU) request in a previously
     * allocated Request object. Resets the timestamp to curTick(),
     * clears all non-sticky flags left from a previous use, marks
     * vaddr, size and pc as valid, and zeroes the latency/depth
     * bookkeeping fields.
     */
    void
    setVirt(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
            Addr pc)
    {
        _asid = asid;
        _vaddr = vaddr;
        _size = size;
        _masterId = mid;
        _pc = pc;
        _time = curTick();

        // Only STICKY_FLAGS / STICKY_PRIVATE_FLAGS survive re-use of
        // the Request object.
        _flags.clear(~STICKY_FLAGS);
        _flags.set(flags);
        privateFlags.clear(~STICKY_PRIVATE_FLAGS);
        privateFlags.set(VALID_VADDR|VALID_SIZE|VALID_PC);
        depth = 0;
        accessDelta = 0;
        translateDelta = 0;
    }
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
|
2013-06-18 16:10:22 +02:00
|
|
|
* Set just the physical address. This usually used to record the
|
|
|
|
* result of a translation. However, when using virtualized CPUs
|
|
|
|
* setPhys() is sometimes called to finalize a physical address
|
|
|
|
* without a virtual address, so we can't check if the virtual
|
|
|
|
* address is valid.
|
2006-05-31 06:12:29 +02:00
|
|
|
*/
|
2008-11-10 20:51:17 +01:00
|
|
|
void
|
2009-08-02 07:50:10 +02:00
|
|
|
setPaddr(Addr paddr)
|
2006-05-31 06:12:29 +02:00
|
|
|
{
|
2009-08-02 07:50:10 +02:00
|
|
|
_paddr = paddr;
|
2009-04-21 03:40:00 +02:00
|
|
|
privateFlags.set(VALID_PADDR);
|
2008-11-10 20:51:17 +01:00
|
|
|
}
|
|
|
|
|
2008-11-14 08:30:37 +01:00
|
|
|
    /**
     * Generate two requests as if this request had been split into two
     * pieces. The original request can't have been translated already
     * (it must have a valid vaddr and no paddr yet).
     *
     * req1 covers [vaddr, split_addr) and req2 covers
     * [split_addr, vaddr + size); split_addr must fall strictly inside
     * the original request.
     *
     * NOTE(review): the copies are shallow, so if this request carries
     * an atomicOpFunctor both new requests would share the raw pointer
     * and each destructor would delete it (double free) -- confirm
     * atomic requests are never split.
     */
    void splitOnVaddr(Addr split_addr, RequestPtr &req1, RequestPtr &req2)
    {
        assert(privateFlags.isSet(VALID_VADDR));
        assert(privateFlags.noneSet(VALID_PADDR));
        assert(split_addr > _vaddr && split_addr < _vaddr + _size);
        req1 = new Request(*this);
        req2 = new Request(*this);
        req1->_size = split_addr - _vaddr;
        req2->_vaddr = split_addr;
        req2->_size = _size - req1->_size;
    }
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
|
|
|
|
* Accessor for paddr.
|
|
|
|
*/
|
2009-04-21 03:40:00 +02:00
|
|
|
bool
|
2014-12-02 12:07:48 +01:00
|
|
|
hasPaddr() const
|
2009-04-21 03:40:00 +02:00
|
|
|
{
|
|
|
|
return privateFlags.isSet(VALID_PADDR);
|
|
|
|
}
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
Addr
|
2014-12-02 12:07:48 +01:00
|
|
|
getPaddr() const
|
2008-11-10 20:51:17 +01:00
|
|
|
{
|
2009-04-21 03:40:00 +02:00
|
|
|
assert(privateFlags.isSet(VALID_PADDR));
|
2009-08-02 07:50:10 +02:00
|
|
|
return _paddr;
|
2006-05-31 06:12:29 +02:00
|
|
|
}
|
|
|
|
|
2014-01-24 22:29:30 +01:00
|
|
|
/**
|
|
|
|
* Time for the TLB/table walker to successfully translate this request.
|
|
|
|
*/
|
|
|
|
Tick translateDelta;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Access latency to complete this memory transaction not including
|
|
|
|
* translation time.
|
|
|
|
*/
|
|
|
|
Tick accessDelta;
|
|
|
|
|
|
|
|
/**
|
|
|
|
 * Level of the cache hierarchy where this request was responded to
|
|
|
|
* (e.g. 0 = L1; 1 = L2).
|
|
|
|
*/
|
2014-12-02 12:07:48 +01:00
|
|
|
mutable int depth;
|
2014-01-24 22:29:30 +01:00
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/**
|
|
|
|
* Accessor for size.
|
|
|
|
*/
|
2009-04-21 03:40:00 +02:00
|
|
|
bool
|
2014-12-02 12:07:48 +01:00
|
|
|
hasSize() const
|
2009-04-21 03:40:00 +02:00
|
|
|
{
|
|
|
|
return privateFlags.isSet(VALID_SIZE);
|
|
|
|
}
|
|
|
|
|
2015-03-23 11:57:34 +01:00
|
|
|
unsigned
|
2014-12-02 12:07:48 +01:00
|
|
|
getSize() const
|
2008-11-10 20:51:17 +01:00
|
|
|
{
|
2009-04-21 03:40:00 +02:00
|
|
|
assert(privateFlags.isSet(VALID_SIZE));
|
2009-08-02 07:50:10 +02:00
|
|
|
return _size;
|
2008-11-10 20:51:17 +01:00
|
|
|
}
|
2006-05-31 06:12:29 +02:00
|
|
|
|
|
|
|
    /**
     * Accessor for the start time of the request (set to curTick()
     * whenever paddr or vaddr is written); used to compute latencies.
     * Asserts that an address validity bit (paddr or vaddr) is set.
     */
    Tick
    time() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _time;
    }
|
|
|
|
|
2016-01-19 19:57:50 +01:00
|
|
|
/**
|
|
|
|
* Accessor for atomic-op functor.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
hasAtomicOpFunctor()
|
|
|
|
{
|
|
|
|
return atomicOpFunctor != NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
AtomicOpFunctor *
|
|
|
|
getAtomicOpFunctor()
|
|
|
|
{
|
|
|
|
assert(atomicOpFunctor != NULL);
|
|
|
|
return atomicOpFunctor;
|
|
|
|
}
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
/** Accessor for flags. */
|
|
|
|
Flags
|
|
|
|
getFlags()
|
|
|
|
{
|
2009-04-21 03:40:00 +02:00
|
|
|
assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
|
2009-08-02 07:50:10 +02:00
|
|
|
return _flags;
|
2008-11-10 20:51:17 +01:00
|
|
|
}
|
|
|
|
|
2009-08-02 07:50:13 +02:00
|
|
|
    /**
     * Note that unlike other accessors, this function sets *specific
     * flags* (ORs them in); it does not assign its argument to the
     * _flags field. Thus this method should rightly be called
     * setFlags() and not just flags(). Requires a valid address.
     */
    void
    setFlags(Flags flags)
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        _flags.set(flags);
    }
|
|
|
|
|
2015-07-20 16:15:18 +02:00
|
|
|
    /**
     * OR the given memory-space configuration flags (scope/segment
     * bits) into this request. Asserts that an address validity bit
     * (paddr or vaddr) is set.
     */
    void
    setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags)
    {
        assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
        _memSpaceConfigFlags.set(extraFlags);
    }
|
|
|
|
|
2006-05-31 06:12:29 +02:00
|
|
|
/** Accessor function for vaddr.*/
|
2014-09-09 10:36:33 +02:00
|
|
|
bool
|
|
|
|
hasVaddr() const
|
|
|
|
{
|
|
|
|
return privateFlags.isSet(VALID_VADDR);
|
|
|
|
}
|
|
|
|
|
2008-11-10 20:51:17 +01:00
|
|
|
Addr
|
2014-09-09 10:36:33 +02:00
|
|
|
getVaddr() const
|
2008-11-10 20:51:17 +01:00
|
|
|
{
|
2009-04-21 03:40:00 +02:00
|
|
|
assert(privateFlags.isSet(VALID_VADDR));
|
2009-08-02 07:50:10 +02:00
|
|
|
return _vaddr;
|
2008-11-10 20:51:17 +01:00
|
|
|
}
|
2006-05-31 06:12:29 +02:00
|
|
|
|
2012-02-12 23:07:38 +01:00
|
|
|
/** Accesssor for the requestor id. */
|
|
|
|
MasterID
|
2014-12-02 12:07:48 +01:00
|
|
|
masterId() const
|
2012-02-12 23:07:38 +01:00
|
|
|
{
|
|
|
|
return _masterId;
|
|
|
|
}
|
|
|
|
|
2014-01-24 22:29:30 +01:00
|
|
|
uint32_t
|
|
|
|
taskId() const
|
|
|
|
{
|
|
|
|
return _taskId;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
taskId(uint32_t id) {
|
|
|
|
_taskId = id;
|
|
|
|
}
|
|
|
|
|
2006-05-31 06:12:29 +02:00
|
|
|
/** Accessor function for asid.*/
|
2008-11-10 20:51:17 +01:00
|
|
|
int
|
2014-12-02 12:07:48 +01:00
|
|
|
getAsid() const
|
2008-11-10 20:51:17 +01:00
|
|
|
{
|
2009-04-21 03:40:00 +02:00
|
|
|
assert(privateFlags.isSet(VALID_VADDR));
|
2009-08-02 07:50:10 +02:00
|
|
|
return _asid;
|
2008-11-10 20:51:17 +01:00
|
|
|
}
|
2006-05-31 06:12:29 +02:00
|
|
|
|
2011-09-13 19:06:13 +02:00
|
|
|
/** Accessor function for asid.*/
|
|
|
|
void
|
|
|
|
setAsid(int asid)
|
|
|
|
{
|
|
|
|
_asid = asid;
|
|
|
|
}
|
|
|
|
|
2013-10-15 13:26:34 +02:00
|
|
|
    /**
     * Accessor for the architecture-specific flags (the low ARCH_BITS
     * of the flags field, e.g. SPARC ASIs). Requires a valid address.
     */
    ArchFlagsType
    getArchFlags() const
    {
        assert(privateFlags.isSet(VALID_PADDR|VALID_VADDR));
        return _flags & ARCH_BITS;
    }
|
2006-11-29 23:11:10 +01:00
|
|
|
|
2006-06-06 20:06:30 +02:00
|
|
|
/** Accessor function to check if sc result is valid. */
|
2008-11-10 20:51:17 +01:00
|
|
|
bool
|
2014-12-02 12:07:48 +01:00
|
|
|
extraDataValid() const
|
2008-11-10 20:51:17 +01:00
|
|
|
{
|
2009-04-21 03:40:00 +02:00
|
|
|
return privateFlags.isSet(VALID_EXTRA_DATA);
|
2008-11-10 20:51:17 +01:00
|
|
|
}
|
|
|
|
|
2006-05-31 06:12:29 +02:00
|
|
|
/** Accessor function for store conditional return value.*/
|
2008-11-10 20:51:17 +01:00
|
|
|
uint64_t
|
|
|
|
getExtraData() const
|
|
|
|
{
|
2009-04-21 03:40:00 +02:00
|
|
|
assert(privateFlags.isSet(VALID_EXTRA_DATA));
|
2009-08-02 07:50:10 +02:00
|
|
|
return _extraData;
|
2008-11-10 20:51:17 +01:00
|
|
|
}
|
|
|
|
|
2006-05-31 06:12:29 +02:00
|
|
|
/** Accessor function for store conditional return value.*/
|
2008-11-10 20:51:17 +01:00
|
|
|
void
|
2009-08-02 07:50:10 +02:00
|
|
|
setExtraData(uint64_t extraData)
|
2008-11-10 20:51:17 +01:00
|
|
|
{
|
2009-08-02 07:50:10 +02:00
|
|
|
_extraData = extraData;
|
2009-04-21 03:40:00 +02:00
|
|
|
privateFlags.set(VALID_EXTRA_DATA);
|
2008-11-10 20:51:17 +01:00
|
|
|
}
|
2006-05-31 06:12:29 +02:00
|
|
|
|
2009-03-11 01:37:15 +01:00
|
|
|
bool
|
|
|
|
hasContextId() const
|
|
|
|
{
|
2009-04-21 03:40:00 +02:00
|
|
|
return privateFlags.isSet(VALID_CONTEXT_ID);
|
2009-03-11 01:37:15 +01:00
|
|
|
}
|
|
|
|
|
2008-11-03 03:57:07 +01:00
|
|
|
/** Accessor function for context ID.*/
|
2015-08-07 10:59:13 +02:00
|
|
|
ContextID
|
2008-11-10 20:51:17 +01:00
|
|
|
contextId() const
|
|
|
|
{
|
2009-04-21 03:40:00 +02:00
|
|
|
assert(privateFlags.isSet(VALID_CONTEXT_ID));
|
2008-11-10 20:51:17 +01:00
|
|
|
return _contextId;
|
|
|
|
}
|
|
|
|
|
mem: prefetcher: add options, support for unaligned addresses
This patch extends the classic prefetcher to work on non-block aligned
addresses. Because the existing prefetchers in gem5 mask off the lower
address bits of cache accesses, many predictable strides fail to be
detected. For example, if a load were to stride by 48 bytes, with 64 byte
cachelines, the current stride based prefetcher would see an access pattern
of 0, 64, 64, 128, 192.... Thus not detecting a constant stride pattern. This
patch fixes this, by training the prefetcher on access and not masking off the
lower address bits.
It also adds the following configuration options:
1) Training/prefetching only on cache misses,
2) Training/prefetching only on data accesses,
3) Optionally tagging prefetches with a PC address.
#3 allows prefetchers to train off of prefetch requests in systems with
multiple cache levels and PC-based prefetchers present at multiple levels.
It also effectively allows a pipelining of prefetch requests (like in POWER4)
across multiple levels of cache hierarchy.
Improves performance on my gem5 configuration by 4.3% for SPECINT and 4.7% for SPECFP (geomean).
2014-01-30 06:21:25 +01:00
|
|
|
void
|
|
|
|
setPC(Addr pc)
|
|
|
|
{
|
|
|
|
privateFlags.set(VALID_PC);
|
|
|
|
_pc = pc;
|
|
|
|
}
|
|
|
|
|
2009-02-16 17:56:40 +01:00
|
|
|
/** Returns true if a PC has been recorded for this request. */
bool
hasPC() const
{
    return privateFlags.isSet(VALID_PC);
}
|
|
|
|
|
2009-03-11 01:37:15 +01:00
|
|
|
/** Accessor function for pc.  Asserts a PC has been set via setPC(). */
Addr
getPC() const
{
    assert(privateFlags.isSet(VALID_PC));
    return _pc;
}
|
2006-04-07 21:54:48 +02:00
|
|
|
|
2014-01-24 22:29:30 +01:00
|
|
|
/**
 * Increment/Get the depth at which this request is responded to.
 * This currently happens when the request misses in any cache level.
 */
// NOTE(review): incAccessDepth() is const yet mutates depth, so depth
// is presumably declared mutable -- confirm in the member declarations.
void incAccessDepth() const { depth++; }
int getAccessDepth() const { return depth; }
|
|
|
|
|
|
|
|
/**
 * Set/Get the time taken for this request to be successfully translated.
 */
// Measured from _time (presumably the tick the request was created --
// confirm against the constructor) to the current tick.
void setTranslateLatency() { translateDelta = curTick() - _time; }
Tick getTranslateLatency() const { return translateDelta; }
|
|
|
|
|
|
|
|
/**
 * Set/Get the time taken to complete this request's access, not including
 * the time to successfully translate the request.
 */
// Subtracts translateDelta, so setTranslateLatency() is expected to
// have been called first; otherwise a stale/zero delta skews the result.
void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
Tick getAccessLatency() const { return accessDelta; }
|
|
|
|
|
2015-12-07 23:42:15 +01:00
|
|
|
/**
 * Accessor for the sequence number of instruction that creates the
 * request.  Returns true only if a sequence number has been recorded
 * via setReqInstSeqNum().
 */
bool
hasInstSeqNum() const
{
    return privateFlags.isSet(VALID_INST_SEQ_NUM);
}
|
|
|
|
|
|
|
|
/**
 * Accessor for the sequence number of the instruction that created
 * this request.  Asserts the number has been set.
 */
InstSeqNum
getReqInstSeqNum() const
{
    assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
    return _reqInstSeqNum;
}
|
|
|
|
|
2015-12-07 23:42:16 +01:00
|
|
|
void
|
|
|
|
setReqInstSeqNum(const InstSeqNum seq_num)
|
|
|
|
{
|
|
|
|
privateFlags.set(VALID_INST_SEQ_NUM);
|
|
|
|
_reqInstSeqNum = seq_num;
|
|
|
|
}
|
|
|
|
|
2015-07-20 16:15:18 +02:00
|
|
|
/**
 * Accessor functions for flags.  Note that these are for testing
 * only; setting flags should be done via setFlags().
 */
bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
bool isPrefetch() const { return _flags.isSet(PREFETCH); }
// LLSC: load-link/store-conditional access.
bool isLLSC() const { return _flags.isSet(LLSC); }
bool isPriv() const { return _flags.isSet(PRIVILEGED); }
bool isLockedRMW() const { return _flags.isSet(LOCKED_RMW); }
// True for either swap kind: isSet() takes a mask and reports whether
// any bit in it is set.
bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
// Memory-mapped register access (IPR).
bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
bool isSecure() const { return _flags.isSet(SECURE); }
// Request generated by a page-table walk.
bool isPTWalk() const { return _flags.isSet(PT_WALK); }
bool isAcquire() const { return _flags.isSet(ACQUIRE); }
bool isRelease() const { return _flags.isSet(RELEASE); }
bool isKernel() const { return _flags.isSet(KERNEL); }
bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }
|
|
|
|
|
|
|
|
bool
|
|
|
|
isAtomic() const
|
|
|
|
{
|
|
|
|
return _flags.isSet(ATOMIC_RETURN_OP) ||
|
|
|
|
_flags.isSet(ATOMIC_NO_RETURN_OP);
|
|
|
|
}
|
2015-07-20 16:15:18 +02:00
|
|
|
|
|
|
|
/**
 * Accessor functions for the memory space configuration flags, used by
 * GPU ISAs such as the Heterogeneous System Architecture (HSA).  Note
 * that these are for testing only; setting extraFlags should be done
 * via setMemSpaceConfigFlags().
 */
bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); }
|
|
|
|
|
|
|
|
/** True iff the WAVEFRONT_SCOPE flag is set; request must be scoped. */
bool
isWavefrontScope() const
{
    assert(isScoped());
    return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE);
}

/** True iff the WORKGROUP_SCOPE flag is set; request must be scoped. */
bool
isWorkgroupScope() const
{
    assert(isScoped());
    return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE);
}

/** True iff the DEVICE_SCOPE flag is set; request must be scoped. */
bool
isDeviceScope() const
{
    assert(isScoped());
    return _memSpaceConfigFlags.isSet(DEVICE_SCOPE);
}

/** True iff the SYSTEM_SCOPE flag is set; request must be scoped. */
bool
isSystemScope() const
{
    assert(isScoped());
    return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE);
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
isGlobalSegment() const
|
|
|
|
{
|
|
|
|
return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) ||
|
|
|
|
(!isGroupSegment() && !isPrivateSegment() &&
|
|
|
|
!isKernargSegment() && !isReadonlySegment() &&
|
|
|
|
!isSpillSegment() && !isArgSegment());
|
|
|
|
}
|
|
|
|
|
|
|
|
/** True iff the GROUP_SEGMENT memory-space flag is set. */
bool
isGroupSegment() const
{
    return _memSpaceConfigFlags.isSet(GROUP_SEGMENT);
}

/** True iff the PRIVATE_SEGMENT memory-space flag is set. */
bool
isPrivateSegment() const
{
    return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT);
}

/** True iff the KERNARG_SEGMENT memory-space flag is set. */
bool
isKernargSegment() const
{
    return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT);
}

/** True iff the READONLY_SEGMENT memory-space flag is set. */
bool
isReadonlySegment() const
{
    return _memSpaceConfigFlags.isSet(READONLY_SEGMENT);
}

/** True iff the SPILL_SEGMENT memory-space flag is set. */
bool
isSpillSegment() const
{
    return _memSpaceConfigFlags.isSet(SPILL_SEGMENT);
}

/** True iff the ARG_SEGMENT memory-space flag is set. */
bool
isArgSegment() const
{
    return _memSpaceConfigFlags.isSet(ARG_SEGMENT);
}
|
2006-01-31 20:20:39 +01:00
|
|
|
};
|
2006-01-31 18:12:49 +01:00
|
|
|
|
|
|
|
#endif // __MEM_REQUEST_HH__
|