X86: Fix segment limit checks.

Gabe Black 2009-02-27 09:23:50 -08:00
parent 9491debaa6
commit 9dfa3f7f73
3 changed files with 21 additions and 22 deletions

@@ -67,7 +67,8 @@ namespace X86ISA
     static const Request::FlagsType SegmentFlagMask = mask(4);
     static const int FlagShift = 4;
     enum FlagBit {
-        CPL0FlagBit = 1
+        CPL0FlagBit = 1,
+        AddrSizeFlagBit = 2
     };

     /**
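
For orientation: the low four bits of a request's flags carry the segment register index, and the FlagBit values sit above them, shifted left by FlagShift. A minimal sketch of how such flags are unpacked, with invented helper names and mask(4) expanded by hand (the TLB change below performs the same tests inline):

#include <cstdint>

typedef uint64_t FlagsType;

static const FlagsType SegmentFlagMask = (1 << 4) - 1;  // mask(4) == 0xf
static const int FlagShift = 4;
enum FlagBit {
    CPL0FlagBit = 1,
    AddrSizeFlagBit = 2
};

// Hypothetical helpers illustrating the layout; not gem5 API.
int segmentIndex(FlagsType flags) { return flags & SegmentFlagMask; }
bool isCpl0Access(FlagsType flags) { return flags & (CPL0FlagBit << FlagShift); }
bool hasAddrSizeOverride(FlagsType flags) {
    return flags & (AddrSizeFlagBit << FlagShift);
}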

@@ -375,6 +375,8 @@ let {{
                 self.memFlags += " | (CPL0FlagBit << FlagShift)"
             if prefetch:
                 self.memFlags += " | Request::PF_EXCLUSIVE"
+            self.memFlags += " | (machInst.legacy.addr ? " + \
+                             "(AddrSizeFlagBit << FlagShift) : 0)"

         def getAllocator(self, *microFlags):
             allocator = '''new %(class_name)s(machInst, macrocodeBlock
@@ -439,7 +441,7 @@ let {{
     defineMicroLoadOp('Ldfp', 'FpData.uqw = Mem;')

     def defineMicroStoreOp(mnemonic, code, \
-            postCode="", completeCode="", mem_flags=0):
+            postCode="", completeCode="", mem_flags="0"):
         global header_output
         global decoder_output
         global exec_output
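
Two things happen here. The new memFlags term records the instruction's address-size override prefix in the request flags at decode time, so the TLB can later choose a 16- versus 32-bit effective address size. And since memFlags is now always extended by string concatenation into the generated C++, defineMicroStoreOp's mem_flags default must be the string "0" rather than the integer 0. Roughly, the expression the generator splices into each microop's constructor behaves like this sketch (stub types, not the generated code verbatim):

#include <cstdint>

static const int FlagShift = 4;
enum FlagBit { CPL0FlagBit = 1, AddrSizeFlagBit = 2 };

// Stand-in for the decoded instruction; legacy.addr is set when the
// address-size override prefix was seen (stub, not gem5's ExtMachInst).
struct MachInstStub { struct { bool addr; } legacy; };

uint64_t memFlagsFor(const MachInstStub &machInst, uint64_t baseFlags,
                     bool cpl0) {
    uint64_t memFlags = baseFlags;
    if (cpl0)
        memFlags |= CPL0FlagBit << FlagShift;
    // The new term: latch the address-size override into the request flags.
    memFlags |= machInst.legacy.addr ? (AddrSizeFlagBit << FlagShift) : 0;
    return memFlags;
}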

@@ -575,39 +575,35 @@ TLB::translate(RequestPtr req, ThreadContext *tc,
             if (!tc->readMiscRegNoEffect(MISCREG_SEG_SEL(seg)))
                 return new GeneralProtection(0);
             bool expandDown = false;
-            if (seg >= SEGMENT_REG_ES && seg <= SEGMENT_REG_HS) {
             SegAttr attr = tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(seg));
+            if (seg >= SEGMENT_REG_ES && seg <= SEGMENT_REG_HS) {
                 if (!attr.writable && write)
                     return new GeneralProtection(0);
                 if (!attr.readable && !write && !execute)
                     return new GeneralProtection(0);
                 expandDown = attr.expandDown;
             }
             Addr base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(seg));
             Addr limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(seg));
+            // This assumes we're not in 64 bit mode. If we were, the default
+            // address size is 64 bits, overridable to 32.
+            int size = 32;
+            bool sizeOverride = (flags & (AddrSizeFlagBit << FlagShift));
+            if (csAttr.defaultSize && sizeOverride ||
+                !csAttr.defaultSize && !sizeOverride)
+                size = 16;
+            Addr offset = bits(vaddr - base, size-1, 0);
+            Addr endOffset = offset + req->getSize() - 1;
+
             if (expandDown) {
                 DPRINTF(TLB, "Checking an expand down segment.\n");
-                // We don't have to worry about the access going around the
-                // end of memory because accesses will be broken up into
-                // pieces at boundaries aligned on sizes smaller than an
-                // entire address space. We do have to worry about the limit
-                // being less than the base.
-                if (limit < base) {
-                    if (limit < vaddr + req->getSize() && vaddr < base)
-                        return new GeneralProtection(0);
-                } else {
-                    if (limit < vaddr + req->getSize())
-                        return new GeneralProtection(0);
-                }
+                warn_once("Expand down segments are untested.\n");
+                if (offset <= limit || endOffset <= limit)
+                    return new GeneralProtection(0);
             } else {
-                if (limit < base) {
-                    if (vaddr <= limit || vaddr + req->getSize() >= base)
-                        return new GeneralProtection(0);
-                } else {
-                    if (vaddr <= limit && vaddr + req->getSize() >= base)
-                        return new GeneralProtection(0);
-                }
+                if (offset > limit || endOffset > limit)
+                    return new GeneralProtection(0);
             }
         }
         // If paging is enabled, do the translation.
         if (cr0.pg) {
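
The rewritten check compares offsets within the segment instead of raw virtual addresses against base and limit. The effective offset is vaddr - base truncated to the effective address size (16 or 32 bits here, since this path assumes non-64-bit mode), which is what makes the old special-casing of limit < base unnecessary: wrap-around inside the segment is absorbed by the truncation. A standalone sketch of the math, assuming plain integer types rather than the gem5 helpers (segmentFaults and its parameters are invented for illustration):

#include <cassert>
#include <cstdint>

typedef uint64_t Addr;

// bits(x, hi, lo): extract bits hi..lo, as gem5's bitfield helper does.
static Addr bits(Addr val, int hi, int lo) {
    return (val >> lo) & ((Addr(1) << (hi - lo + 1)) - 1);
}

// True if an access of accessSize bytes at vaddr fails the limit check.
// Mirrors the logic added to tlb.cc above.
bool segmentFaults(Addr vaddr, int accessSize, Addr base, Addr limit,
                   int addrSize /* 16 or 32 */, bool expandDown) {
    Addr offset = bits(vaddr - base, addrSize - 1, 0);
    Addr endOffset = offset + accessSize - 1;
    if (expandDown) {
        // Expand-down: only offsets strictly above the limit are valid.
        return offset <= limit || endOffset <= limit;
    } else {
        // Expand-up: valid offsets run from 0 through the limit.
        return offset > limit || endOffset > limit;
    }
}

int main() {
    // 4-byte access ending exactly at the limit passes...
    assert(!segmentFaults(0xFFFC, 4, 0, 0xFFFF, 16, false));
    // ...but starting 2 bytes later it runs past the limit and faults.
    assert(segmentFaults(0xFFFE, 4, 0, 0xFFFF, 16, false));
    // Truncation handles wrap: vaddr below base wraps to offset 0xFFF2.
    assert(!segmentFaults(0x0002, 1, 0x10, 0xFFFF, 16, false));
    return 0;
}

For an expand-up segment, offsets 0 through limit are valid; for an expand-down segment only offsets above the limit are, which is why the sense of the comparison flips there, and why the commit marks that path with warn_once as untested.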