X86: Implement a partial, sort of correct version of the protected mode variant of iret.

Gabe Black 2008-06-12 00:53:01 -04:00
parent 66f54a6037
commit 8d2416c6e9
2 changed files with 191 additions and 10 deletions

View file

@@ -1,4 +1,4 @@
// Copyright (c) 2007 The Hewlett-Packard Development Company
// Copyright (c) 2007-2008 The Hewlett-Packard Development Company
// All rights reserved.
//
// Redistribution and use of this software in source and binary forms,
@@ -423,7 +423,11 @@
0x0: Inst::UD2();
default: into();
}
0x7: iret();
0x7: decode MODE_SUBMODE {
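// Submode 4 is real mode and submode 3 is virtual 8086 mode; the
// remaining submodes (64 bit, compatibility, and protected) all share
// the protected mode implementation.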
0x4: Inst::IRET_REAL();
0x3: Inst::IRET_VIRT();
default: Inst::IRET_PROT();
}
}
}
0x1A: decode OPCODE_OP_BOTTOM3 {

View file

@@ -1,4 +1,4 @@
# Copyright (c) 2007 The Hewlett-Packard Development Company
# Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms,
@@ -53,16 +53,193 @@
#
# Authors: Gabe Black
microcode = ""
microcode = '''
def macroop IRET_REAL {
panic "Real mode iret isn't implemented!"
};
def macroop IRET_PROT {
.adjust_env oszIn64Override
# Check for a nested task. This isn't supported at the moment.
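# The rflag microop presumably sets EZF based on the flag it reads, so the
# panic below only fires when NT is actually set.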
rflag t1, NT
panic "Task switching with iret is unimplemented!", flags=(nCEZF,)
#t1 = temp_RIP
#t2 = temp_CS
#t3 = temp_RFLAGS
#t4 = handy m5 register
# Pop temp_RIP, temp_CS, and temp_RFLAGS
ld t1, ss, [1, t0, rsp], "0 * env.stackSize", dataSize=ssz
ld t2, ss, [1, t0, rsp], "1 * env.stackSize", dataSize=ssz
ld t3, ss, [1, t0, rsp], "2 * env.stackSize", dataSize=ssz
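# Note that rsp isn't adjusted yet; the frame is read in place with
# displacements so that a fault later on leaves the old stack intact.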
###
### Handle the case where we're returning to virtual 8086 mode.
###
#IF ((temp_RFLAGS.VM=1) && (CPL=0) && (LEGACY_MODE))
# IRET_FROM_PROTECTED_TO_VIRTUAL
#temp_RFLAGS.VM != 1
rcri t0, t3, 18, flags=(ECF,)
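# rcri rotates right through the carry flag, so rotating temp_RFLAGS by 18
# should leave bit 17, the VM flag, in ECF for the branch below.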
bri t0, label("protToVirtFallThrough"), flags=(nCECF,)
#CPL=0
rdm5reg t4
andi t0, t4, 0x30, flags=(EZF,)
bri t0, label("protToVirtFallThrough"), flags=(nCEZF,)
#(LEGACY_MODE)
rcri t0, t4, 1, flags=(ECF,)
bri t0, label("protToVirtFallThrough"), flags=(nCECF,)
panic "iret to virtual mode not supported"
protToVirtFallThrough:
#temp_CPL = temp_CS.rpl
andi t5, t2, 0x3
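# The low two bits of a selector are its RPL, so t5 now holds the
# privilege level we're returning to.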
###
### Read in the info for the new CS segment.
###
#CS = READ_DESCRIPTOR (temp_CS, iret_chk)
andi t0, t2, 0xFC, flags=(EZF,), dataSize=2
bri t0, label("processCSDescriptor"), flags=(CEZF,)
andi t6, t2, 0xF8, dataSize=8
andi t0, t2, 0x4, flags=(EZF,), dataSize=2
bri t0, label("globalCSDescriptor"), flags=(CEZF,)
ld t6, tsl, [1, t0, t6], dataSize=8
bri t0, label("processCSDescriptor")
globalCSDescriptor:
ld t6, tsg, [1, t0, t6], dataSize=8
processCSDescriptor:
chks t2, t6, dataSize=8
# This actually updates state, which is wrong. It should wait until we know
# we're not going to fault. Unfortunately, that's hard to do.
wrdl cs, t6, t2
wrsel cs, t2
#CPL = temp_CPL
###
### Get the new stack pointer and stack segment off the old stack if necessary,
### and piggyback on the logic to check the new RIP value.
###
#IF ((64BIT_MODE) || (temp_CPL!=CPL))
#{
#(64BIT_MODE)
andi t0, t4, 0xE, flags=(EZF,)
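# Bits 1 through 3 of the m5 register appear to hold the mode/submode;
# all zeros means we're in 64 bit mode.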
# Since we just found out we're in 64 bit mode, take advantage and
# do the appropriate RIP checks.
bri t0, label("doPopStackStuffAndCheckRIP"), flags=(CEZF,)
# Here, we know we're -not- in 64 bit mode, so we should do the
# limit based RIP check instead.
# if temp_RIP > CS.limit throw #GP(0)
rdlimit t6, cs
sub t0, t1, t6, flags=(ECF,)
fault "new GeneralProtection(0)", flags=(CECF,)
#(temp_CPL!=CPL)
srli t7, t4, 4
xor t7, t7, t5
andi t0, t7, 0x3, flags=(EZF,)
bri t0, label("doPopStackStuff"), flags=(nCEZF,)
# We can modify user visible state here because we know
# we're done with things that can fault.
addi rsp, rsp, "3 * env.stackSize"
bri t0, label("fallThroughPopStackStuff")
doPopStackStuffAndCheckRIP:
# Check if the RIP is canonical.
srai t7, t1, 47, flags=(EZF,), dataSize=ssz
# if t7 isn't 0 or -1, it wasn't canonical.
bri t0, label("doPopStackStuff"), flags=(CEZF,)
addi t0, t7, 1, flags=(EZF,), dataSize=ssz
fault "new GeneralProtection(0)", flags=(nCEZF,)
doPopStackStuff:
# POP.v temp_RSP
ld t6, ss, [1, t0, rsp], "3 * env.dataSize", dataSize=ssz
# POP.v temp_SS
ld t2, ss, [1, t0, rsp], "4 * env.dataSize", dataSize=ssz
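# When the privilege level changes, the frame also holds the outer stack
# pointer and stack segment, in the two slots above RIP, CS, and RFLAGS.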
# SS = READ_DESCRIPTOR (temp_SS, ss_chk)
andi t0, t2, 0xFC, flags=(EZF,), dataSize=2
bri t0, label("processSSDescriptor"), flags=(CEZF,)
andi t7, t2, 0xF8, dataSize=8
andi t0, t2, 0x4, flags=(EZF,), dataSize=2
bri t0, label("globalSSDescriptor"), flags=(CEZF,)
ld t7, tsl, [1, t0, t7], dataSize=8
bri t0, label("processSSDescriptor")
globalSSDescriptor:
ld t7, tsg, [1, t0, t7], dataSize=8
processSSDescriptor:
chks t2, t7, dataSize=8
# This actually updates state, which is wrong. It should wait until we know
# we're not going to fault. Unfortunately, that's hard to do.
wrdl ss, t7, t2
wrsel ss, t2
###
### From this point downwards, we can't fault. We can update user visible state.
###
# RSP.s = temp_RSP
mov rsp, rsp, t6, dataSize=ssz
#}
fallThroughPopStackStuff:
#IF (changing CPL)
#{
srli t7, t4, 4
xor t7, t7, t5
andi t0, t7, 0x3, flags=(EZF,)
bri t0, label("skipSegmentSquashing"), flags=(CEZF,)
# The attribute register needs to keep track of more info before this will
# work the way it needs to.
# FOR (seg = ES, DS, FS, GS)
# IF ((seg.attr.dpl < cpl && ((seg.attr.type = 'data')
# || (seg.attr.type = 'non-conforming-code')))
# {
# seg = NULL
# }
#}
skipSegmentSquashing:
# Ignore this for now.
#RFLAGS.v = temp_RFLAGS
# VIF,VIP,IOPL only changed if (old_CPL = 0)
# IF only changed if (old_CPL <= old_RFLAGS.IOPL)
# VM unchanged
# RF cleared
#RIP = temp_RIP
wrip t0, t1, dataSize=ssz
};
def macroop IRET_VIRT {
panic "Virtual mode iret isn't implemented!"
};
'''
#let {{
# class INT(Inst):
# "GenFault ${new UnimpInstFault}"
# class INTO(Inst):
# "GenFault ${new UnimpInstFault}"
# class IRET(Inst):
# "GenFault ${new UnimpInstFault}"
# class IRETD(Inst):
# "GenFault ${new UnimpInstFault}"
# class IRETQ(Inst):
# "GenFault ${new UnimpInstFault}"
#}};