isa_parser: Move more stuff into the ISAParser class
commit f82a92925c
parent f7a627338c
@@ -1222,8 +1222,10 @@ namespace %(namespace)s {
 '''

 class ISAParser(Grammar):
-    def __init__(self, *args, **kwargs):
-        super(ISAParser, self).__init__(*args, **kwargs)
+    def __init__(self, output_dir):
+        super(ISAParser, self).__init__()
+        self.output_dir = output_dir
+
         self.templateMap = {}

     #####################################################################
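The practical effect of this hunk is that the parser instance now carries its own output directory, and the file-emitting helper reworked in the next hunk joins bare file names against it. A minimal, self-contained sketch of that pattern follows; the class name and the temporary directory are illustrative stand-ins, not part of isa_parser.py:

import os
import tempfile

class Writer(object):
    """Toy stand-in for the output handling that ISAParser gains here."""

    def __init__(self, output_dir):
        # Capture the directory once, as ISAParser.__init__ now does.
        self.output_dir = output_dir

    def update_if_needed(self, name, contents):
        # Join the bare name with the stored directory (mirroring
        # os.path.join(self.output_dir, file) in the real method) and
        # rewrite the file only if its contents actually changed.
        path = os.path.join(self.output_dir, name)
        if os.access(path, os.R_OK):
            with open(path) as f:
                if f.read() == contents:
                    return False     # unchanged: keep the old timestamp
        with open(path, 'w') as f:
            f.write(contents)
        return True

out_dir = tempfile.mkdtemp()         # hypothetical build output directory
Writer(out_dir).update_if_needed('decoder.hh', '// example contents\n')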
@@ -1915,115 +1917,120 @@ StaticInstPtr

 # END OF GRAMMAR RULES

-# Now build the parser.
-parser = ISAParser()
-
-# Update the output file only if the new contents are different from
-# the current contents. Minimizes the files that need to be rebuilt
-# after minor changes.
-def update_if_needed(file, contents):
-    update = False
-    if os.access(file, os.R_OK):
-        f = open(file, 'r')
-        old_contents = f.read()
-        f.close()
-        if contents != old_contents:
-            print 'Updating', file
-            os.remove(file) # in case it's write-protected
-            update = True
-        else:
-            print 'File', file, 'is unchanged'
-    else:
-        print 'Generating', file
-        update = True
-    if update:
-        f = open(file, 'w')
-        f.write(contents)
-        f.close()
-
-# This regular expression matches '##include' directives
-includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[\w/.-]*)".*$',
-                       re.MULTILINE)
-
-# Function to replace a matched '##include' directive with the
-# contents of the specified file (with nested ##includes replaced
-# recursively). 'matchobj' is an re match object (from a match of
-# includeRE) and 'dirname' is the directory relative to which the file
-# path should be resolved.
-def replace_include(matchobj, dirname):
-    fname = matchobj.group('filename')
-    full_fname = os.path.normpath(os.path.join(dirname, fname))
-    contents = '##newfile "%s"\n%s\n##endfile\n' % \
-               (full_fname, read_and_flatten(full_fname))
-    return contents
-
-# Read a file and recursively flatten nested '##include' files.
-def read_and_flatten(filename):
-    current_dir = os.path.dirname(filename)
-    try:
-        contents = open(filename).read()
-    except IOError:
-        error(0, 'Error including file "%s"' % filename)
-    fileNameStack.push((filename, 0))
-    # Find any includes and include them
-    contents = includeRE.sub(lambda m: replace_include(m, current_dir),
-                             contents)
-    fileNameStack.pop()
-    return contents
-
-#
-# Read in and parse the ISA description.
-#
-def parse_isa_desc(isa_desc_file, output_dir):
-    # Read file and (recursively) all included files into a string.
-    # PLY requires that the input be in a single string so we have to
-    # do this up front.
-    isa_desc = read_and_flatten(isa_desc_file)
-
-    # Initialize filename stack with outer file.
-    fileNameStack.push((isa_desc_file, 0))
-
-    # Parse it.
-    (isa_name, namespace, global_code, namespace_code) = parser.parse(isa_desc)
-
-    # grab the last three path components of isa_desc_file to put in
-    # the output
-    filename = '/'.join(isa_desc_file.split('/')[-3:])
-
-    # generate decoder.hh
-    includes = '#include "base/bitfield.hh" // for bitfield support'
-    global_output = global_code.header_output
-    namespace_output = namespace_code.header_output
-    decode_function = ''
-    update_if_needed(output_dir + '/decoder.hh', file_template % vars())
-
-    # generate decoder.cc
-    includes = '#include "decoder.hh"'
-    global_output = global_code.decoder_output
-    namespace_output = namespace_code.decoder_output
-    # namespace_output += namespace_code.decode_block
-    decode_function = namespace_code.decode_block
-    update_if_needed(output_dir + '/decoder.cc', file_template % vars())
-
-    # generate per-cpu exec files
-    for cpu in cpu_models:
-        includes = '#include "decoder.hh"\n'
-        includes += cpu.includes
-        global_output = global_code.exec_output[cpu.name]
-        namespace_output = namespace_code.exec_output[cpu.name]
-        decode_function = ''
-        update_if_needed(output_dir + '/' + cpu.filename,
-                         file_template % vars())
-
-    # The variable names here are hacky, but this will creat local variables
-    # which will be referenced in vars() which have the value of the globals.
-    global maxInstSrcRegs
-    MaxInstSrcRegs = maxInstSrcRegs
-    global maxInstDestRegs
-    MaxInstDestRegs = maxInstDestRegs
-    # max_inst_regs.hh
-    update_if_needed(output_dir + '/max_inst_regs.hh', \
-            max_inst_regs_template % vars())
+    def update_if_needed(self, file, contents):
+        '''Update the output file only if the new contents are
+        different from the current contents. Minimizes the files that
+        need to be rebuilt after minor changes.'''
+
+        file = os.path.join(self.output_dir, file)
+        update = False
+        if os.access(file, os.R_OK):
+            f = open(file, 'r')
+            old_contents = f.read()
+            f.close()
+            if contents != old_contents:
+                print 'Updating', file
+                os.remove(file) # in case it's write-protected
+                update = True
+            else:
+                print 'File', file, 'is unchanged'
+        else:
+            print 'Generating', file
+            update = True
+        if update:
+            f = open(file, 'w')
+            f.write(contents)
+            f.close()
+
+    # This regular expression matches '##include' directives
+    includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[\w/.-]*)".*$',
+                           re.MULTILINE)
+
+    def replace_include(self, matchobj, dirname):
+        """Function to replace a matched '##include' directive with the
+        contents of the specified file (with nested ##includes
+        replaced recursively). 'matchobj' is an re match object
+        (from a match of includeRE) and 'dirname' is the directory
+        relative to which the file path should be resolved."""
+
+        fname = matchobj.group('filename')
+        full_fname = os.path.normpath(os.path.join(dirname, fname))
+        contents = '##newfile "%s"\n%s\n##endfile\n' % \
+                   (full_fname, self.read_and_flatten(full_fname))
+        return contents
+
+    def read_and_flatten(self, filename):
+        """Read a file and recursively flatten nested '##include' files."""
+
+        current_dir = os.path.dirname(filename)
+        try:
+            contents = open(filename).read()
+        except IOError:
+            error(0, 'Error including file "%s"' % filename)
+
+        fileNameStack.push((filename, 0))
+
+        # Find any includes and include them
+        def replace(matchobj):
+            return self.replace_include(matchobj, current_dir)
+        contents = self.includeRE.sub(replace, contents)
+
+        fileNameStack.pop()
+        return contents
+
+    def parse_isa_desc(self, isa_desc_file):
+        '''Read in and parse the ISA description.'''
+
+        # Read file and (recursively) all included files into a string.
+        # PLY requires that the input be in a single string so we have to
+        # do this up front.
+        isa_desc = self.read_and_flatten(isa_desc_file)
+
+        # Initialize filename stack with outer file.
+        fileNameStack.push((isa_desc_file, 0))
+
+        # Parse it.
+        (isa_name, namespace, global_code, namespace_code) = \
+                   self.parse(isa_desc)
+
+        # grab the last three path components of isa_desc_file to put in
+        # the output
+        filename = '/'.join(isa_desc_file.split('/')[-3:])
+
+        # generate decoder.hh
+        includes = '#include "base/bitfield.hh" // for bitfield support'
+        global_output = global_code.header_output
+        namespace_output = namespace_code.header_output
+        decode_function = ''
+        self.update_if_needed('decoder.hh', file_template % vars())
+
+        # generate decoder.cc
+        includes = '#include "decoder.hh"'
+        global_output = global_code.decoder_output
+        namespace_output = namespace_code.decoder_output
+        # namespace_output += namespace_code.decode_block
+        decode_function = namespace_code.decode_block
+        self.update_if_needed('decoder.cc', file_template % vars())
+
+        # generate per-cpu exec files
+        for cpu in cpu_models:
+            includes = '#include "decoder.hh"\n'
+            includes += cpu.includes
+            global_output = global_code.exec_output[cpu.name]
+            namespace_output = namespace_code.exec_output[cpu.name]
+            decode_function = ''
+            self.update_if_needed(cpu.filename, file_template % vars())
+
+        # The variable names here are hacky, but this will creat local
+        # variables which will be referenced in vars() which have the
+        # value of the globals.
+        global maxInstSrcRegs
+        MaxInstSrcRegs = maxInstSrcRegs
+        global maxInstDestRegs
+        MaxInstDestRegs = maxInstDestRegs
+        # max_inst_regs.hh
+        self.update_if_needed('max_inst_regs.hh',
+                              max_inst_regs_template % vars())

 # global list of CpuModel objects (see cpu_models.py)
 cpu_models = []
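Of the functions moved into the class above, read_and_flatten() is the most involved: it recursively inlines ##include'd files so that PLY sees the whole description as a single string. Below is a stripped-down sketch of the same recursion, reusing the includeRE pattern from the diff; the fileNameStack bookkeeping and error() reporting of the real code are omitted for brevity:

import os
import re

# The directive pattern is the one from the diff; the rest is simplified.
includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[\w/.-]*)".*$',
                       re.MULTILINE)

def read_and_flatten(filename):
    current_dir = os.path.dirname(filename)
    with open(filename) as f:
        contents = f.read()

    def replace(match):
        # Resolve the included path relative to the including file and
        # recurse, bracketing the result with ##newfile/##endfile markers
        # so the scanner can tell which file each line came from.
        fname = os.path.normpath(os.path.join(current_dir,
                                              match.group('filename')))
        return '##newfile "%s"\n%s\n##endfile\n' % (fname,
                                                    read_and_flatten(fname))

    # Every ##include directive is replaced by the flattened contents of
    # the file it names, recursively.
    return includeRE.sub(replace, contents)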
@@ -2033,4 +2040,5 @@ cpu_models = []
 if __name__ == '__main__':
     execfile(sys.argv[1]) # read in CpuModel definitions
     cpu_models = [CpuModel.dict[cpu] for cpu in sys.argv[4:]]
-    parse_isa_desc(sys.argv[2], sys.argv[3])
+    parser = ISAParser(sys.argv[3])
+    parser.parse_isa_desc(sys.argv[2])
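For reference, the rewritten __main__ block keeps the same command-line layout as before; only the call sequence changes. A small sketch of the argument positions it consumes (the key names are descriptive placeholders reconstructed from the sys.argv indexing above):

import sys

def parse_args(argv):
    # Mirrors the sys.argv indexing in the __main__ block above.
    return {
        'cpu_model_defs': argv[1],   # execfile'd to populate CpuModel.dict
        'isa_desc_file':  argv[2],   # handed to parser.parse_isa_desc()
        'output_dir':     argv[3],   # handed to the ISAParser constructor
        'cpu_models':     argv[4:],  # names looked up in CpuModel.dict
    }

if __name__ == '__main__' and len(sys.argv) > 4:
    print(parse_args(sys.argv))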