author     Patrick Georgi <patrick.georgi@coresystems.de>    2010-04-08 11:37:43 +0000
committer  Patrick Georgi <patrick.georgi@coresystems.de>    2010-04-08 11:37:43 +0000
commit     7e8c9aa271f13f67e4fc4968d2bf6fb8e5b229d7 (patch)
tree       b962ba3cc6ce30f32c1d0c37c3210abb6e37b10e /util
parent     56a684a2ee52b765fc69ec8c922c3da9d8ab7430 (diff)
Replace sconfig with a C implementation.
(smaller, faster, standard parser generator, no more python)

Provide precompiled parser, so bison and flex are optional dependencies.
Adapt Makefile and abuild (which uses some sconfig file as a magic path)
to match. Drop python as dependency from README, and add bison and flex
as optional dependencies.

Signed-off-by: Patrick Georgi <patrick.georgi@coresystems.de>
Acked-by: Stefan Reinauer <stepan@coresystems.de>
Acked-by: Ronald G. Minnich <rminnich@gmail.com>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@5373 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
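For context: the new util/sconfig/Makefile.inc keeps pregenerated flex/bison output in the
tree as *_shipped files and only regenerates them when SCONFIG_GENPARSER is set. A rough
sketch of that regeneration step run by hand (commands and flags taken from the Makefile.inc
rules added in this commit; illustrative only, not an official workflow):

    cd util/sconfig
    # regenerate the scanner from the lexer source shipped in this commit
    flex -o lex.yy.c_shipped sconfig.l
    # bison emits the token definitions alongside the parser (the .c rule
    # also creates the .h, as the Makefile.inc comment notes)
    bison --defines=sconfig.tab.h_shipped -o sconfig.tab.c_shipped sconfig.y

The normal build simply copies the _shipped files into $(obj)/util/sconfig and compiles
them with HOSTCC, which is what keeps flex and bison optional dependencies.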
Diffstat (limited to 'util')
-rwxr-xr-x  util/abuild/abuild                     4
-rw-r--r--  util/sconfig/LICENSE                  18
-rw-r--r--  util/sconfig/Makefile                 31
-rw-r--r--  util/sconfig/Makefile.inc             30
-rw-r--r--  util/sconfig/NOTES                    46
-rw-r--r--  util/sconfig/config.g               1028
-rw-r--r--  util/sconfig/lex.yy.c_shipped       1932
-rw-r--r--  util/sconfig/parsedesc.g             195
-rwxr-xr-x  util/sconfig/sconfig.l                52
-rw-r--r--  util/sconfig/sconfig.tab.c_shipped  2088
-rw-r--r--  util/sconfig/sconfig.tab.h_shipped    90
-rwxr-xr-x  util/sconfig/sconfig.y               499
-rw-r--r--  util/sconfig/test.config               6
-rw-r--r--  util/sconfig/yapps2.py               779
-rw-r--r--  util/sconfig/yapps2.tex             1225
-rw-r--r--  util/sconfig/yappsrt.py              172
16 files changed, 4693 insertions, 3502 deletions
diff --git a/util/abuild/abuild b/util/abuild/abuild
index 7d4c9bb4cb..811a32cbfc 100755
--- a/util/abuild/abuild
+++ b/util/abuild/abuild
@@ -558,8 +558,8 @@ target=""
buildall=false
verbose=false
-test -f util/sconfig/config.g && ROOT=$( pwd )
-test -f ../util/sconfig/config.g && ROOT=$( cd ..; pwd )
+test -f util/sconfig/sconfig.l && ROOT=$( pwd )
+test -f ../util/sconfig/sconfig.l && ROOT=$( cd ..; pwd )
test "$ROOT" = "" && ROOT=$( cd ../..; pwd )
# parse parameters.. try to find out whether we're running GNU getopt
diff --git a/util/sconfig/LICENSE b/util/sconfig/LICENSE
deleted file mode 100644
index 64f38b89f2..0000000000
--- a/util/sconfig/LICENSE
+++ /dev/null
@@ -1,18 +0,0 @@
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/util/sconfig/Makefile b/util/sconfig/Makefile
deleted file mode 100644
index 3328380569..0000000000
--- a/util/sconfig/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-ALL: $(shell echo *.g | sed s/\\.g/\\.py/g )
-
-%.py: %.g yapps2.py yappsrt.py Makefile
- python yapps2.py $<
-
-DOC: yapps2.ps yapps2.pdf manual/index.html
-
-yapps2.ps: yapps2.dvi
- dvips -q yapps2.dvi -o yapps2.ps
-
-yapps2.pdf: yapps2.ps
- ps2pdf yapps2.ps
-
-yapps2.dvi: yapps2.tex
- latex yapps2.tex
-
-manual/index.html: yapps2.aux yapps2.tex
- rm manual/yapps2.css
- latex2html -dir 'manual' -mkdir -lcase_tags -font_size 12pt -split 4 -toc_depth 4 -html_version 4.0,unicode,table -t 'Yapps 2.0 Manual' -address 'Amit J Patel, amitp@cs.stanford.edu' -info 0 -show_section_numbers -up_title 'Yapps Page' -up_url 'http://theory.stanford.edu/~amitp/yapps/' -strict -image_type png yapps2.tex
- echo '@import url("http://www-cs-students.stanford.edu/~amitp/amitp.css");' > manual/yapps2-new.css
- echo 'hr { display:none; }' >> manual/yapps2-new.css
- echo 'h1 br, h2 br { display:none; }' >>manual/yapps2-new.css
- cat manual/yapps2.css >> manual/yapps2-new.css
- rm manual/yapps2.css
- mv manual/yapps2-new.css manual/yapps2.css
-
-DISTRIB:
- cd ..; zip -u yapps2.zip yapps2/{LICENSE,yapps2.py,yappsrt.py,parsedesc.g,examples/*.g,NOTES,yapps2.tex,Makefile,manual/*.html,manual/*.css,manual/*.png}
-
-clean:
- rm -f config.py yappsrt.pyc parsedesc.py
diff --git a/util/sconfig/Makefile.inc b/util/sconfig/Makefile.inc
new file mode 100644
index 0000000000..54207ebd16
--- /dev/null
+++ b/util/sconfig/Makefile.inc
@@ -0,0 +1,30 @@
+sconfigobj :=
+sconfigobj += lex.yy.o
+sconfigobj += sconfig.tab.o
+
+$(obj)/util/sconfig:
+ mkdir -p $@
+
+$(obj)/util/sconfig/%.o: $(obj)/util/sconfig/%.c
+ printf " HOSTCC $(subst $(obj)/,,$(@))\n"
+ $(HOSTCC) $(SCONFIGFLAGS) $(HOSTCFLAGS) -c -o $@ $<
+
+ifdef SCONFIG_GENPARSER
+$(top)/util/sconfig/lex.yy.c_shipped: $(top)/util/sconfig/sconfig.l
+ flex -o $@ $<
+
+# the .c rule also creates .h
+$(top)/util/sconfig/sconfig.tab.h_shipped: $(top)/util/sconfig/sconfig.tab.c_shipped
+$(top)/util/sconfig/sconfig.tab.c_shipped: $(top)/util/sconfig/sconfig.y
+ bison --defines=$(top)/util/sconfig/sconfig.tab.h_shipped -o $@ $<
+
+endif
+
+$(obj)/util/sconfig/lex.yy.o: $(obj)/util/sconfig/sconfig.tab.h
+
+$(obj)/util/sconfig/%: $(top)/util/sconfig/%_shipped
+ cp $< $@
+
+$(obj)/util/sconfig/sconfig: $(obj)/util/sconfig $(addprefix $(obj)/util/sconfig/,$(sconfigobj))
+ printf " HOSTCXX $(subst $(obj)/,,$(@)) (link)\n"
+ $(HOSTCXX) $(SCONFIGFLAGS) -o $@ $(addprefix $(obj)/util/sconfig/,$(sconfigobj))
diff --git a/util/sconfig/NOTES b/util/sconfig/NOTES
deleted file mode 100644
index 325e76a479..0000000000
--- a/util/sconfig/NOTES
+++ /dev/null
@@ -1,46 +0,0 @@
-April 14, 2002:
-
-I haven't worked on Yapps for a while, mainly because I spent all my energy
-on trying to graduate. Now that I've finished school, I have several projects
-I want to start working on again, including Yapps.
-
-Notes for myself:
-
-Add a debugging mode that helps you understand how the grammar
- is constructed and how things are being parsed
-Look into an English output mode that would use natural language
- to describe a grammar
-Optimize unused variables
-Add a convenience to automatically gather up the values returned
- from subpatterns, put them into a list, and return them
-Improve the documentation
-Write some larger examples
-Get rid of old-style regex support
-Use SRE's lex support to speed up lexing (this may be hard given that
- yapps allows for context-sensitive lexers)
-Look over Dan Connoly's experience with Yapps (bugs, frustrations, etc.)
- and see what improvements could be made
-Add something to pretty-print the grammar (without the actions)
-Maybe conditionals? Follow this rule only if <condition> holds.
- But this would be useful mainly when multiple rules match, and we
- want the first matching rule. The conditional would mean we skip to
- the next rule. Maybe this is part of the attribute grammar system,
- where rule X<0> can be specified separately from X<N>.
-Convenience functions that could build return values for all rules
- without specifying the code for each rule individually
-Patterns (abstractions over rules) -- for example, comma separated values
- have a certain rule pattern that gets replicated all over the place
-"Gather" mode that simply outputs the return values for certain nodes.
- For example, if you just want all expressions, you could ask yapps
- to gather the results of the 'expr' rule into a list. This would
- ignore all the higher level structure.
-Look at everyone's Yapps grammars, and come up with larger examples
- http://www.w3.org/2000/10/swap/SemEnglish.g
- http://www.w3.org/2000/10/swap/kifExpr.g
- http://www.w3.org/2000/10/swap/rdfn3.g
-It would be nice if you could feed text into Yapps (push model) instead
- of Yapps reading text out of a string (pull model). However, I think
- that would make the resulting parser code mostly unreadable
- (like yacc, etc.). Coroutines/stacklesspython may be the answer.
-
-
diff --git a/util/sconfig/config.g b/util/sconfig/config.g
deleted file mode 100644
index cdc53a89b9..0000000000
--- a/util/sconfig/config.g
+++ /dev/null
@@ -1,1028 +0,0 @@
-# -*- python -*-
-import sys
-import os
-import re
-import string
-import types
-
-import traceback
-
-warnings = 0
-errors = 0
-
-treetop = ''
-full_mainboard_path = ''
-mainboard_path = ''
-romimages = {}
-curimage = 0
-
-# -----------------------------------------------------------------------------
-# Utility Classes
-# -----------------------------------------------------------------------------
-
-class stack:
- """Used to keep track of the current part or dir"""
- class __stack_iter:
- def __init__ (self, stack):
- self.index = 0
- self.len = len(stack)
- self.stack = stack
-
- def __iter__ (self):
- return self
-
- def next (self):
- if (self.index < self.len):
- s = self.stack[self.index]
- self.index = self.index + 1
- return s
- raise StopIteration
-
- def __init__ (self):
- self.stack = []
-
- def __len__ (self):
- return len(self.stack)
-
- def __getitem__ (self, i):
- return self.stack[i]
-
- def __iter__ (self):
- return self.__stack_iter(self.stack)
-
- def push(self, part):
- self.stack.append(part)
-
- def pop(self):
- try:
- return self.stack.pop()
- except IndexError:
- return 0
-
- def tos(self):
- try:
- return self.stack[-1]
- except IndexError:
- return 0
-
- def empty(self):
- return (len(self.stack) == 0)
-partstack = stack()
-
-class debug_info:
- none = 0
- gencode = 1
- dumptree = 2
- object = 3
- dict = 4
- statement = 5
- dump = 6
- gengraph = 7
-
- def __init__(self, *level):
- self.__level = level
-
- def setdebug(self, *level):
- self.__level = level
-
- def level(self, level):
- return level in self.__level
-
- def info(self, level, str):
- if level in self.__level:
- print str
-
-global debug
-debug = debug_info(debug_info.none)
-#debug = debug_info(debug_info.dumptree)
-#debug = debug_info(debug_info.object)
-#debug = debug_info(debug_info.gencode)
-
-# -----------------------------------------------------------------------------
-# Error Handling
-# -----------------------------------------------------------------------------
-
-def error(string):
- """Print error message"""
- global errors, loc
- errors = errors + 1
- print "===> ERROR: %s" % string
-
-def fatal(string):
- """Print error message and exit"""
- error(string)
- exitiferrors()
-
-def warning(string):
- """Print warning message"""
- global warnings, loc
- warnings = warnings + 1
- print "===> WARNING: %s" % string
-
-def exitiferrors():
- """Exit parser if an error has been encountered"""
- if (errors != 0):
- sys.exit(1)
-
-def safe_open(file, mode):
- try:
- return open(file, mode)
- except IOError:
- fatal("Could not open file \"%s\"" % file)
-
-# -----------------------------------------------------------------------------
-# Main classes
-# -----------------------------------------------------------------------------
-
-class romimage:
- """A rom image is the ultimate goal of coreboot"""
- def __init__ (self, name):
- # name of this rom image
- self.name = name
-
- # instance counter for parts
- self.partinstance = 0
-
- # chip config files included by the 'config' directive
- self.configincludes = {}
-
- # root of part tree
- self.root = 0
-
- # Last device built
- self.last_device = 0
-
- def getname(self):
- return self.name
-
- def addconfiginclude(self, part, path):
- setdict(self.configincludes, part, path)
-
- def getconfigincludes(self):
- return self.configincludes
-
- def getincludefilename(self):
- if (self.useinitincludes):
- return "crt0.S"
- else:
- return "crt0_includes.h"
-
- def newformat(self):
- return self.useinitincludes
-
- def numparts(self):
- return self.partinstance
-
- def newpartinstance(self):
- i = self.partinstance
- self.partinstance = self.partinstance + 1
- return i
-
- def setroot(self, part):
- self.root = part
-
- def getroot(self):
- return self.root
-
-class partobj:
- """A configuration part"""
- def __init__ (self, image, dir, parent, part, type_name, instance_name, chip_or_device):
- if (parent):
- debug.info(debug.object, "partobj dir %s parent %s part %s" \
- % (dir, parent.instance_name, part))
- else:
- debug.info(debug.object, "partobj dir %s part %s" \
- % (dir, part))
-
- # romimage that is configuring this part
- self.image = image
-
- # links for static device tree
- self.children = 0
- self.prev_sibling = 0
- self.next_sibling = 0
- self.prev_device = 0
- self.next_device = 0
- self.chip_or_device = chip_or_device
-
- # initializers for static device tree
- self.registercode = {}
-
- # part name
- self.part = part
-
- # type name of this part
- self.type_name = type_name
-
- # directory containing part files
- self.dir = dir
-
- # instance number, used to distinguish anonymous
- # instances of this part
- self.instance = image.newpartinstance()
- debug.info(debug.object, "INSTANCE %d" % self.instance)
-
- # Name of chip config file (0 if not needed)
- self.chipconfig = 0
-
- # Flag to indicate that we have generated type
- # definitions for this part (only want to do it once)
- self.done_types = 0
-
- # Path to the device
- self.path = ""
-
- # Resources of the device
- self.resoruce = ""
- self.resources = 0
-
- # Enabled state of the device
- self.enabled = 1
-
- # Flag if I am a duplicate device
- self.dup = 0
-
- # If there is a chip.h file, we will create an
- # include for it.
- if (dir):
- chiph = os.path.join(dir, "chip.h")
- if (os.path.exists(chiph)):
- debug.info(debug.object, "%s has chip at %s" % (self, dir))
- self.addconfig(chiph)
-
- # If no instance name is supplied then generate
- # a unique name
- if (instance_name == 0):
- self.instance_name = self.type_name + \
- "_dev%d" % self.instance
- self.chipinfo_name = "%s_info_%d" \
- % (self.type_name, self.instance)
- else:
- self.instance_name = instance_name
- self.chipinfo_name = "%s_info_%d" % (self.instance_name, self.instance)
-
- # Link this part into the device list
- if (self.chip_or_device == 'device'):
- if (image.last_device):
- image.last_device.next_device = self
- self.prev_device = image.last_device
- image.last_device = self
-
- # Link this part into the tree
- if (parent and (part != 'arch')):
- debug.info(debug.gencode, "add to parent")
- self.parent = parent
- # add current child as my sibling,
- # me as the child.
- if (parent.children):
- debug.info(debug.gencode, "add %s (%d) as sibling" % (parent.children.dir, parent.children.instance))
- youngest = parent.children
- while(youngest.next_sibling):
- youngest = youngest.next_sibling
- youngest.next_sibling = self
- self.prev_sibling = youngest
- else:
- parent.children = self
- else:
- self.parent = self
-
- def info(self):
- return "%s: %s" % (self.part, self.type)
- def type(self):
- return self.chip_or_device
-
- def readable_name(self):
- name = ""
- name = "%s_%d" % (self.type_name, self.instance)
- if (self.chip_or_device == 'chip'):
- name = "%s %s %s" % (name, self.part, self.dir)
- else:
- name = "%s %s" % (name, self.path)
- return name
-
- def graph_name(self):
- name = "{ {_dev%d|" % self.instance
- if (self.part):
- name = "%s%s" % (name, self.part)
- else:
- name = "%s%s" % (name, self.chip_or_device)
- if (self.type_name):
- name = "%s}|%s}" % (name, self.type_name)
- else:
- name = "%s}|%s}" % (name, self.parent.type_name)
- return name
-
- def dumpme(self, lvl):
- """Dump information about this part for debugging"""
- print "%d: %s" % (lvl, self.readable_name())
- print "%d: part %s" % (lvl, self.part)
- print "%d: instance %d" % (lvl, self.instance)
- print "%d: chip_or_device %s" % (lvl, self.chip_or_device)
- print "%d: dir %s" % (lvl,self.dir)
- print "%d: type_name %s" % (lvl,self.type_name)
- print "%d: parent: %s" % (lvl, self.parent.readable_name())
- if (self.children):
- print "%d: child %s" % (lvl, self.children.readable_name())
- if (self.next_sibling):
- print "%d: siblings %s" % (lvl, self.next_sibling.readable_name())
- print "%d: registercode " % lvl
- for f, v in self.registercode.items():
- print "\t%s = %s" % (f, v)
- print "%d: chipconfig %s" % (lvl, self.chipconfig)
- print "\n"
-
- def firstchilddevice(self):
- """Find the first device in the children link."""
- kid = self.children
- while (kid):
- if (kid.chip_or_device == 'device'):
- return kid
- else:
- kid = kid.children
- return 0
-
- def firstparentdevice(self):
- """Find the first device in the parent link."""
- parent = self.parent
- while (parent and (parent.parent != parent) and (parent.chip_or_device != 'device')):
- parent = parent.parent
- if ((parent.parent != parent) and (parent.chip_or_device != 'device')):
- parent = 0
- while(parent and (parent.dup == 1)):
- parent = parent.prev_sibling
- if (not parent):
- fatal("Device %s has no device parent; this is a config file error" % self.readable_name())
- return parent
-
- def firstparentdevicelink(self):
- """Find the first device in the parent link and record which link it is."""
- link = 0
- parent = self.parent
- while (parent and (parent.parent != parent) and (parent.chip_or_device != 'device')):
- parent = parent.parent
- if ((parent.parent != parent) and (parent.chip_or_device != 'device')):
- parent = 0
- while(parent and (parent.dup == 1)):
- parent = parent.prev_sibling
- link = link + 1
- if (not parent):
- fatal("Device %s has no device parent; this is a config file error" % self.readable_name())
- return link
-
-
- def firstparentchip(self):
- """Find the first chip in the parent link."""
- parent = self.parent
- while (parent):
- if ((parent.parent == parent) or (parent.chip_or_device == 'chip')):
- return parent
- else:
- parent = parent.parent
- fatal("Device %s has no chip parent; this is a config file error" % self.readable_name())
-
- def firstsiblingdevice(self):
- """Find the first device in the sibling link."""
- sibling = self.next_sibling
- while(sibling and (sibling.path == self.path)):
- sibling = sibling.next_sibling
- if ((not sibling) and (self.parent.chip_or_device == 'chip')):
- sibling = self.parent.next_sibling
- while(sibling):
- if (sibling.chip_or_device == 'device'):
- return sibling
- else:
- sibling = sibling.children
- return 0
-
- def gencode(self, file, pass_num):
- """Generate static initalizer code for this part. Two passes
- are used - the first generates type information, and the second
- generates instance information"""
- if (pass_num == 0):
- if (self.chip_or_device == 'chip'):
- return;
- else:
- if (self.instance):
- file.write("struct device %s;\n" \
- % self.instance_name)
- else:
- file.write("struct device dev_root;\n")
- return
- # This is pass the second, which is pass number 1
- # this is really just a case statement ...
-
- if (self.chip_or_device == 'chip'):
- if (self.chipconfig):
- debug.info(debug.gencode, "gencode: chipconfig(%d)" % \
- self.instance)
- file.write("struct %s_config %s" % (self.type_name ,\
- self.chipinfo_name))
- if (self.registercode):
- file.write("\t= {\n")
- for f, v in self.registercode.items():
- file.write( "\t.%s = %s,\n" % (f, v))
- file.write("};\n")
- else:
- file.write(";")
- file.write("\n")
-
- if (self.instance == 0):
- self.instance_name = "dev_root"
- file.write("struct device **last_dev_p = &%s.next;\n" % (self.image.last_device.instance_name))
- file.write("struct device dev_root = {\n")
- file.write("\t.ops = &default_dev_ops_root,\n")
- file.write("\t.bus = &dev_root.link[0],\n")
- file.write("\t.path = { .type = DEVICE_PATH_ROOT },\n")
- file.write("\t.enabled = 1,\n\t.links = 1,\n")
- file.write("\t.on_mainboard = 1,\n")
- file.write("\t.link = {\n\t\t[0] = {\n")
- file.write("\t\t\t.dev=&dev_root,\n\t\t\t.link = 0,\n")
- file.write("\t\t\t.children = &%s,\n" % self.firstchilddevice().instance_name)
- file.write("\t\t},\n")
- file.write("\t},\n")
- if (self.chipconfig):
- file.write("\t.chip_ops = &%s_ops,\n" % self.type_name)
- file.write("\t.chip_info = &%s_info_%s,\n" % (self.type_name, self.instance))
- file.write("\t.next = &%s,\n" % self.firstchilddevice().instance_name)
- file.write("};\n")
- return
-
- # Don't print duplicate devices, just print their children
- if (self.dup):
- return
-
- file.write("struct device %s = {\n" % self.instance_name)
- file.write("\t.ops = 0,\n")
- file.write("\t.bus = &%s.link[%d],\n" % \
- (self.firstparentdevice().instance_name, \
- self.firstparentdevicelink()))
- file.write("\t.path = {%s},\n" % self.path)
- file.write("\t.enabled = %d,\n" % self.enabled)
- file.write("\t.on_mainboard = 1,\n")
- if (self.resources):
- file.write("\t.resources = %d,\n" % self.resources)
- file.write("\t.resource = {%s\n\t },\n" % self.resource)
- file.write("\t.link = {\n");
- links = 0
- bus = self
- while(bus and (bus.path == self.path)):
- child = bus.firstchilddevice()
- if (child or (bus != self) or (bus.next_sibling and (bus.next_sibling.path == self.path))):
- file.write("\t\t[%d] = {\n" % links)
- file.write("\t\t\t.link = %d,\n" % links)
- file.write("\t\t\t.dev = &%s,\n" % self.instance_name)
- if (child):
- file.write("\t\t\t.children = &%s,\n" %child.instance_name)
- file.write("\t\t},\n")
- links = links + 1
- if (1):
- bus = bus.next_sibling
- else:
- bus = 0
- file.write("\t},\n")
- file.write("\t.links = %d,\n" % (links))
- sibling = self.firstsiblingdevice();
- if (sibling):
- file.write("\t.sibling = &%s,\n" % sibling.instance_name)
- chip = self.firstparentchip()
- if (chip and chip.chipconfig):
- file.write("\t.chip_ops = &%s_ops,\n" % chip.type_name)
- file.write("\t.chip_info = &%s_info_%s,\n" % (chip.type_name, chip.instance))
- if (self.next_device):
- file.write("\t.next=&%s\n" % self.next_device.instance_name)
- file.write("};\n")
- return
-
- def addconfig(self, path):
- """Add chip config file to this part"""
- self.chipconfig = os.path.join(self.dir, path)
- self.image.addconfiginclude(self.type_name, self.chipconfig)
-
- def addregister(self, field, value):
- """Register static initialization information"""
- if (self.chip_or_device != 'chip'):
- fatal("Only chips can have register values")
- field = dequote(field)
- value = dequote(value)
- setdict(self.registercode, field, value)
-
- def set_enabled(self, enabled):
- self.enabled = enabled
-
- def start_resources(self):
- self.resource = ""
- self.resources = 0
-
- def end_resources(self):
- self.resource = "%s" % (self.resource)
-
- def add_resource(self, type, index, value):
- """ Add a resource to a device """
- self.resource = "%s\n\t\t{ .flags=%s, .index=0x%x, .base=0x%x}," % (self.resource, type, index, value)
- self.resources = self.resources + 1
-
- def set_path(self, path):
- self.path = path
- if (self.prev_sibling and (self.prev_sibling.path == self.path)):
- self.dup = 1
- if (self.prev_device):
- self.prev_device.next_device = self.next_device
- if (self.next_device):
- self.next_device.prev_device = self.prev_device
- if (self.image.last_device == self):
- self.image.last_device = self.prev_device
- self.prev_device = 0
- self.next_device = 0
-
- def addpcipath(self, slot, function):
- """ Add a relative pci style path from our parent to this device """
- if ((slot < 0) or (slot > 0x1f)):
- fatal("Invalid device id")
- if ((function < 0) or (function > 7)):
- fatal("Invalid pci function %s" % function )
- self.set_path(".type=DEVICE_PATH_PCI,{.pci={ .devfn = PCI_DEVFN(0x%x,%d)}}" % (slot, function))
-
- def addpnppath(self, port, device):
- """ Add a relative path to a pnp device hanging off our parent """
- if ((port < 0) or (port > 65536)):
- fatal("Invalid port")
- if ((device < 0) or (device > 0xffff)):
- fatal("Invalid device")
- self.set_path(".type=DEVICE_PATH_PNP,{.pnp={ .port = 0x%x, .device = 0x%x }}" % (port, device))
-
- def addi2cpath(self, device):
- """ Add a relative path to a i2c device hanging off our parent """
- if ((device < 0) or (device > 0x7f)):
- fatal("Invalid device")
- self.set_path(".type=DEVICE_PATH_I2C,{.i2c={ .device = 0x%x }}" % (device))
-
- def addapicpath(self, apic_id):
- """ Add a relative path to a cpu device hanging off our parent """
- if ((apic_id < 0) or (apic_id > 255)):
- fatal("Invalid device")
- self.set_path(".type=DEVICE_PATH_APIC,{.apic={ .apic_id = 0x%x }}" % (apic_id))
-
- def addpci_domainpath(self, pci_domain):
- """ Add a pci_domain number to a chip """
- if ((pci_domain < 0) or (pci_domain > 0xffff)):
- fatal("Invalid pci_domain: 0x%x is out of the range 0 to 0xffff" % pci_domain)
- self.set_path(".type=DEVICE_PATH_PCI_DOMAIN,{.pci_domain={ .domain = 0x%x }}" % (pci_domain))
-
- def addapic_clusterpath(self, cluster):
- """ Add an apic cluster to a chip """
- if ((cluster < 0) or (cluster > 15)):
- fatal("Invalid apic cluster: %d is out of the range 0 to ff" % cluster)
- self.set_path(".type=DEVICE_PATH_APIC_CLUSTER,{.apic_cluster={ .cluster = 0x%x }}" % (cluster))
-
- def addcpupath(self, cpu_id):
- """ Add a relative path to a cpu device hanging off our parent """
- if ((cpu_id < 0) or (cpu_id > 255)):
- fatal("Invalid device")
- self.set_path(".type=DEVICE_PATH_CPU,{.cpu={ .id = 0x%x }}" % (cpu_id))
-
-
- def addcpu_buspath(self, id):
- """ Add a cpu_bus to a chip """
- if ((id < 0) or (id > 255)):
- fatal("Invalid device")
- self.set_path(".type=DEVICE_PATH_CPU_BUS,{.cpu_bus={ .id = 0x%x }}" % (id))
-
-
-# -----------------------------------------------------------------------------
-# statements
-# -----------------------------------------------------------------------------
-
-def getdict(dict, name):
- if name not in dict.keys():
- debug.info(debug.dict, "Undefined: %s" % name)
- return 0
- v = dict.get(name, 0)
- debug.info(debug.dict, "getdict %s returning %s" % (name, v))
- return v
-
-def setdict(dict, name, value):
- debug.info(debug.dict, "setdict sets %s to %s" % (name, value))
- if name in dict.keys() and not dict[name] == value:
- print "Collision in dict: %s is %s, shall be set to %s" % (name, dict[name], value)
- dict[name] = value
-
-
-def addconfig(path):
- global partstack
- curpart = partstack.tos()
- curpart.addconfig(path)
-
-def addregister(field, value):
- global partstack
- curpart = partstack.tos()
- curpart.addregister(field, value)
-
-def devicepart(type):
- global curimage, partstack
- newpart = partobj(curimage, 0, partstack.tos(), type, \
- '', 0, 'device')
- #print "Configuring PART %s" % (type)
- partstack.push(newpart)
- #print " new PART tos is now %s\n" %partstack.tos().info()
- # just push TOS, so that we can pop later.
-
-def part(type, path, file, name):
- global curimage, partstack
- partdir = os.path.join(type, path)
- srcdir = os.path.join(treetop, 'src')
- fulldir = os.path.join(srcdir, partdir)
- type_name = flatten_name(partdir)
- #print "PART(%s, %s, %s, %s)\n" % (type, path, file, name)
- newpart = partobj(curimage, fulldir, partstack.tos(), type, \
- type_name, name, 'chip')
- #print "Configuring PART %s, path %s" % (type, path)
- partstack.push(newpart)
-
-def partpop():
- global partstack
- curpart = partstack.tos()
- if (curpart == 0):
- fatal("Trying to pop non-existent part")
- #print "End PART %s" % curpart.part
- oldpart = partstack.pop()
- #print "partstack.pop, TOS is now %s\n" % oldpart.info()
-
-#=============================================================================
-# MISC FUNCTIONS
-#=============================================================================
-def dequote(str):
- a = re.sub("^\"", "", str)
- a = re.sub("\"$", "", a)
- # highly un-intuitive, need four \!
- a = re.sub("\\\\\"", "\"", a)
- return a
-
-def flatten_name(str):
- a = re.sub("[/-]", "_", str)
- return a
-%%
-parser Config:
- ignore: r'\s+'
- ignore: "#.*?\r?\n"
-
- # less general tokens should come first, otherwise they get matched
- # by the re's
- token COMMENT: 'comment'
- token CPU: 'cpu'
- token CPU_BUS: 'cpu_bus'
- token CHIP: 'chip'
- token DEVICE: 'device'
- token DEVICE_ID: 'device_id'
- token DRQ: 'drq'
- token END: 'end'
- token EOF: '$'
- token EQ: '='
- token FORMAT: 'format'
- token IO: 'io'
- token IRQ: 'irq'
- token MEM: 'mem'
- token NEVER: 'never'
- token NONE: 'none'
- token PMC: 'pmc'
- token PRINT: 'print'
- token REGISTER: 'register'
- token VENDOR_ID: 'vendor_id'
- token WRITE: 'write'
- token NUM: '[0-9]+'
- token HEX_NUM: '[0-9a-fA-F]+'
- token HEX_PREFIX: '0x'
- # Why is path separate? Because paths to resources have to at least
- # have a slash, we thinks
- token PATH: r'[-a-zA-Z0-9_.][-a-zA-Z0-9/_.]+[-a-zA-Z0-9_.]+'
- # Dir's on the other hand are abitrary
- # this may all be stupid.
- token RULE: r'[-a-zA-Z0-9_$()./]+[-a-zA-Z0-9_ $()./]+[-a-zA-Z0-9_$()./]+'
- token ID: r'[a-zA-Z_.]+[a-zA-Z0-9_.]*'
- token STR: r'"([^\\"]+|\\.)*"'
- token RAWTEXT: r'.*'
- token ON: 'on'
- token OFF: 'off'
- token PCI: 'pci'
- token PNP: 'pnp'
- token I2C: 'i2c'
- token APIC: 'apic'
- token APIC_CLUSTER: 'apic_cluster'
- token CPU: 'cpu'
- token CPU_BUS: 'cpu_bus'
- token PCI_DOMAIN: 'pci_domain'
-
-
- rule expr: logical {{ l = logical }}
- ( "&&" logical {{ l = l and logical }}
- | "[|][|]" logical {{ l = l or logical }}
- )* {{ return l }}
-
- rule logical: factor {{ n = factor }}
- ( "[+]" factor {{ n = n+factor }}
- | "-" factor {{ n = n-factor }}
- )* {{ return n }}
-
- rule factor: term {{ v = term }}
- ( "[*]" term {{ v = v*term }}
- | "/" term {{ v = v/term }}
- | "<<" term {{ v = v << term }}
- | ">=" term {{ v = (v < term)}}
- )* {{ return v }}
-
- # A term is a number, variable, or an expression surrounded by parentheses
- rule term: NUM {{ return long(NUM, 10) }}
- | HEX_PREFIX HEX_NUM {{ return long(HEX_NUM, 16) }}
- | ID {{ return lookup(ID) }}
- | unop {{ return unop }}
- | "\\(" expr "\\)" {{ return expr }}
-
- rule unop: "!" expr {{ return not(expr) }}
-
- rule partend<<C>>: (stmt<<C>>)* END {{ if (C): partpop()}}
-
- # This is needed because the legacy cpu command could not distinguish
- # between cpu vendors. It should just be PATH, but getting this change
- # into the source tree will be tricky...
- # DO NOT USE ID AS IT MAY GO AWAY IN THE FUTURE
- rule partid: ID {{ return ID }}
- | PATH {{ return PATH }}
-
- rule parttype: CHIP {{ return '' }}
-
- rule partdef<<C>>: {{ name = 0 }}
- parttype partid
- [ STR {{ name = dequote(STR) }}
- ] {{ if (C): part(parttype, partid, 'Config.lb', name) }}
- partend<<C>>
-
- rule field: STR {{ return STR }}
-
- rule register<<C>>: REGISTER field '=' STR {{ if (C): addregister(field, STR) }}
-
- rule enable<<C>>: {{ val = 1 }}
- ( ON {{ val = 1 }}
- | OFF {{ val = 0 }}
- ) {{ if(C): partstack.tos().set_enabled(val) }}
-
- rule resource<<C>>: {{ type = "" }}
- ( IO {{ type = "IORESOURCE_FIXED | IORESOURCE_ASSIGNED | IORESOURCE_IO" }}
- | MEM {{ type = "IORESOURCE_FIXED | IORESOURCE_ASSIGNED | IORESOURCE_MEM" }}
- | IRQ {{ type = "IORESOURCE_FIXED | IORESOURCE_ASSIGNED | IORESOURCE_IRQ" }}
- | DRQ {{ type = "IORESOURCE_FIXED | IORESOURCE_ASSIGNED | IORESOURCE_DRQ" }}
- )
- term '=' {{ index = term }}
- term {{ value = term }}
- {{ if (C): partstack.tos().add_resource(type, index, value) }}
-
-
- rule resources<<C>>: {{ if (C): partstack.tos().start_resources() }}
- ( resource<<C>> )*
- {{ if (C): partstack.tos().end_resources() }}
-
-
- rule pci<<C>>: PCI {{ if (C): devicepart('pci') }}
-
- HEX_NUM {{ slot = int(HEX_NUM,16) }}
- '.' HEX_NUM {{ function = int(HEX_NUM, 16) }}
- {{ if (C): partstack.tos().addpcipath(slot, function) }}
- rule pci_domain<<C>>:
- PCI_DOMAIN {{ if (C): devicepart('pci_domain') }}
- HEX_NUM {{ pci_domain = int(HEX_NUM, 16) }}
- {{ if (C): partstack.tos().addpci_domainpath(pci_domain) }}
-
- rule pnp<<C>>: PNP {{ if (C): devicepart('pnp') }}
- HEX_NUM {{ port = int(HEX_NUM,16) }}
- '.' HEX_NUM {{ device = int(HEX_NUM, 16) }}
- {{ if (C): partstack.tos().addpnppath(port, device) }}
-
- rule i2c<<C>>: I2C {{ if (C): devicepart('i2c') }}
- HEX_NUM {{ device = int(HEX_NUM, 16) }}
- {{ if (C): partstack.tos().addi2cpath(device) }}
-
- rule apic<<C>>: APIC {{ if (C): devicepart('apic') }}
- HEX_NUM {{ apic_id = int(HEX_NUM, 16) }}
- {{ if (C): partstack.tos().addapicpath(apic_id) }}
-
- rule apic_cluster<<C>>: APIC_CLUSTER {{ if (C): devicepart('apic_cluster') }}
- HEX_NUM {{ cluster = int(HEX_NUM, 16) }}
- {{ if (C): partstack.tos().addapic_clusterpath(cluster) }}
-
- rule cpu<<C>>: CPU {{ if (C): devicepart('cpu') }}
- HEX_NUM {{ id = int(HEX_NUM, 16) }}
- {{ if (C): partstack.tos().addcpupath(id) }}
-
- rule cpu_bus<<C>>: CPU_BUS {{ if (C): devicepart('cpu_bus') }}
- HEX_NUM {{ id = int(HEX_NUM, 16) }}
- {{ if (C): partstack.tos().addcpu_buspath(id) }}
-
- rule dev_path<<C>>:
- pci<<C>> {{ return pci }}
- | pci_domain<<C>> {{ return pci_domain }}
- | pnp<<C>> {{ return pnp }}
- | i2c<<C>> {{ return i2c }}
- | apic<<C>> {{ return apic }}
- | apic_cluster<<C>> {{ return apic_cluster }}
- | cpu<<C>> {{ return cpu }}
- | cpu_bus<<C>> {{ return cpu_bus }}
-
- rule prtval: expr {{ return str(expr) }}
- | STR {{ return STR }}
-
- rule prtlist: prtval {{ el = "%(" + prtval }}
- ( "," prtval {{ el = el + "," + prtval }}
- )* {{ return el + ")" }}
-
- rule prtstmt<<C>>: PRINT STR {{ val = STR }}
- [ "," prtlist {{ val = val + prtlist }}
- ] {{ if (C): print eval(val) }}
-
- rule device<<C>>: DEVICE dev_path<<C>>
- enable<<C>>
- resources<<C>>
- partend<<C>>
-
- rule stmt<<C>>:
- partdef<<C>> {{ return partdef }}
- | prtstmt<<C>> {{ return prtstmt }}
- | register<<C>> {{ return register }}
- | device<<C>> {{ return device }}
-
- rule value: STR {{ return dequote(STR) }}
- | expr {{ return expr }}
-
- rule devicetree: partdef<<1>>
- EOF {{ return 1 }}
-
- rule wrstr<<ID>>: STR {{ setwrite(ID, dequote(STR)) }}
-
-%%
-
-#=============================================================================
-# FILE OUTPUT
-#=============================================================================
-
-def dumptree(part, lvl):
- debug.info(debug.dumptree, "DUMPTREE ME is")
- print "%s " % part
- part.dumpme(lvl)
- # dump the siblings -- actually are there any? not sure
- # siblings are:
- debug.info(debug.dumptree, "DUMPTREE SIBLINGS are")
- kid = part.next_sibling
- while (kid):
- kid.dumpme(lvl)
- kid = kid.next_sibling
- # dump the kids
- debug.info(debug.dumptree, "DUMPTREE KIDS are")
- #for kid in part.children:
- if (part.children):
- dumptree(part.children, lvl+1)
- kid = part.next_sibling
- while (kid):
- if (kid.children):
- dumptree(kid.children, lvl + 1)
- kid = kid.next_sibling
- debug.info(debug.dumptree, "DONE DUMPTREE")
-
-def writecode(image):
- filename = os.path.join(img_dir, "static.c")
- print " SCONFIG ", join(filename.split('/')[-4:], '/')
- file = safe_open(filename, 'w+')
- file.write("#include <device/device.h>\n")
- file.write("#include <device/pci.h>\n")
- for path in image.getconfigincludes().values():
- file.write("#include \"%s\"\n" % path)
- file.write("\n/* pass 0 */\n")
- gencode(image.getroot(), file, 0)
- file.write("\n/* pass 1 */\n")
- gencode(image.getroot(), file, 1)
- file.close()
-
-def gencode(part, file, pass_num):
- debug.info(debug.gencode, "GENCODE ME is")
- part.gencode(file, pass_num)
- # dump the siblings -- actually are there any? not sure
- debug.info(debug.gencode, "GENCODE SIBLINGS are")
- kid = part.next_sibling
- while (kid):
- kid.gencode(file, pass_num)
- kid = kid.next_sibling
- # now dump the children
- debug.info(debug.gencode, "GENCODE KIDS are")
- if (part.children):
- gencode(part.children, file, pass_num)
- kid = part.next_sibling
- while (kid):
- if (kid.children):
- gencode(kid.children, file, pass_num)
- kid = kid.next_sibling
- debug.info(debug.gencode, "DONE GENCODE")
-
-def writegraph(image):
- filename = os.path.join(img_dir, "static.dot")
- print " SCONFIG ", join(filename.split('/')[-4:], '/')
- file = safe_open(filename, 'w+')
- file.write("digraph devicetree {\n")
- file.write(" rankdir=LR\n")
- genranks(image.getroot(), file, 0)
- gennodes(image.getroot(), file)
- gengraph(image.getroot(), file)
- file.write("}\n")
- file.close()
-
-def genranks(part, file, level):
- #file.write(" # Level %d\n" % level )
- file.write(" { rank = same; \"dev_%s_%d\"" % (part.type_name,part.instance ))
- sib = part.next_sibling
- while (sib):
- file.write("; \"dev_%s_%d\"" % (sib.type_name, sib.instance))
- sib = sib.next_sibling
- file.write("}\n" )
- # now dump the children
- if (part.children):
- genranks(part.children, file, level + 1)
-
- kid = part.next_sibling
- while (kid):
- if (kid.children):
- genranks(kid.children, file, level + 1)
- kid = kid.next_sibling
-
-
-def gennodes(part, file):
- file.write(" dev_%s_%d[shape=record, label=\"%s\"];\n" % (part.type_name,part.instance,part.graph_name() ))
- sib = part.next_sibling
- while (sib):
- file.write(" dev_%s_%d[shape=record, label=\"%s\"];\n" % (sib.type_name,sib.instance,sib.graph_name() ))
- sib = sib.next_sibling
- # now dump the children
- if (part.children):
- gennodes(part.children, file)
-
- kid = part.next_sibling
- while (kid):
- if (kid.children):
- gennodes(kid.children, file)
- kid = kid.next_sibling
-
-
-def gengraph(part, file):
- if (part.parent != part):
- file.write(" dev_%s_%d -> dev_%s_%d;\n" % \
- (part.parent.type_name, part.parent.instance, \
- part.type_name, part.instance ))
- sib = part.next_sibling
- while (sib):
- file.write(" dev_%s_%d -> dev_%s_%d;\n" % \
- (sib.parent.type_name, sib.parent.instance, \
- sib.type_name, sib.instance ))
- sib = sib.next_sibling
-
- kid = part.next_sibling
- while (kid):
- if (kid.children):
- gengraph(kid.children, file)
- kid = kid.next_sibling
-
- if (part.children):
- gengraph(part.children, file)
-
-#=============================================================================
-# MAIN PROGRAM
-#=============================================================================
-if __name__=='__main__':
- from sys import argv
- if (len(argv) < 4):
- fatal("Args: <file> <path to coreboot> <output-dir>")
-
- file = "devicetree.cb"
- partdir = os.path.join("mainboard", sys.argv[1])
- treetop = argv[2]
- srcdir = os.path.join(treetop, 'src')
- fulldir = os.path.join(srcdir, partdir)
- type_name = flatten_name(partdir)
- config_file = os.path.join(fulldir, file)
-
- curimage = romimage("new")
- image = curimage
-
- newpart = partobj(curimage, fulldir, partstack.tos(), 'mainboard', \
- 'mainboard', 0, 'chip')
- #print "Configuring PART %s, path %s" % (type, path)
- image.setroot(newpart);
- partstack.push(newpart)
-
- fp = safe_open(config_file, 'r')
- if (not parse('devicetree', fp.read())):
- fatal("Could not parse file")
- partstack.pop()
-
- img_dir = argv[3]
-
- #debug.info(debug.dumptree, "DEVICE TREE:")
- #dumptree(curimage.getroot(), 0)
-
- writecode(image)
- writegraph(image)
-
- sys.exit(0)
diff --git a/util/sconfig/lex.yy.c_shipped b/util/sconfig/lex.yy.c_shipped
new file mode 100644
index 0000000000..c4b53e7480
--- /dev/null
+++ b/util/sconfig/lex.yy.c_shipped
@@ -0,0 +1,1932 @@
+#line 2 "/home/Patrick/work/coreboot/util/sconfig/lex.yy.c_shipped"
+
+#line 4 "/home/Patrick/work/coreboot/util/sconfig/lex.yy.c_shipped"
+
+#define YY_INT_ALIGNED short int
+
+/* A lexical scanner generated by flex */
+
+#define FLEX_SCANNER
+#define YY_FLEX_MAJOR_VERSION 2
+#define YY_FLEX_MINOR_VERSION 5
+#define YY_FLEX_SUBMINOR_VERSION 35
+#if YY_FLEX_SUBMINOR_VERSION > 0
+#define FLEX_BETA
+#endif
+
+/* First, we deal with platform-specific or compiler-specific issues. */
+
+/* begin standard C headers. */
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+
+/* end standard C headers. */
+
+/* flex integer type definitions */
+
+#ifndef FLEXINT_H
+#define FLEXINT_H
+
+/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+
+/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
+ * if you want the limit (max/min) macros for int types.
+ */
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS 1
+#endif
+
+#include <inttypes.h>
+typedef int8_t flex_int8_t;
+typedef uint8_t flex_uint8_t;
+typedef int16_t flex_int16_t;
+typedef uint16_t flex_uint16_t;
+typedef int32_t flex_int32_t;
+typedef uint32_t flex_uint32_t;
+#else
+typedef signed char flex_int8_t;
+typedef short int flex_int16_t;
+typedef int flex_int32_t;
+typedef unsigned char flex_uint8_t;
+typedef unsigned short int flex_uint16_t;
+typedef unsigned int flex_uint32_t;
+#endif /* ! C99 */
+
+/* Limits of integral types. */
+#ifndef INT8_MIN
+#define INT8_MIN (-128)
+#endif
+#ifndef INT16_MIN
+#define INT16_MIN (-32767-1)
+#endif
+#ifndef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#endif
+#ifndef INT8_MAX
+#define INT8_MAX (127)
+#endif
+#ifndef INT16_MAX
+#define INT16_MAX (32767)
+#endif
+#ifndef INT32_MAX
+#define INT32_MAX (2147483647)
+#endif
+#ifndef UINT8_MAX
+#define UINT8_MAX (255U)
+#endif
+#ifndef UINT16_MAX
+#define UINT16_MAX (65535U)
+#endif
+#ifndef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#endif
+
+#endif /* ! FLEXINT_H */
+
+#ifdef __cplusplus
+
+/* The "const" storage-class-modifier is valid. */
+#define YY_USE_CONST
+
+#else /* ! __cplusplus */
+
+/* C99 requires __STDC__ to be defined as 1. */
+#if defined (__STDC__)
+
+#define YY_USE_CONST
+
+#endif /* defined (__STDC__) */
+#endif /* ! __cplusplus */
+
+#ifdef YY_USE_CONST
+#define yyconst const
+#else
+#define yyconst
+#endif
+
+/* Returned upon end-of-file. */
+#define YY_NULL 0
+
+/* Promotes a possibly negative, possibly signed char to an unsigned
+ * integer for use as an array index. If the signed char is negative,
+ * we want to instead treat it as an 8-bit unsigned char, hence the
+ * double cast.
+ */
+#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)
+
+/* Enter a start condition. This macro really ought to take a parameter,
+ * but we do it the disgusting crufty way forced on us by the ()-less
+ * definition of BEGIN.
+ */
+#define BEGIN (yy_start) = 1 + 2 *
+
+/* Translate the current start state into a value that can be later handed
+ * to BEGIN to return to the state. The YYSTATE alias is for lex
+ * compatibility.
+ */
+#define YY_START (((yy_start) - 1) / 2)
+#define YYSTATE YY_START
+
+/* Action number for EOF rule of a given start state. */
+#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
+
+/* Special action meaning "start processing a new file". */
+#define YY_NEW_FILE yyrestart(yyin )
+
+#define YY_END_OF_BUFFER_CHAR 0
+
+/* Size of default input buffer. */
+#ifndef YY_BUF_SIZE
+#define YY_BUF_SIZE 16384
+#endif
+
+/* The state buf must be large enough to hold one state per character in the main buffer.
+ */
+#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))
+
+#ifndef YY_TYPEDEF_YY_BUFFER_STATE
+#define YY_TYPEDEF_YY_BUFFER_STATE
+typedef struct yy_buffer_state *YY_BUFFER_STATE;
+#endif
+
+extern int yyleng;
+
+extern FILE *yyin, *yyout;
+
+#define EOB_ACT_CONTINUE_SCAN 0
+#define EOB_ACT_END_OF_FILE 1
+#define EOB_ACT_LAST_MATCH 2
+
+ #define YY_LESS_LINENO(n)
+
+/* Return all but the first "n" matched characters back to the input stream. */
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ *yy_cp = (yy_hold_char); \
+ YY_RESTORE_YY_MORE_OFFSET \
+ (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
+ YY_DO_BEFORE_ACTION; /* set up yytext again */ \
+ } \
+ while ( 0 )
+
+#define unput(c) yyunput( c, (yytext_ptr) )
+
+#ifndef YY_TYPEDEF_YY_SIZE_T
+#define YY_TYPEDEF_YY_SIZE_T
+typedef size_t yy_size_t;
+#endif
+
+#ifndef YY_STRUCT_YY_BUFFER_STATE
+#define YY_STRUCT_YY_BUFFER_STATE
+struct yy_buffer_state
+ {
+ FILE *yy_input_file;
+
+ char *yy_ch_buf; /* input buffer */
+ char *yy_buf_pos; /* current position in input buffer */
+
+ /* Size of input buffer in bytes, not including room for EOB
+ * characters.
+ */
+ yy_size_t yy_buf_size;
+
+ /* Number of characters read into yy_ch_buf, not including EOB
+ * characters.
+ */
+ int yy_n_chars;
+
+ /* Whether we "own" the buffer - i.e., we know we created it,
+ * and can realloc() it to grow it, and should free() it to
+ * delete it.
+ */
+ int yy_is_our_buffer;
+
+ /* Whether this is an "interactive" input source; if so, and
+ * if we're using stdio for input, then we want to use getc()
+ * instead of fread(), to make sure we stop fetching input after
+ * each newline.
+ */
+ int yy_is_interactive;
+
+ /* Whether we're considered to be at the beginning of a line.
+ * If so, '^' rules will be active on the next match, otherwise
+ * not.
+ */
+ int yy_at_bol;
+
+ int yy_bs_lineno; /**< The line count. */
+ int yy_bs_column; /**< The column count. */
+
+ /* Whether to try to fill the input buffer when we reach the
+ * end of it.
+ */
+ int yy_fill_buffer;
+
+ int yy_buffer_status;
+
+#define YY_BUFFER_NEW 0
+#define YY_BUFFER_NORMAL 1
+ /* When an EOF's been seen but there's still some text to process
+ * then we mark the buffer as YY_EOF_PENDING, to indicate that we
+ * shouldn't try reading from the input source any more. We might
+ * still have a bunch of tokens to match, though, because of
+ * possible backing-up.
+ *
+ * When we actually see the EOF, we change the status to "new"
+ * (via yyrestart()), so that the user can continue scanning by
+ * just pointing yyin at a new input file.
+ */
+#define YY_BUFFER_EOF_PENDING 2
+
+ };
+#endif /* !YY_STRUCT_YY_BUFFER_STATE */
+
+/* Stack of input buffers. */
+static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */
+static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */
+static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */
+
+/* We provide macros for accessing buffer states in case in the
+ * future we want to put the buffer states in a more general
+ * "scanner state".
+ *
+ * Returns the top of the stack, or NULL.
+ */
+#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \
+ ? (yy_buffer_stack)[(yy_buffer_stack_top)] \
+ : NULL)
+
+/* Same as previous macro, but useful when we know that the buffer stack is not
+ * NULL or when we need an lvalue. For internal use only.
+ */
+#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)]
+
+/* yy_hold_char holds the character lost when yytext is formed. */
+static char yy_hold_char;
+static int yy_n_chars; /* number of characters read into yy_ch_buf */
+int yyleng;
+
+/* Points to current character in buffer. */
+static char *yy_c_buf_p = (char *) 0;
+static int yy_init = 0; /* whether we need to initialize */
+static int yy_start = 0; /* start state number */
+
+/* Flag which is used to allow yywrap()'s to do buffer switches
+ * instead of setting up a fresh yyin. A bit of a hack ...
+ */
+static int yy_did_buffer_switch_on_eof;
+
+void yyrestart (FILE *input_file );
+void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer );
+YY_BUFFER_STATE yy_create_buffer (FILE *file,int size );
+void yy_delete_buffer (YY_BUFFER_STATE b );
+void yy_flush_buffer (YY_BUFFER_STATE b );
+void yypush_buffer_state (YY_BUFFER_STATE new_buffer );
+void yypop_buffer_state (void );
+
+static void yyensure_buffer_stack (void );
+static void yy_load_buffer_state (void );
+static void yy_init_buffer (YY_BUFFER_STATE b,FILE *file );
+
+#define YY_FLUSH_BUFFER yy_flush_buffer(YY_CURRENT_BUFFER )
+
+YY_BUFFER_STATE yy_scan_buffer (char *base,yy_size_t size );
+YY_BUFFER_STATE yy_scan_string (yyconst char *yy_str );
+YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,int len );
+
+void *yyalloc (yy_size_t );
+void *yyrealloc (void *,yy_size_t );
+void yyfree (void * );
+
+#define yy_new_buffer yy_create_buffer
+
+#define yy_set_interactive(is_interactive) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){ \
+ yyensure_buffer_stack (); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ yy_create_buffer(yyin,YY_BUF_SIZE ); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
+ }
+
+#define yy_set_bol(at_bol) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){\
+ yyensure_buffer_stack (); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ yy_create_buffer(yyin,YY_BUF_SIZE ); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
+ }
+
+#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
+
+/* Begin user sect3 */
+
+typedef unsigned char YY_CHAR;
+
+FILE *yyin = (FILE *) 0, *yyout = (FILE *) 0;
+
+typedef int yy_state_type;
+
+extern int yylineno;
+
+int yylineno = 1;
+
+extern char *yytext;
+#define yytext_ptr yytext
+
+static yy_state_type yy_get_previous_state (void );
+static yy_state_type yy_try_NUL_trans (yy_state_type current_state );
+static int yy_get_next_buffer (void );
+static void yy_fatal_error (yyconst char msg[] );
+
+/* Done after the current pattern has been matched and before the
+ * corresponding action - sets up yytext.
+ */
+#define YY_DO_BEFORE_ACTION \
+ (yytext_ptr) = yy_bp; \
+ yyleng = (size_t) (yy_cp - yy_bp); \
+ (yy_hold_char) = *yy_cp; \
+ *yy_cp = '\0'; \
+ (yy_c_buf_p) = yy_cp;
+
+#define YY_NUM_RULES 25
+#define YY_END_OF_BUFFER 26
+/* This struct is not used in this scanner,
+ but its presence is necessary. */
+struct yy_trans_info
+ {
+ flex_int32_t yy_verify;
+ flex_int32_t yy_nxt;
+ };
+static yyconst flex_int16_t yy_accept[86] =
+ { 0,
+ 0, 0, 26, 24, 1, 3, 24, 24, 24, 21,
+ 21, 19, 22, 22, 22, 22, 22, 24, 24, 24,
+ 24, 24, 1, 3, 24, 0, 24, 0, 2, 21,
+ 22, 24, 24, 24, 22, 24, 24, 24, 17, 24,
+ 24, 7, 24, 24, 24, 23, 23, 20, 24, 24,
+ 24, 16, 18, 11, 15, 8, 9, 10, 24, 12,
+ 4, 24, 24, 24, 24, 24, 24, 24, 24, 5,
+ 24, 24, 24, 24, 24, 24, 24, 6, 24, 24,
+ 24, 14, 24, 13, 0
+ } ;
+
+static yyconst flex_int32_t yy_ec[256] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
+ 1, 1, 4, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 1, 5, 6, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 7, 1, 8, 7, 9,
+ 7, 7, 7, 7, 7, 7, 7, 1, 1, 1,
+ 10, 1, 1, 1, 11, 11, 11, 11, 11, 11,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 12, 1, 13, 11, 14, 15,
+
+ 16, 17, 18, 19, 20, 1, 1, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 1, 32,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1
+ } ;
+
+static yyconst flex_int32_t yy_meta[33] =
+ { 0,
+ 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1
+ } ;
+
+static yyconst flex_int16_t yy_base[91] =
+ { 0,
+ 0, 0, 146, 0, 143, 147, 141, 31, 35, 32,
+ 111, 0, 43, 46, 49, 65, 52, 53, 46, 21,
+ 126, 0, 139, 147, 62, 135, 76, 136, 147, 0,
+ 75, 86, 118, 117, 78, 110, 120, 120, 0, 107,
+ 115, 0, 111, 105, 111, 0, 147, 0, 114, 102,
+ 106, 0, 0, 0, 0, 0, 113, 0, 104, 111,
+ 0, 108, 106, 92, 105, 102, 84, 78, 85, 0,
+ 83, 88, 68, 83, 64, 62, 69, 0, 59, 53,
+ 54, 0, 39, 0, 147, 41, 109, 111, 113, 115
+ } ;
+
+static yyconst flex_int16_t yy_def[91] =
+ { 0,
+ 85, 1, 85, 86, 85, 85, 86, 87, 88, 86,
+ 10, 86, 10, 10, 10, 10, 10, 86, 86, 86,
+ 86, 86, 85, 85, 87, 89, 88, 90, 85, 10,
+ 10, 10, 86, 86, 10, 86, 86, 86, 86, 86,
+ 86, 86, 86, 86, 86, 86, 85, 32, 86, 86,
+ 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
+ 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
+ 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
+ 86, 86, 86, 86, 0, 85, 85, 85, 85, 85
+ } ;
+
+static yyconst flex_int16_t yy_nxt[180] =
+ { 0,
+ 4, 5, 6, 7, 8, 9, 10, 11, 10, 12,
+ 13, 4, 14, 15, 16, 17, 13, 4, 4, 18,
+ 4, 4, 4, 19, 20, 4, 21, 4, 4, 4,
+ 4, 4, 26, 26, 43, 22, 28, 29, 30, 30,
+ 30, 22, 31, 44, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 31, 31, 31,
+ 31, 38, 41, 26, 26, 84, 46, 34, 42, 83,
+ 33, 31, 31, 31, 37, 82, 39, 28, 29, 40,
+ 35, 31, 31, 31, 31, 31, 31, 81, 80, 79,
+ 78, 36, 48, 48, 48, 77, 48, 76, 48, 48,
+
+ 48, 48, 48, 75, 74, 73, 72, 71, 51, 25,
+ 25, 27, 27, 26, 26, 28, 28, 70, 69, 68,
+ 67, 66, 65, 64, 63, 62, 61, 60, 59, 58,
+ 57, 56, 55, 54, 53, 52, 50, 49, 29, 47,
+ 23, 45, 32, 24, 23, 85, 3, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85
+ } ;
+
+static yyconst flex_int16_t yy_chk[180] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 8, 8, 20, 8, 9, 9, 10, 10,
+ 10, 86, 10, 20, 10, 10, 10, 10, 10, 13,
+ 13, 13, 14, 14, 14, 15, 15, 15, 17, 17,
+ 17, 18, 19, 25, 25, 83, 25, 15, 19, 81,
+ 14, 16, 16, 16, 17, 80, 18, 27, 27, 18,
+ 16, 31, 31, 31, 35, 35, 35, 79, 77, 76,
+ 75, 16, 32, 32, 32, 74, 32, 73, 32, 32,
+
+ 32, 32, 32, 72, 71, 69, 68, 67, 35, 87,
+ 87, 88, 88, 89, 89, 90, 90, 66, 65, 64,
+ 63, 62, 60, 59, 57, 51, 50, 49, 45, 44,
+ 43, 41, 40, 38, 37, 36, 34, 33, 28, 26,
+ 23, 21, 11, 7, 5, 3, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85
+ } ;
+
+static yy_state_type yy_last_accepting_state;
+static char *yy_last_accepting_cpos;
+
+extern int yy_flex_debug;
+int yy_flex_debug = 0;
+
+/* The intent behind this definition is that it'll catch
+ * any uses of REJECT which flex missed.
+ */
+#define REJECT reject_used_but_not_detected
+#define yymore() yymore_used_but_not_detected
+#define YY_MORE_ADJ 0
+#define YY_RESTORE_YY_MORE_OFFSET
+char *yytext;
+#line 1 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+#line 2 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+/*
+ * sconfig, coreboot device tree compiler
+ *
+ * Copyright (C) 2010 coresystems GmbH
+ * written by Patrick Georgi <patrick.georgi@coresystems.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
+ */
+
+#include "sconfig.tab.h"
+
+int linenum = 0;
+#line 537 "/home/Patrick/work/coreboot/util/sconfig/lex.yy.c_shipped"
+
+#define INITIAL 0
+
+#ifndef YY_NO_UNISTD_H
+/* Special case for "unistd.h", since it is non-ANSI. We include it way
+ * down here because we want the user's section 1 to have been scanned first.
+ * The user has a chance to override it with an option.
+ */
+#include <unistd.h>
+#endif
+
+#ifndef YY_EXTRA_TYPE
+#define YY_EXTRA_TYPE void *
+#endif
+
+static int yy_init_globals (void );
+
+/* Accessor methods to globals.
+ These are made visible to non-reentrant scanners for convenience. */
+
+int yylex_destroy (void );
+
+int yyget_debug (void );
+
+void yyset_debug (int debug_flag );
+
+YY_EXTRA_TYPE yyget_extra (void );
+
+void yyset_extra (YY_EXTRA_TYPE user_defined );
+
+FILE *yyget_in (void );
+
+void yyset_in (FILE * in_str );
+
+FILE *yyget_out (void );
+
+void yyset_out (FILE * out_str );
+
+int yyget_leng (void );
+
+char *yyget_text (void );
+
+int yyget_lineno (void );
+
+void yyset_lineno (int line_number );
+
+/* Macros after this point can all be overridden by user definitions in
+ * section 1.
+ */
+
+#ifndef YY_SKIP_YYWRAP
+#ifdef __cplusplus
+extern "C" int yywrap (void );
+#else
+extern int yywrap (void );
+#endif
+#endif
+
+ static void yyunput (int c,char *buf_ptr );
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char *,yyconst char *,int );
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (yyconst char * );
+#endif
+
+#ifndef YY_NO_INPUT
+
+#ifdef __cplusplus
+static int yyinput (void );
+#else
+static int input (void );
+#endif
+
+#endif
+
+/* Amount of stuff to slurp up with each read. */
+#ifndef YY_READ_BUF_SIZE
+#define YY_READ_BUF_SIZE 8192
+#endif
+
+/* Copy whatever the last rule matched to the standard output. */
+#ifndef ECHO
+/* This used to be an fputs(), but since the string might contain NUL's,
+ * we now use fwrite().
+ */
+#define ECHO fwrite( yytext, yyleng, 1, yyout )
+#endif
+
+/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL,
+ * is returned in "result".
+ */
+#ifndef YY_INPUT
+#define YY_INPUT(buf,result,max_size) \
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
+ { \
+ int c = '*'; \
+ int n; \
+ for ( n = 0; n < max_size && \
+ (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
+ buf[n] = (char) c; \
+ if ( c == '\n' ) \
+ buf[n++] = (char) c; \
+ if ( c == EOF && ferror( yyin ) ) \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ result = n; \
+ } \
+ else \
+ { \
+ errno=0; \
+ while ( (result = fread(buf, 1, max_size, yyin))==0 && ferror(yyin)) \
+ { \
+ if( errno != EINTR) \
+ { \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ break; \
+ } \
+ errno=0; \
+ clearerr(yyin); \
+ } \
+ }\
+\
+
+#endif
+
+/* No semi-colon after return; correct usage is to write "yyterminate();" -
+ * we don't want an extra ';' after the "return" because that will cause
+ * some compilers to complain about unreachable statements.
+ */
+#ifndef yyterminate
+#define yyterminate() return YY_NULL
+#endif
+
+/* Number of entries by which start-condition stack grows. */
+#ifndef YY_START_STACK_INCR
+#define YY_START_STACK_INCR 25
+#endif
+
+/* Report a fatal error. */
+#ifndef YY_FATAL_ERROR
+#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
+#endif
+
+/* end tables serialization structures and prototypes */
+
+/* Default declaration of generated scanner - a define so the user can
+ * easily add parameters.
+ */
+#ifndef YY_DECL
+#define YY_DECL_IS_OURS 1
+
+extern int yylex (void);
+
+#define YY_DECL int yylex (void)
+#endif /* !YY_DECL */
+
+/* Code executed at the beginning of each rule, after yytext and yyleng
+ * have been set up.
+ */
+#ifndef YY_USER_ACTION
+#define YY_USER_ACTION
+#endif
+
+/* Code executed at the end of each rule. */
+#ifndef YY_BREAK
+#define YY_BREAK break;
+#endif
+
+#define YY_RULE_SETUP \
+ YY_USER_ACTION
+
+/** The main scanner function which does all the work.
+ */
+YY_DECL
+{
+ register yy_state_type yy_current_state;
+ register char *yy_cp, *yy_bp;
+ register int yy_act;
+
+#line 27 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+
+#line 721 "/home/Patrick/work/coreboot/util/sconfig/lex.yy.c_shipped"
+
+ if ( !(yy_init) )
+ {
+ (yy_init) = 1;
+
+#ifdef YY_USER_INIT
+ YY_USER_INIT;
+#endif
+
+ if ( ! (yy_start) )
+ (yy_start) = 1; /* first start state */
+
+ if ( ! yyin )
+ yyin = stdin;
+
+ if ( ! yyout )
+ yyout = stdout;
+
+ if ( ! YY_CURRENT_BUFFER ) {
+ yyensure_buffer_stack ();
+ YY_CURRENT_BUFFER_LVALUE =
+ yy_create_buffer(yyin,YY_BUF_SIZE );
+ }
+
+ yy_load_buffer_state( );
+ }
+
+ while ( 1 ) /* loops until end-of-file is reached */
+ {
+ yy_cp = (yy_c_buf_p);
+
+ /* Support of yytext. */
+ *yy_cp = (yy_hold_char);
+
+ /* yy_bp points to the position in yy_ch_buf of the start of
+ * the current run.
+ */
+ yy_bp = yy_cp;
+
+ yy_current_state = (yy_start);
+yy_match:
+ do
+ {
+ register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)];
+ if ( yy_accept[yy_current_state] )
+ {
+ (yy_last_accepting_state) = yy_current_state;
+ (yy_last_accepting_cpos) = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 86 )
+ yy_c = yy_meta[(unsigned int) yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
+ ++yy_cp;
+ }
+ while ( yy_base[yy_current_state] != 147 );
+
+yy_find_action:
+ yy_act = yy_accept[yy_current_state];
+ if ( yy_act == 0 )
+ { /* have to back up */
+ yy_cp = (yy_last_accepting_cpos);
+ yy_current_state = (yy_last_accepting_state);
+ yy_act = yy_accept[yy_current_state];
+ }
+
+ YY_DO_BEFORE_ACTION;
+
+do_action: /* This label is used only to access EOF actions. */
+
+ switch ( yy_act )
+ { /* beginning of action switch */
+ case 0: /* must back up */
+ /* undo the effects of YY_DO_BEFORE_ACTION */
+ *yy_cp = (yy_hold_char);
+ yy_cp = (yy_last_accepting_cpos);
+ yy_current_state = (yy_last_accepting_state);
+ goto yy_find_action;
+
+case 1:
+YY_RULE_SETUP
+#line 28 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{}
+ YY_BREAK
+case 2:
+/* rule 2 can match eol */
+YY_RULE_SETUP
+#line 29 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{linenum++;}
+ YY_BREAK
+case 3:
+/* rule 3 can match eol */
+YY_RULE_SETUP
+#line 30 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{linenum++;}
+ YY_BREAK
+case 4:
+YY_RULE_SETUP
+#line 31 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{return(CHIP);}
+ YY_BREAK
+case 5:
+YY_RULE_SETUP
+#line 32 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{return(DEVICE);}
+ YY_BREAK
+case 6:
+YY_RULE_SETUP
+#line 33 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{return(REGISTER);}
+ YY_BREAK
+case 7:
+YY_RULE_SETUP
+#line 34 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.number=1; return(BOOL);}
+ YY_BREAK
+case 8:
+YY_RULE_SETUP
+#line 35 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.number=0; return(BOOL);}
+ YY_BREAK
+case 9:
+YY_RULE_SETUP
+#line 36 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.number=PCI; return(BUS);}
+ YY_BREAK
+case 10:
+YY_RULE_SETUP
+#line 37 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.number=PNP; return(BUS);}
+ YY_BREAK
+case 11:
+YY_RULE_SETUP
+#line 38 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.number=I2C; return(BUS);}
+ YY_BREAK
+case 12:
+YY_RULE_SETUP
+#line 39 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.number=APIC; return(BUS);}
+ YY_BREAK
+case 13:
+YY_RULE_SETUP
+#line 40 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.number=APIC_CLUSTER; return(BUS);}
+ YY_BREAK
+case 14:
+YY_RULE_SETUP
+#line 41 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.number=PCI_DOMAIN; return(BUS);}
+ YY_BREAK
+case 15:
+YY_RULE_SETUP
+#line 42 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.number=IRQ; return(RESOURCE);}
+ YY_BREAK
+case 16:
+YY_RULE_SETUP
+#line 43 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.number=DRQ; return(RESOURCE);}
+ YY_BREAK
+case 17:
+YY_RULE_SETUP
+#line 44 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.number=IO; return(RESOURCE);}
+ YY_BREAK
+case 18:
+YY_RULE_SETUP
+#line 45 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{return(END);}
+ YY_BREAK
+case 19:
+YY_RULE_SETUP
+#line 46 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{return(EQUALS);}
+ YY_BREAK
+case 20:
+YY_RULE_SETUP
+#line 47 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.string = malloc(yyleng+1); strncpy(yylval.string, yytext, yyleng); yylval.string[yyleng]='\0'; return(NUMBER);}
+ YY_BREAK
+case 21:
+YY_RULE_SETUP
+#line 48 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.string = malloc(yyleng+1); strncpy(yylval.string, yytext, yyleng); yylval.string[yyleng]='\0'; return(NUMBER);}
+ YY_BREAK
+case 22:
+YY_RULE_SETUP
+#line 49 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.string = malloc(yyleng+1); strncpy(yylval.string, yytext, yyleng); yylval.string[yyleng]='\0'; return(NUMBER);}
+ YY_BREAK
+case 23:
+/* rule 23 can match eol */
+YY_RULE_SETUP
+#line 50 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.string = malloc(yyleng-1); strncpy(yylval.string, yytext+1, yyleng-2); yylval.string[yyleng-2]='\0'; return(STRING);}
+ YY_BREAK
+case 24:
+YY_RULE_SETUP
+#line 51 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+{yylval.string = malloc(yyleng+1); strncpy(yylval.string, yytext, yyleng); yylval.string[yyleng]='\0'; return(STRING);}
+ YY_BREAK
+case 25:
+YY_RULE_SETUP
+#line 52 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+ECHO;
+ YY_BREAK
+#line 932 "/home/Patrick/work/coreboot/util/sconfig/lex.yy.c_shipped"
+case YY_STATE_EOF(INITIAL):
+ yyterminate();
+
+ case YY_END_OF_BUFFER:
+ {
+ /* Amount of text matched not including the EOB char. */
+ int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;
+
+ /* Undo the effects of YY_DO_BEFORE_ACTION. */
+ *yy_cp = (yy_hold_char);
+ YY_RESTORE_YY_MORE_OFFSET
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
+ {
+ /* We're scanning a new file or input source. It's
+ * possible that this happened because the user
+ * just pointed yyin at a new source and called
+ * yylex(). If so, then we have to assure
+ * consistency between YY_CURRENT_BUFFER and our
+ * globals. Here is the right place to do so, because
+ * this is the first action (other than possibly a
+ * back-up) that will match for the new input source.
+ */
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
+ }
+
+ /* Note that here we test for yy_c_buf_p "<=" to the position
+ * of the first EOB in the buffer, since yy_c_buf_p will
+ * already have been incremented past the NUL character
+ * (since all states make transitions on EOB to the
+ * end-of-buffer state). Contrast this with the test
+ * in input().
+ */
+ if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
+ { /* This was really a NUL. */
+ yy_state_type yy_next_state;
+
+ (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( );
+
+ /* Okay, we're now positioned to make the NUL
+ * transition. We couldn't have
+ * yy_get_previous_state() go ahead and do it
+ * for us because it doesn't know how to deal
+ * with the possibility of jamming (and we don't
+ * want to build jamming into it because then it
+ * will run more slowly).
+ */
+
+ yy_next_state = yy_try_NUL_trans( yy_current_state );
+
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+
+ if ( yy_next_state )
+ {
+ /* Consume the NUL. */
+ yy_cp = ++(yy_c_buf_p);
+ yy_current_state = yy_next_state;
+ goto yy_match;
+ }
+
+ else
+ {
+ yy_cp = (yy_c_buf_p);
+ goto yy_find_action;
+ }
+ }
+
+ else switch ( yy_get_next_buffer( ) )
+ {
+ case EOB_ACT_END_OF_FILE:
+ {
+ (yy_did_buffer_switch_on_eof) = 0;
+
+ if ( yywrap( ) )
+ {
+ /* Note: because we've taken care in
+ * yy_get_next_buffer() to have set up
+ * yytext, we can now set up
+ * yy_c_buf_p so that if some total
+ * hoser (like flex itself) wants to
+ * call the scanner after we return the
+ * YY_NULL, it'll still work - another
+ * YY_NULL will get returned.
+ */
+ (yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;
+
+ yy_act = YY_STATE_EOF(YY_START);
+ goto do_action;
+ }
+
+ else
+ {
+ if ( ! (yy_did_buffer_switch_on_eof) )
+ YY_NEW_FILE;
+ }
+ break;
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ (yy_c_buf_p) =
+ (yytext_ptr) + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( );
+
+ yy_cp = (yy_c_buf_p);
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+ goto yy_match;
+
+ case EOB_ACT_LAST_MATCH:
+ (yy_c_buf_p) =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];
+
+ yy_current_state = yy_get_previous_state( );
+
+ yy_cp = (yy_c_buf_p);
+ yy_bp = (yytext_ptr) + YY_MORE_ADJ;
+ goto yy_find_action;
+ }
+ break;
+ }
+
+ default:
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--no action found" );
+ } /* end of action switch */
+ } /* end of scanning one token */
+} /* end of yylex */
+
+/* yy_get_next_buffer - try to read in a new buffer
+ *
+ * Returns a code representing an action:
+ *	EOB_ACT_LAST_MATCH - return the text matched before the EOB as the last match
+ *	EOB_ACT_CONTINUE_SCAN - continue scanning from current position
+ *	EOB_ACT_END_OF_FILE - end of file
+ */
+static int yy_get_next_buffer (void)
+{
+ register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
+ register char *source = (yytext_ptr);
+ register int number_to_move, i;
+ int ret_val;
+
+ if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--end of buffer missed" );
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
+ { /* Don't try to fill the buffer, so this is an EOF. */
+ if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )
+ {
+ /* We matched a single character, the EOB, so
+ * treat this as a final EOF.
+ */
+ return EOB_ACT_END_OF_FILE;
+ }
+
+ else
+ {
+ /* We matched some text prior to the EOB, first
+ * process it.
+ */
+ return EOB_ACT_LAST_MATCH;
+ }
+ }
+
+ /* Try to read more data. */
+
+ /* First move last chars to start of buffer. */
+ number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1;
+
+ for ( i = 0; i < number_to_move; ++i )
+ *(dest++) = *(source++);
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
+ /* don't do the read, it's not guaranteed to return an EOF,
+ * just force an EOF
+ */
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;
+
+ else
+ {
+ int num_to_read =
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
+
+ while ( num_to_read <= 0 )
+ { /* Not enough room in the buffer - grow it. */
+
+ /* just a shorter name for the current buffer */
+ YY_BUFFER_STATE b = YY_CURRENT_BUFFER;
+
+ int yy_c_buf_p_offset =
+ (int) ((yy_c_buf_p) - b->yy_ch_buf);
+
+ if ( b->yy_is_our_buffer )
+ {
+ int new_size = b->yy_buf_size * 2;
+
+ if ( new_size <= 0 )
+ b->yy_buf_size += b->yy_buf_size / 8;
+ else
+ b->yy_buf_size *= 2;
+
+ b->yy_ch_buf = (char *)
+ /* Include room in for 2 EOB chars. */
+ yyrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 );
+ }
+ else
+ /* Can't grow it, we don't own it. */
+ b->yy_ch_buf = 0;
+
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR(
+ "fatal error - scanner input buffer overflow" );
+
+ (yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];
+
+ num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
+ number_to_move - 1;
+
+ }
+
+ if ( num_to_read > YY_READ_BUF_SIZE )
+ num_to_read = YY_READ_BUF_SIZE;
+
+ /* Read in more data. */
+ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
+ (yy_n_chars), (size_t) num_to_read );
+
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ if ( (yy_n_chars) == 0 )
+ {
+ if ( number_to_move == YY_MORE_ADJ )
+ {
+ ret_val = EOB_ACT_END_OF_FILE;
+ yyrestart(yyin );
+ }
+
+ else
+ {
+ ret_val = EOB_ACT_LAST_MATCH;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
+ YY_BUFFER_EOF_PENDING;
+ }
+ }
+
+ else
+ ret_val = EOB_ACT_CONTINUE_SCAN;
+
+ if ((yy_size_t) ((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
+ /* Extend the array by 50%, plus the number we really need. */
+ yy_size_t new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1);
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size );
+ if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
+ }
+
+ (yy_n_chars) += number_to_move;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;
+
+ (yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
+
+ return ret_val;
+}
+
+/* yy_get_previous_state - get the state just before the EOB char was reached */
+
+ static yy_state_type yy_get_previous_state (void)
+{
+ register yy_state_type yy_current_state;
+ register char *yy_cp;
+
+ yy_current_state = (yy_start);
+
+ for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
+ {
+ register YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);
+ if ( yy_accept[yy_current_state] )
+ {
+ (yy_last_accepting_state) = yy_current_state;
+ (yy_last_accepting_cpos) = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 86 )
+ yy_c = yy_meta[(unsigned int) yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
+ }
+
+ return yy_current_state;
+}
+
+/* yy_try_NUL_trans - try to make a transition on the NUL character
+ *
+ * synopsis
+ * next_state = yy_try_NUL_trans( current_state );
+ */
+ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state )
+{
+ register int yy_is_jam;
+ register char *yy_cp = (yy_c_buf_p);
+
+ register YY_CHAR yy_c = 1;
+ if ( yy_accept[yy_current_state] )
+ {
+ (yy_last_accepting_state) = yy_current_state;
+ (yy_last_accepting_cpos) = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 86 )
+ yy_c = yy_meta[(unsigned int) yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
+ yy_is_jam = (yy_current_state == 85);
+
+ return yy_is_jam ? 0 : yy_current_state;
+}
+
+ static void yyunput (int c, register char * yy_bp )
+{
+ register char *yy_cp;
+
+ yy_cp = (yy_c_buf_p);
+
+ /* undo effects of setting up yytext */
+ *yy_cp = (yy_hold_char);
+
+ if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
+ { /* need to shift things up to make room */
+ /* +2 for EOB chars. */
+ register int number_to_move = (yy_n_chars) + 2;
+ register char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2];
+ register char *source =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move];
+
+ while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ *--dest = *--source;
+
+ yy_cp += (int) (dest - source);
+ yy_bp += (int) (dest - source);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars =
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_buf_size;
+
+ if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
+ YY_FATAL_ERROR( "flex scanner push-back overflow" );
+ }
+
+ *--yy_cp = (char) c;
+
+ (yytext_ptr) = yy_bp;
+ (yy_hold_char) = *yy_cp;
+ (yy_c_buf_p) = yy_cp;
+}
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+ static int yyinput (void)
+#else
+ static int input (void)
+#endif
+
+{
+ int c;
+
+ *(yy_c_buf_p) = (yy_hold_char);
+
+ if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )
+ {
+ /* yy_c_buf_p now points to the character we want to return.
+ * If this occurs *before* the EOB characters, then it's a
+ * valid NUL; if not, then we've hit the end of the buffer.
+ */
+ if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
+ /* This was really a NUL. */
+ *(yy_c_buf_p) = '\0';
+
+ else
+ { /* need more input */
+ int offset = (yy_c_buf_p) - (yytext_ptr);
+ ++(yy_c_buf_p);
+
+ switch ( yy_get_next_buffer( ) )
+ {
+ case EOB_ACT_LAST_MATCH:
+ /* This happens because yy_g_n_b()
+ * sees that we've accumulated a
+ * token and flags that we need to
+ * try matching the token before
+ * proceeding. But for input(),
+ * there's no matching to consider.
+ * So convert the EOB_ACT_LAST_MATCH
+ * to EOB_ACT_END_OF_FILE.
+ */
+
+ /* Reset buffer status. */
+ yyrestart(yyin );
+
+ /*FALLTHROUGH*/
+
+ case EOB_ACT_END_OF_FILE:
+ {
+ if ( yywrap( ) )
+ return EOF;
+
+ if ( ! (yy_did_buffer_switch_on_eof) )
+ YY_NEW_FILE;
+#ifdef __cplusplus
+ return yyinput();
+#else
+ return input();
+#endif
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ (yy_c_buf_p) = (yytext_ptr) + offset;
+ break;
+ }
+ }
+ }
+
+ c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */
+ *(yy_c_buf_p) = '\0'; /* preserve yytext */
+ (yy_hold_char) = *++(yy_c_buf_p);
+
+ return c;
+}
+#endif /* ifndef YY_NO_INPUT */
+
+/** Immediately switch to a different input stream.
+ * @param input_file A readable stream.
+ *
+ * @note This function does not reset the start condition to @c INITIAL .
+ */
+ void yyrestart (FILE * input_file )
+{
+
+ if ( ! YY_CURRENT_BUFFER ){
+ yyensure_buffer_stack ();
+ YY_CURRENT_BUFFER_LVALUE =
+ yy_create_buffer(yyin,YY_BUF_SIZE );
+ }
+
+ yy_init_buffer(YY_CURRENT_BUFFER,input_file );
+ yy_load_buffer_state( );
+}
+
+/** Switch to a different input buffer.
+ * @param new_buffer The new input buffer.
+ *
+ */
+ void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer )
+{
+
+ /* TODO. We should be able to replace this entire function body
+ * with
+ * yypop_buffer_state();
+ * yypush_buffer_state(new_buffer);
+ */
+ yyensure_buffer_stack ();
+ if ( YY_CURRENT_BUFFER == new_buffer )
+ return;
+
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *(yy_c_buf_p) = (yy_hold_char);
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+ yy_load_buffer_state( );
+
+ /* We don't actually know whether we did this switch during
+ * EOF (yywrap()) processing, but the only time this flag
+ * is looked at is after yywrap() is called, so it's safe
+ * to go ahead and always set it.
+ */
+ (yy_did_buffer_switch_on_eof) = 1;
+}
+
+static void yy_load_buffer_state (void)
+{
+ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ (yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
+ yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
+ (yy_hold_char) = *(yy_c_buf_p);
+}
+
+/** Allocate and initialize an input buffer state.
+ * @param file A readable stream.
+ * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
+ *
+ * @return the allocated buffer state.
+ */
+ YY_BUFFER_STATE yy_create_buffer (FILE * file, int size )
+{
+ YY_BUFFER_STATE b;
+
+ b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
+
+ b->yy_buf_size = size;
+
+ /* yy_ch_buf has to be 2 characters longer than the size given because
+ * we need to put in 2 end-of-buffer characters.
+ */
+ b->yy_ch_buf = (char *) yyalloc(b->yy_buf_size + 2 );
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
+
+ b->yy_is_our_buffer = 1;
+
+ yy_init_buffer(b,file );
+
+ return b;
+}
+
+/** Destroy the buffer.
+ * @param b a buffer created with yy_create_buffer()
+ *
+ */
+ void yy_delete_buffer (YY_BUFFER_STATE b )
+{
+
+ if ( ! b )
+ return;
+
+ if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
+ YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
+
+ if ( b->yy_is_our_buffer )
+ yyfree((void *) b->yy_ch_buf );
+
+ yyfree((void *) b );
+}
+
+#ifndef __cplusplus
+extern int isatty (int );
+#endif /* __cplusplus */
+
+/* Initializes or reinitializes a buffer.
+ * This function is sometimes called more than once on the same buffer,
+ * such as during a yyrestart() or at EOF.
+ */
+ static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file )
+
+{
+ int oerrno = errno;
+
+ yy_flush_buffer(b );
+
+ b->yy_input_file = file;
+ b->yy_fill_buffer = 1;
+
+ /* If b is the current buffer, then yy_init_buffer was _probably_
+ * called from yyrestart() or through yy_get_next_buffer.
+ * In that case, we don't want to reset the lineno or column.
+ */
+ if (b != YY_CURRENT_BUFFER){
+ b->yy_bs_lineno = 1;
+ b->yy_bs_column = 0;
+ }
+
+ b->yy_is_interactive = file ? (isatty( fileno(file) ) > 0) : 0;
+
+ errno = oerrno;
+}
+
+/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
+ * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
+ *
+ */
+ void yy_flush_buffer (YY_BUFFER_STATE b )
+{
+ if ( ! b )
+ return;
+
+ b->yy_n_chars = 0;
+
+ /* We always need two end-of-buffer characters. The first causes
+ * a transition to the end-of-buffer state. The second causes
+ * a jam in that state.
+ */
+ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
+ b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
+
+ b->yy_buf_pos = &b->yy_ch_buf[0];
+
+ b->yy_at_bol = 1;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ if ( b == YY_CURRENT_BUFFER )
+ yy_load_buffer_state( );
+}
+
+/** Pushes the new state onto the stack. The new state becomes
+ * the current state. This function will allocate the stack
+ * if necessary.
+ * @param new_buffer The new state.
+ *
+ */
+void yypush_buffer_state (YY_BUFFER_STATE new_buffer )
+{
+ if (new_buffer == NULL)
+ return;
+
+ yyensure_buffer_stack();
+
+ /* This block is copied from yy_switch_to_buffer. */
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *(yy_c_buf_p) = (yy_hold_char);
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
+ }
+
+ /* Only push if top exists. Otherwise, replace top. */
+ if (YY_CURRENT_BUFFER)
+ (yy_buffer_stack_top)++;
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+
+ /* copied from yy_switch_to_buffer. */
+ yy_load_buffer_state( );
+ (yy_did_buffer_switch_on_eof) = 1;
+}
+
+/** Removes and deletes the top of the stack, if present.
+ * The next element becomes the new top.
+ *
+ */
+void yypop_buffer_state (void)
+{
+ if (!YY_CURRENT_BUFFER)
+ return;
+
+ yy_delete_buffer(YY_CURRENT_BUFFER );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ if ((yy_buffer_stack_top) > 0)
+ --(yy_buffer_stack_top);
+
+ if (YY_CURRENT_BUFFER) {
+ yy_load_buffer_state( );
+ (yy_did_buffer_switch_on_eof) = 1;
+ }
+}
+
+/* Allocates the stack if it does not exist.
+ * Guarantees space for at least one push.
+ */
+static void yyensure_buffer_stack (void)
+{
+ int num_to_alloc;
+
+ if (!(yy_buffer_stack)) {
+
+		/* The first allocation is for a single element, since we don't know
+		 * whether this scanner will even need a stack of buffers; the code
+		 * below grows the stack whenever more room is required.
+		 */
+ num_to_alloc = 1;
+ (yy_buffer_stack) = (struct yy_buffer_state**)yyalloc
+ (num_to_alloc * sizeof(struct yy_buffer_state*)
+ );
+ if ( ! (yy_buffer_stack) )
+ YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
+
+ memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));
+
+ (yy_buffer_stack_max) = num_to_alloc;
+ (yy_buffer_stack_top) = 0;
+ return;
+ }
+
+ if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){
+
+ /* Increase the buffer to prepare for a possible push. */
+ int grow_size = 8 /* arbitrary grow size */;
+
+ num_to_alloc = (yy_buffer_stack_max) + grow_size;
+ (yy_buffer_stack) = (struct yy_buffer_state**)yyrealloc
+ ((yy_buffer_stack),
+ num_to_alloc * sizeof(struct yy_buffer_state*)
+ );
+ if ( ! (yy_buffer_stack) )
+ YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
+
+ /* zero only the new slots.*/
+ memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));
+ (yy_buffer_stack_max) = num_to_alloc;
+ }
+}
+
+/** Setup the input buffer state to scan directly from a user-specified character buffer.
+ * @param base the character buffer
+ * @param size the size in bytes of the character buffer
+ *
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size )
+{
+ YY_BUFFER_STATE b;
+
+ if ( size < 2 ||
+ base[size-2] != YY_END_OF_BUFFER_CHAR ||
+ base[size-1] != YY_END_OF_BUFFER_CHAR )
+ /* They forgot to leave room for the EOB's. */
+ return 0;
+
+ b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state ) );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" );
+
+ b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */
+ b->yy_buf_pos = b->yy_ch_buf = base;
+ b->yy_is_our_buffer = 0;
+ b->yy_input_file = 0;
+ b->yy_n_chars = b->yy_buf_size;
+ b->yy_is_interactive = 0;
+ b->yy_at_bol = 1;
+ b->yy_fill_buffer = 0;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ yy_switch_to_buffer(b );
+
+ return b;
+}
+
+/** Setup the input buffer state to scan a string. The next call to yylex() will
+ * scan from a @e copy of @a str.
+ * @param yystr a NUL-terminated string to scan
+ *
+ * @return the newly allocated buffer state object.
+ * @note If you want to scan bytes that may contain NUL values, then use
+ * yy_scan_bytes() instead.
+ */
+YY_BUFFER_STATE yy_scan_string (yyconst char * yystr )
+{
+
+ return yy_scan_bytes(yystr,strlen(yystr) );
+}
+
+/** Setup the input buffer state to scan the given bytes. The next call to yylex() will
+ * scan from a @e copy of @a bytes.
+ * @param bytes the byte buffer to scan
+ * @param len the number of bytes in the buffer pointed to by @a bytes.
+ *
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE yy_scan_bytes (yyconst char * yybytes, int _yybytes_len )
+{
+ YY_BUFFER_STATE b;
+ char *buf;
+ yy_size_t n;
+ int i;
+
+ /* Get memory for full buffer, including space for trailing EOB's. */
+ n = _yybytes_len + 2;
+ buf = (char *) yyalloc(n );
+ if ( ! buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" );
+
+ for ( i = 0; i < _yybytes_len; ++i )
+ buf[i] = yybytes[i];
+
+ buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;
+
+ b = yy_scan_buffer(buf,n );
+ if ( ! b )
+ YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" );
+
+ /* It's okay to grow etc. this buffer, and we should throw it
+ * away when we're done.
+ */
+ b->yy_is_our_buffer = 1;
+
+ return b;
+}
+
+#ifndef YY_EXIT_FAILURE
+#define YY_EXIT_FAILURE 2
+#endif
+
+static void yy_fatal_error (yyconst char* msg )
+{
+ (void) fprintf( stderr, "%s\n", msg );
+ exit( YY_EXIT_FAILURE );
+}
+
+/* Redefine yyless() so it works in section 3 code. */
+
+#undef yyless
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ yytext[yyleng] = (yy_hold_char); \
+ (yy_c_buf_p) = yytext + yyless_macro_arg; \
+ (yy_hold_char) = *(yy_c_buf_p); \
+ *(yy_c_buf_p) = '\0'; \
+ yyleng = yyless_macro_arg; \
+ } \
+ while ( 0 )
+
+/* Accessor methods (get/set functions) to struct members. */
+
+/** Get the current line number.
+ *
+ */
+int yyget_lineno (void)
+{
+
+ return yylineno;
+}
+
+/** Get the input stream.
+ *
+ */
+FILE *yyget_in (void)
+{
+ return yyin;
+}
+
+/** Get the output stream.
+ *
+ */
+FILE *yyget_out (void)
+{
+ return yyout;
+}
+
+/** Get the length of the current token.
+ *
+ */
+int yyget_leng (void)
+{
+ return yyleng;
+}
+
+/** Get the current token.
+ *
+ */
+
+char *yyget_text (void)
+{
+ return yytext;
+}
+
+/** Set the current line number.
+ * @param line_number
+ *
+ */
+void yyset_lineno (int line_number )
+{
+
+ yylineno = line_number;
+}
+
+/** Set the input stream. This does not discard the current
+ * input buffer.
+ * @param in_str A readable stream.
+ *
+ * @see yy_switch_to_buffer
+ */
+void yyset_in (FILE * in_str )
+{
+ yyin = in_str ;
+}
+
+void yyset_out (FILE * out_str )
+{
+ yyout = out_str ;
+}
+
+int yyget_debug (void)
+{
+ return yy_flex_debug;
+}
+
+void yyset_debug (int bdebug )
+{
+ yy_flex_debug = bdebug ;
+}
+
+static int yy_init_globals (void)
+{
+ /* Initialization is the same as for the non-reentrant scanner.
+ * This function is called from yylex_destroy(), so don't allocate here.
+ */
+
+ (yy_buffer_stack) = 0;
+ (yy_buffer_stack_top) = 0;
+ (yy_buffer_stack_max) = 0;
+ (yy_c_buf_p) = (char *) 0;
+ (yy_init) = 0;
+ (yy_start) = 0;
+
+/* Defined in main.c */
+#ifdef YY_STDINIT
+ yyin = stdin;
+ yyout = stdout;
+#else
+ yyin = (FILE *) 0;
+ yyout = (FILE *) 0;
+#endif
+
+ /* For future reference: Set errno on error, since we are called by
+ * yylex_init()
+ */
+ return 0;
+}
+
+/* yylex_destroy is for both reentrant and non-reentrant scanners. */
+int yylex_destroy (void)
+{
+
+ /* Pop the buffer stack, destroying each element. */
+ while(YY_CURRENT_BUFFER){
+ yy_delete_buffer(YY_CURRENT_BUFFER );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ yypop_buffer_state();
+ }
+
+ /* Destroy the stack itself. */
+ yyfree((yy_buffer_stack) );
+ (yy_buffer_stack) = NULL;
+
+ /* Reset the globals. This is important in a non-reentrant scanner so the next time
+ * yylex() is called, initialization will occur. */
+ yy_init_globals( );
+
+ return 0;
+}
+
+/*
+ * Internal utility routines.
+ */
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
+{
+ register int i;
+ for ( i = 0; i < n; ++i )
+ s1[i] = s2[i];
+}
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (yyconst char * s )
+{
+ register int n;
+ for ( n = 0; s[n]; ++n )
+ ;
+
+ return n;
+}
+#endif
+
+void *yyalloc (yy_size_t size )
+{
+ return (void *) malloc( size );
+}
+
+void *yyrealloc (void * ptr, yy_size_t size )
+{
+ /* The cast to (char *) in the following accommodates both
+ * implementations that use char* generic pointers, and those
+ * that use void* generic pointers. It works with the latter
+ * because both ANSI C and C++ allow castless assignment from
+ * any pointer type to void*, and deal with argument conversions
+ * as though doing an assignment.
+ */
+ return (void *) realloc( (char *) ptr, size );
+}
+
+void yyfree (void * ptr )
+{
+ free( (char *) ptr ); /* see yyrealloc() for (char *) cast */
+}
+
+#define YYTABLES_NAME "yytables"
+
+#line 52 "/home/Patrick/work/coreboot/util/sconfig/sconfig.l"
+
+
+
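
The generated scanner above is self-contained apart from yylval and yywrap(); its in-memory entry points (yy_scan_string(), yy_scan_bytes(), yy_scan_buffer() and yy_delete_buffer()) are described only by the comments in the generated code. The following sketch is purely illustrative and not part of this change: it restates the prototypes it needs locally, and the yylval/yywrap definitions and the scan_string_demo.c name are stand-ins (the real definitions normally come from the generated parser and from sconfig.y).

    /* scan_string_demo.c -- hypothetical sketch, not part of this commit. */
    #include <stdio.h>
    #include "sconfig.tab.h"                /* token codes and YYSTYPE */

    /* Restated from the generated scanner, which ships no header of its own. */
    typedef struct yy_buffer_state *YY_BUFFER_STATE;
    YY_BUFFER_STATE yy_scan_string(const char *str);
    void yy_delete_buffer(YY_BUFFER_STATE b);
    int yylex_destroy(void);
    extern int yylex(void);
    extern char *yytext;

    YYSTYPE yylval;                         /* normally defined by the generated parser */
    int yywrap(void) { return 1; }          /* normally provided by sconfig.y */

    int main(void)
    {
        /* yy_scan_string() copies the string and makes it the current buffer. */
        YY_BUFFER_STATE buf = yy_scan_string("device pci 0.0 on end\n");
        int tok;

        while ((tok = yylex()) != 0)        /* yylex() returns 0 at end of input */
            printf("token %d: '%s'\n", tok, yytext);

        yy_delete_buffer(buf);              /* frees the scanner's copy of the string */
        yylex_destroy();
        return 0;
    }

yy_scan_bytes() works the same way for input that may contain NUL bytes, as the comments above point out.
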
diff --git a/util/sconfig/parsedesc.g b/util/sconfig/parsedesc.g
deleted file mode 100644
index acb8524838..0000000000
--- a/util/sconfig/parsedesc.g
+++ /dev/null
@@ -1,195 +0,0 @@
-######################################################################
-# The remainder of this file is from parsedesc.{g,py}
-
-def append(lst, x):
- "Imperative append"
- lst.append(x)
- return lst
-
-def add_inline_token(tokens, str):
- tokens.insert( 0, (str, eval(str, {}, {})) )
- return Terminal(str)
-
-def cleanup_choice(lst):
- if len(lst) == 0: return Sequence([])
- if len(lst) == 1: return lst[0]
- return apply(Choice, tuple(lst))
-
-def cleanup_sequence(lst):
- if len(lst) == 1: return lst[0]
- return apply(Sequence, tuple(lst))
-
-def cleanup_rep(node, rep):
- if rep == 'star': return Star(node)
- elif rep == 'plus': return Plus(node)
- else: return node
-
-def resolve_name(tokens, id, args):
- if id in map(lambda x: x[0], tokens):
- # It's a token
- if args:
- print 'Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args)
- return Terminal(id)
- else:
- # It's a name, so assume it's a nonterminal
- return NonTerminal(id, args)
-
-%%
-parser ParserDescription:
- option: "context-insensitive-scanner"
-
- ignore: "[ \t\r\n]+"
- ignore: "#.*?\r?\n"
- token END: "$"
- token ATTR: "<<.+?>>"
- token STMT: "{{.+?}}"
- token ID: '[a-zA-Z_][a-zA-Z_0-9]*'
- token STR: '[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"'
- token LP: '\\('
- token RP: '\\)'
- token LB: '\\['
- token RB: '\\]'
- token OR: '[|]'
- token STAR: '[*]'
- token PLUS: '[+]'
- token QUEST: '[?]'
- token COLON: ':'
-
- rule Parser: "parser" ID ":"
- Options
- Tokens
- Rules<<Tokens>>
- END
- {{ return Generator(ID,Options,Tokens,Rules) }}
-
- rule Options: {{ opt = {} }}
- ( "option" ":" Str {{ opt[Str] = 1 }} )*
- {{ return opt }}
-
- rule Tokens: {{ tok = [] }}
- (
- "token" ID ":" Str {{ tok.append( (ID,Str) ) }}
- | "ignore" ":" Str {{ tok.append( ('#ignore',Str) ) }}
- )*
- {{ return tok }}
-
- rule Rules<<tokens>>:
- {{ rul = [] }}
- (
- "rule" ID OptParam ":" ClauseA<<tokens>>
- {{ rul.append( (ID,OptParam,ClauseA) ) }}
- )*
- {{ return rul }}
-
- rule ClauseA<<tokens>>:
- ClauseB<<tokens>>
- {{ v = [ClauseB] }}
- ( OR ClauseB<<tokens>> {{ v.append(ClauseB) }} )*
- {{ return cleanup_choice(v) }}
-
- rule ClauseB<<tokens>>:
- {{ v = [] }}
- ( ClauseC<<tokens>> {{ v.append(ClauseC) }} )*
- {{ return cleanup_sequence(v) }}
-
- rule ClauseC<<tokens>>:
- ClauseD<<tokens>>
- ( PLUS {{ return Plus(ClauseD) }}
- | STAR {{ return Star(ClauseD) }}
- | {{ return ClauseD }} )
-
- rule ClauseD<<tokens>>:
- STR {{ t = (STR, eval(STR,{},{})) }}
- {{ if t not in tokens: tokens.insert( 0, t ) }}
- {{ return Terminal(STR) }}
- | ID OptParam {{ return resolve_name(tokens, ID, OptParam) }}
- | LP ClauseA<<tokens>> RP {{ return ClauseA }}
- | LB ClauseA<<tokens>> RB {{ return Option(ClauseA) }}
- | STMT {{ return Eval(STMT[2:-2]) }}
-
- rule OptParam: [ ATTR {{ return ATTR[2:-2] }} ] {{ return '' }}
- rule Str: STR {{ return eval(STR,{},{}) }}
-%%
-
-# This replaces the default main routine
-
-yapps_options = [
- ('context-insensitive-scanner', 'context-insensitive-scanner',
- 'Scan all tokens (see docs)')
- ]
-
-def generate(inputfilename, outputfilename='', dump=0, **flags):
- """Generate a grammar, given an input filename (X.g)
- and an output filename (defaulting to X.py)."""
-
- if not outputfilename:
- if inputfilename[-2:]=='.g': outputfilename = inputfilename[:-2]+'.py'
- else: raise "Invalid Filename", outputfilename
-
- print ' SCONFIG ', join(outputfilename.split('/')[-4:], '/')
-
- DIVIDER = '\n%%\n' # This pattern separates the pre/post parsers
- preparser, postparser = None, None # Code before and after the parser desc
-
- # Read the entire file
- s = open(inputfilename,'r').read()
-
- # See if there's a separation between the pre-parser and parser
- f = find(s, DIVIDER)
- if f >= 0: preparser, s = s[:f]+'\n\n', s[f+len(DIVIDER):]
-
- # See if there's a separation between the parser and post-parser
- f = find(s, DIVIDER)
- if f >= 0: s, postparser = s[:f], '\n\n'+s[f+len(DIVIDER):]
-
- # Create the parser and scanner
- p = ParserDescription(ParserDescriptionScanner(s))
- if not p: return
-
- # Now parse the file
- t = wrap_error_reporter(p, 'Parser')
- if not t: return # Error
- if preparser is not None: t.preparser = preparser
- if postparser is not None: t.postparser = postparser
-
- # Check the options
- for f in t.options.keys():
- for opt,_,_ in yapps_options:
- if f == opt: break
- else:
- print 'Warning: unrecognized option', f
- # Add command line options to the set
- for f in flags.keys(): t.options[f] = flags[f]
-
- # Generate the output
- if dump:
- t.dump_information()
- else:
- t.output = open(outputfilename, 'w')
- t.generate_output()
-
-if __name__=='__main__':
- import sys, getopt
- optlist, args = getopt.getopt(sys.argv[1:], 'f:', ['dump'])
- if not args or len(args) > 2:
- print 'Usage:'
- print ' python', sys.argv[0], '[flags] input.g [output.py]'
- print 'Flags:'
- print (' --dump' + ' '*40)[:35] + 'Dump out grammar information'
- for flag, _, doc in yapps_options:
- print (' -f' + flag + ' '*40)[:35] + doc
- else:
- # Read in the options and create a list of flags
- flags = {}
- for opt in optlist:
- for flag, name, _ in yapps_options:
- if opt == ('-f', flag):
- flags[name] = 1
- break
- else:
- if opt == ('--dump', ''):
- flags['dump'] = 1
- else:
- print 'Warning - unrecognized option: ', opt[0], opt[1]
-
- apply(generate, tuple(args), flags)
diff --git a/util/sconfig/sconfig.l b/util/sconfig/sconfig.l
new file mode 100755
index 0000000000..2f05d922f0
--- /dev/null
+++ b/util/sconfig/sconfig.l
@@ -0,0 +1,52 @@
+%{
+/*
+ * sconfig, coreboot device tree compiler
+ *
+ * Copyright (C) 2010 coresystems GmbH
+ * written by Patrick Georgi <patrick.georgi@coresystems.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
+ */
+
+#include "sconfig.tab.h"
+
+int linenum = 0;
+%}
+%option nodebug
+%%
+[ \t]+ {}
+#.*\n {linenum++;}
+\r?\n {linenum++;}
+chip {return(CHIP);}
+device {return(DEVICE);}
+register {return(REGISTER);}
+on {yylval.number=1; return(BOOL);}
+off {yylval.number=0; return(BOOL);}
+pci {yylval.number=PCI; return(BUS);}
+pnp {yylval.number=PNP; return(BUS);}
+i2c {yylval.number=I2C; return(BUS);}
+apic {yylval.number=APIC; return(BUS);}
+apic_cluster {yylval.number=APIC_CLUSTER; return(BUS);}
+pci_domain {yylval.number=PCI_DOMAIN; return(BUS);}
+irq {yylval.number=IRQ; return(RESOURCE);}
+drq {yylval.number=DRQ; return(RESOURCE);}
+io {yylval.number=IO; return(RESOURCE);}
+end {return(END);}
+= {return(EQUALS);}
+0x[0-9a-fA-F.]+ {yylval.string = malloc(yyleng+1); strncpy(yylval.string, yytext, yyleng); yylval.string[yyleng]='\0'; return(NUMBER);}
+[0-9.]+ {yylval.string = malloc(yyleng+1); strncpy(yylval.string, yytext, yyleng); yylval.string[yyleng]='\0'; return(NUMBER);}
+[0-9a-fA-F.]+ {yylval.string = malloc(yyleng+1); strncpy(yylval.string, yytext, yyleng); yylval.string[yyleng]='\0'; return(NUMBER);}
+\"[^\"]+\" {yylval.string = malloc(yyleng-1); strncpy(yylval.string, yytext+1, yyleng-2); yylval.string[yyleng-2]='\0'; return(STRING);}
+[^ \n\t]+ {yylval.string = malloc(yyleng+1); strncpy(yylval.string, yytext, yyleng); yylval.string[yyleng]='\0'; return(STRING);}
+%%
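
The lexer source itself stays small: each keyword maps to a single token (CHIP, DEVICE, REGISTER, END, EQUALS), bus and resource kinds are passed to the parser as integer codes in yylval.number, and numeric or string spellings are handed over as malloc'd copies in yylval.string. As a purely hypothetical illustration of that interface (again not part of this change; the tokdump.c name and the yylval/yywrap definitions are stand-ins for what the generated parser and sconfig.y provide), a standalone token dumper could drive the scanner like this:

    /* tokdump.c -- hypothetical sketch, not part of this commit. */
    #include <stdio.h>
    #include "sconfig.tab.h"                /* token codes and YYSTYPE */

    extern FILE *yyin;                      /* scanner input; stdin is used if left NULL */
    extern char *yytext;
    extern int linenum;                     /* newlines seen so far, kept by sconfig.l */
    extern int yylex(void);

    YYSTYPE yylval;                         /* normally defined by the generated parser */
    int yywrap(void) { return 1; }          /* normally provided by sconfig.y */

    int main(int argc, char **argv)
    {
        int tok;

        if (argc > 1 && !(yyin = fopen(argv[1], "r"))) {
            perror(argv[1]);
            return 1;
        }
        while ((tok = yylex()) != 0) {
            switch (tok) {
            case BUS:
            case RESOURCE:
            case BOOL:
                /* these rules pass an integer code in yylval.number */
                printf("line %d: %s (code %d)\n", linenum + 1, yytext, yylval.number);
                break;
            case NUMBER:
            case STRING:
                /* these rules pass a malloc'd copy of the spelling in yylval.string */
                printf("line %d: literal '%s'\n", linenum + 1, yylval.string);
                break;
            default:
                printf("line %d: token '%s'\n", linenum + 1, yytext);
                break;
            }
        }
        return 0;
    }

This sketch is meant to be linked against the generated scanner only; the real build instead links the scanner together with the generated parser below, which provides yylval and calls yylex() itself.
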
diff --git a/util/sconfig/sconfig.tab.c_shipped b/util/sconfig/sconfig.tab.c_shipped
new file mode 100644
index 0000000000..d73a503a12
--- /dev/null
+++ b/util/sconfig/sconfig.tab.c_shipped
@@ -0,0 +1,2088 @@
+
+/* A Bison parser, made by GNU Bison 2.4.1. */
+
+/* Skeleton implementation for Bison's Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+/* C LALR(1) parser skeleton written by Richard Stallman, by
+ simplifying the original so-called "semantic" parser. */
+
+/* All symbols defined below should begin with yy or YY, to avoid
+ infringing on user name space. This should be done even for local
+ variables, as they might otherwise be expanded by user macros.
+ There are some unavoidable exceptions within include files to
+ define necessary library symbols; they are noted "INFRINGES ON
+ USER NAME SPACE" below. */
+
+/* Identify Bison output. */
+#define YYBISON 1
+
+/* Bison version. */
+#define YYBISON_VERSION "2.4.1"
+
+/* Skeleton name. */
+#define YYSKELETON_NAME "yacc.c"
+
+/* Pure parsers. */
+#define YYPURE 0
+
+/* Push parsers. */
+#define YYPUSH 0
+
+/* Pull parsers. */
+#define YYPULL 1
+
+/* Using locations. */
+#define YYLSP_NEEDED 0
+
+
+
+/* Copy the first part of user declarations. */
+
+/* Line 189 of yacc.c */
+#line 1 "/home/Patrick/work/coreboot/util/sconfig/sconfig.y"
+
+/*
+ * sconfig, coreboot device tree compiler
+ *
+ * Copyright (C) 2010 coresystems GmbH
+ * written by Patrick Georgi <patrick.georgi@coresystems.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+
+enum devtype { chip, device };
+
+struct resource;
+struct resource {
+ int type;
+ int index;
+ int base;
+ struct resource *next;
+};
+
+struct reg;
+struct reg {
+ char *key;
+ char *value;
+ struct reg *next;
+};
+
+struct device;
+struct device {
+ int id;
+ int enabled;
+ int used;
+ int multidev;
+ int link;
+ int rescnt;
+ int chiph_exists;
+ char *ops;
+ char *name;
+ char *aliased_name;
+ char *name_underscore;
+ char *path;
+ int path_a;
+ int path_b;
+ int bustype;
+ enum devtype type;
+ struct device *parent;
+ struct device *bus;
+ struct device *next;
+ struct device *nextdev;
+ struct device *children;
+ struct device *latestchild;
+ struct device *next_sibling;
+ struct device *sibling;
+ struct device *chip;
+ struct resource *res;
+ struct reg *reg;
+} *head, *lastdev, *cur_parent, *cur_bus, root;
+
+struct header;
+struct header {
+ char *name;
+ struct header *next;
+} headers;
+
+int devcount = 0;
+
+struct device *new_dev() {
+ struct device *dev = malloc(sizeof(struct device));
+ memset(dev, 0, sizeof(struct device));
+ dev->id = ++devcount;
+ dev->parent = cur_parent;
+ dev->bus = cur_bus;
+ head->next = dev;
+ head = dev;
+ return dev;
+}
+
+int device_match(struct device *a, struct device *b) {
+ if ((a->bustype == b->bustype) && (a->bus == b->bus) && (a->path_a == b->path_a) && (a->path_b == b->path_b))
+ return 1;
+ return 0;
+}
+
+void fold_in(struct device *parent) {
+ struct device *child = parent->children;
+ struct device *latest = 0;
+ while (child != latest) {
+ if (child->children) {
+ if (!latest) latest = child->children;
+ parent->latestchild->next_sibling = child->children;
+ parent->latestchild = child->latestchild;
+ }
+ child = child->next_sibling;
+ }
+}
+
+int yywrap(void) {
+ return 1;
+}
+
+void yyerror (char const *str)
+{
+ fprintf (stderr, "%s\n", str);
+}
+
+
+/* Line 189 of yacc.c */
+#line 199 "/home/Patrick/work/coreboot/util/sconfig/sconfig.tab.c_shipped"
+
+/* Enabling traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 0
+#endif
+
+/* Enabling verbose error messages. */
+#ifdef YYERROR_VERBOSE
+# undef YYERROR_VERBOSE
+# define YYERROR_VERBOSE 1
+#else
+# define YYERROR_VERBOSE 0
+#endif
+
+/* Enabling the token table. */
+#ifndef YYTOKEN_TABLE
+# define YYTOKEN_TABLE 0
+#endif
+
+
+/* Tokens. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ /* Put the tokens into the symbol table, so that GDB and other debuggers
+ know about them. */
+ enum yytokentype {
+ CHIP = 258,
+ DEVICE = 259,
+ REGISTER = 260,
+ BOOL = 261,
+ BUS = 262,
+ RESOURCE = 263,
+ END = 264,
+ EQUALS = 265,
+ HEX = 266,
+ STRING = 267,
+ PCI = 268,
+ PNP = 269,
+ I2C = 270,
+ APIC = 271,
+ APIC_CLUSTER = 272,
+ PCI_DOMAIN = 273,
+ IRQ = 274,
+ DRQ = 275,
+ IO = 276,
+ NUMBER = 277
+ };
+#endif
+
+
+
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+typedef union YYSTYPE
+{
+
+/* Line 214 of yacc.c */
+#line 125 "/home/Patrick/work/coreboot/util/sconfig/sconfig.y"
+
+ struct device *device;
+ char *string;
+ int number;
+
+
+
+/* Line 214 of yacc.c */
+#line 265 "/home/Patrick/work/coreboot/util/sconfig/sconfig.tab.c_shipped"
+} YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
+# define yystype YYSTYPE /* obsolescent; will be withdrawn */
+# define YYSTYPE_IS_DECLARED 1
+#endif
+
+
+/* Copy the second part of user declarations. */
+
+
+/* Line 264 of yacc.c */
+#line 277 "/home/Patrick/work/coreboot/util/sconfig/sconfig.tab.c_shipped"
+
+#ifdef short
+# undef short
+#endif
+
+#ifdef YYTYPE_UINT8
+typedef YYTYPE_UINT8 yytype_uint8;
+#else
+typedef unsigned char yytype_uint8;
+#endif
+
+#ifdef YYTYPE_INT8
+typedef YYTYPE_INT8 yytype_int8;
+#elif (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+typedef signed char yytype_int8;
+#else
+typedef short int yytype_int8;
+#endif
+
+#ifdef YYTYPE_UINT16
+typedef YYTYPE_UINT16 yytype_uint16;
+#else
+typedef unsigned short int yytype_uint16;
+#endif
+
+#ifdef YYTYPE_INT16
+typedef YYTYPE_INT16 yytype_int16;
+#else
+typedef short int yytype_int16;
+#endif
+
+#ifndef YYSIZE_T
+# ifdef __SIZE_TYPE__
+# define YYSIZE_T __SIZE_TYPE__
+# elif defined size_t
+# define YYSIZE_T size_t
+# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# else
+# define YYSIZE_T unsigned int
+# endif
+#endif
+
+#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
+
+#ifndef YY_
+# if YYENABLE_NLS
+# if ENABLE_NLS
+# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
+# define YY_(msgid) dgettext ("bison-runtime", msgid)
+# endif
+# endif
+# ifndef YY_
+# define YY_(msgid) msgid
+# endif
+#endif
+
+/* Suppress unused-variable warnings by "using" E. */
+#if ! defined lint || defined __GNUC__
+# define YYUSE(e) ((void) (e))
+#else
+# define YYUSE(e) /* empty */
+#endif
+
+/* Identity function, used to suppress warnings about constant conditions. */
+#ifndef lint
+# define YYID(n) (n)
+#else
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static int
+YYID (int yyi)
+#else
+static int
+YYID (yyi)
+ int yyi;
+#endif
+{
+ return yyi;
+}
+#endif
+
+#if ! defined yyoverflow || YYERROR_VERBOSE
+
+/* The parser invokes alloca or malloc; define the necessary symbols. */
+
+# ifdef YYSTACK_USE_ALLOCA
+# if YYSTACK_USE_ALLOCA
+# ifdef __GNUC__
+# define YYSTACK_ALLOC __builtin_alloca
+# elif defined __BUILTIN_VA_ARG_INCR
+# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
+# elif defined _AIX
+# define YYSTACK_ALLOC __alloca
+# elif defined _MSC_VER
+# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
+# define alloca _alloca
+# else
+# define YYSTACK_ALLOC alloca
+# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef _STDLIB_H
+# define _STDLIB_H 1
+# endif
+# endif
+# endif
+# endif
+# endif
+
+# ifdef YYSTACK_ALLOC
+ /* Pacify GCC's `empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
+# ifndef YYSTACK_ALLOC_MAXIMUM
+ /* The OS might guarantee only one guard page at the bottom of the stack,
+ and a page size can be as small as 4096 bytes. So we cannot safely
+ invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
+ to allow for a few compiler-allocated temporary stack slots. */
+# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
+# endif
+# else
+# define YYSTACK_ALLOC YYMALLOC
+# define YYSTACK_FREE YYFREE
+# ifndef YYSTACK_ALLOC_MAXIMUM
+# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
+# endif
+# if (defined __cplusplus && ! defined _STDLIB_H \
+ && ! ((defined YYMALLOC || defined malloc) \
+ && (defined YYFREE || defined free)))
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef _STDLIB_H
+# define _STDLIB_H 1
+# endif
+# endif
+# ifndef YYMALLOC
+# define YYMALLOC malloc
+# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# ifndef YYFREE
+# define YYFREE free
+# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+void free (void *); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# endif
+#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
+
+
+#if (! defined yyoverflow \
+ && (! defined __cplusplus \
+ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+
+/* A type that is properly aligned for any stack member. */
+union yyalloc
+{
+ yytype_int16 yyss_alloc;
+ YYSTYPE yyvs_alloc;
+};
+
+/* The size of the maximum gap between one aligned stack and the next. */
+# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
+
+/* The size of an array large enough to hold all stacks, each with
+   N elements.  */
+# define YYSTACK_BYTES(N) \
+ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ + YYSTACK_GAP_MAXIMUM)
+
+/* Copy COUNT objects from FROM to TO. The source and destination do
+ not overlap. */
+# ifndef YYCOPY
+# if defined __GNUC__ && 1 < __GNUC__
+# define YYCOPY(To, From, Count) \
+ __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
+# else
+# define YYCOPY(To, From, Count) \
+ do \
+ { \
+ YYSIZE_T yyi; \
+ for (yyi = 0; yyi < (Count); yyi++) \
+ (To)[yyi] = (From)[yyi]; \
+ } \
+ while (YYID (0))
+# endif
+# endif
+
+/* Relocate STACK from its old location to the new one. The
+ local variables YYSIZE and YYSTACKSIZE give the old and new number of
+ elements in the stack, and YYPTR gives the new location of the
+ stack. Advance YYPTR to a properly aligned location for the next
+ stack. */
+# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
+ do \
+ { \
+ YYSIZE_T yynewbytes; \
+ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
+ Stack = &yyptr->Stack_alloc; \
+ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / sizeof (*yyptr); \
+ } \
+ while (YYID (0))
+
+#endif
+
+/* YYFINAL -- State number of the termination state. */
+#define YYFINAL 9
+/* YYLAST -- Last index in YYTABLE. */
+#define YYLAST 23
+
+/* YYNTOKENS -- Number of terminals. */
+#define YYNTOKENS 23
+/* YYNNTS -- Number of nonterminals. */
+#define YYNNTS 11
+/* YYNRULES -- Number of rules. */
+#define YYNRULES 16
+/* YYNSTATES -- Number of states. */
+#define YYNSTATES 30
+
+/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
+#define YYUNDEFTOK 2
+#define YYMAXUTOK 277
+
+#define YYTRANSLATE(YYX) \
+ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
+
+/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
+static const yytype_uint8 yytranslate[] =
+{
+ 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22
+};
+
+#if YYDEBUG
+/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
+ YYRHS. */
+static const yytype_uint8 yyprhs[] =
+{
+ 0, 0, 3, 5, 7, 9, 12, 15, 16, 19,
+ 22, 23, 24, 30, 31, 39, 44
+};
+
+/* YYRHS -- A `-1'-separated list of the rules' RHS. */
+static const yytype_int8 yyrhs[] =
+{
+ 24, 0, -1, 25, -1, 28, -1, 30, -1, 26,
+ 25, -1, 26, 33, -1, -1, 27, 25, -1, 27,
+ 32, -1, -1, -1, 3, 12, 29, 26, 9, -1,
+ -1, 4, 7, 22, 6, 31, 27, 9, -1, 8,
+ 22, 10, 22, -1, 5, 12, 10, 12, -1
+};
+
+/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
+static const yytype_uint16 yyrline[] =
+{
+ 0, 132, 132, 152, 152, 154, 154, 154, 156, 156,
+ 156, 158, 158, 214, 214, 293, 311
+};
+#endif
+
+#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
+/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
+ First, the terminals, then, starting at YYNTOKENS, nonterminals. */
+static const char *const yytname[] =
+{
+ "$end", "error", "$undefined", "CHIP", "DEVICE", "REGISTER", "BOOL",
+ "BUS", "RESOURCE", "END", "EQUALS", "HEX", "STRING", "PCI", "PNP", "I2C",
+ "APIC", "APIC_CLUSTER", "PCI_DOMAIN", "IRQ", "DRQ", "IO", "NUMBER",
+ "$accept", "devtree", "devchip", "devices", "devicesorresources", "chip",
+ "@1", "device", "@2", "resource", "registers", 0
+};
+#endif
+
+# ifdef YYPRINT
+/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
+ token YYLEX-NUM. */
+static const yytype_uint16 yytoknum[] =
+{
+ 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277
+};
+# endif
+
+/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
+static const yytype_uint8 yyr1[] =
+{
+ 0, 23, 24, 25, 25, 26, 26, 26, 27, 27,
+ 27, 29, 28, 31, 30, 32, 33
+};
+
+/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
+static const yytype_uint8 yyr2[] =
+{
+ 0, 2, 1, 1, 1, 2, 2, 0, 2, 2,
+ 0, 0, 5, 0, 7, 4, 4
+};
+
+/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
+ STATE-NUM when YYTABLE doesn't specify something else to do. Zero
+ means the default is an error. */
+static const yytype_uint8 yydefact[] =
+{
+ 0, 0, 0, 0, 2, 3, 4, 11, 0, 1,
+ 7, 0, 0, 13, 0, 12, 5, 6, 10, 0,
+ 0, 0, 0, 14, 8, 9, 16, 0, 0, 15
+};
+
+/* YYDEFGOTO[NTERM-NUM]. */
+static const yytype_int8 yydefgoto[] =
+{
+ -1, 3, 4, 12, 20, 5, 10, 6, 18, 25,
+ 17
+};
+
+/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+ STATE-NUM. */
+#define YYPACT_NINF -13
+static const yytype_int8 yypact[] =
+{
+ 8, -6, 6, 14, -13, -13, -13, -13, -7, -13,
+ -13, 10, -2, -13, 5, -13, -13, -13, -13, 9,
+ 1, 11, -4, -13, -13, -13, -13, 12, -1, -13
+};
+
+/* YYPGOTO[NTERM-NUM]. */
+static const yytype_int8 yypgoto[] =
+{
+ -13, -13, -12, -13, -13, -13, -13, -13, -13, -13,
+ -13
+};
+
+/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
+ positive, shift that token. If negative, reduce the rule which
+ number is the opposite. If zero, do what YYDEFACT says.
+ If YYTABLE_NINF, syntax error. */
+#define YYTABLE_NINF -1
+static const yytype_uint8 yytable[] =
+{
+ 16, 1, 2, 14, 1, 2, 7, 15, 24, 22,
+ 23, 1, 2, 8, 9, 11, 13, 19, 27, 21,
+ 0, 29, 28, 26
+};
+
+static const yytype_int8 yycheck[] =
+{
+ 12, 3, 4, 5, 3, 4, 12, 9, 20, 8,
+ 9, 3, 4, 7, 0, 22, 6, 12, 22, 10,
+ -1, 22, 10, 12
+};
+
+/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+ symbol of state STATE-NUM. */
+static const yytype_uint8 yystos[] =
+{
+ 0, 3, 4, 24, 25, 28, 30, 12, 7, 0,
+ 29, 22, 26, 6, 5, 9, 25, 33, 31, 12,
+ 27, 10, 8, 9, 25, 32, 12, 22, 10, 22
+};
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY (-2)
+#define YYEOF 0
+
+#define YYACCEPT goto yyacceptlab
+#define YYABORT goto yyabortlab
+#define YYERROR goto yyerrorlab
+
+
+/* Like YYERROR except do call yyerror. This remains here temporarily
+ to ease the transition to the new meaning of YYERROR, for GCC.
+ Once GCC version 2 has supplanted version 1, this can go. */
+
+#define YYFAIL goto yyerrlab
+
+#define YYRECOVERING() (!!yyerrstatus)
+
+#define YYBACKUP(Token, Value) \
+do \
+ if (yychar == YYEMPTY && yylen == 1) \
+ { \
+ yychar = (Token); \
+ yylval = (Value); \
+ yytoken = YYTRANSLATE (yychar); \
+ YYPOPSTACK (1); \
+ goto yybackup; \
+ } \
+ else \
+ { \
+ yyerror (YY_("syntax error: cannot back up")); \
+ YYERROR; \
+ } \
+while (YYID (0))
+
+
+#define YYTERROR 1
+#define YYERRCODE 256
+
+
+/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
+ If N is 0, then set CURRENT to the empty location which ends
+ the previous symbol: RHS[0] (always defined). */
+
+#define YYRHSLOC(Rhs, K) ((Rhs)[K])
+#ifndef YYLLOC_DEFAULT
+# define YYLLOC_DEFAULT(Current, Rhs, N) \
+ do \
+ if (YYID (N)) \
+ { \
+ (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
+ (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
+ (Current).last_line = YYRHSLOC (Rhs, N).last_line; \
+ (Current).last_column = YYRHSLOC (Rhs, N).last_column; \
+ } \
+ else \
+ { \
+ (Current).first_line = (Current).last_line = \
+ YYRHSLOC (Rhs, 0).last_line; \
+ (Current).first_column = (Current).last_column = \
+ YYRHSLOC (Rhs, 0).last_column; \
+ } \
+ while (YYID (0))
+#endif
+
+
+/* YY_LOCATION_PRINT -- Print the location on the stream.
+ This macro was not mandated originally: define only if we know
+ we won't break user code: when these are the locations we know. */
+
+#ifndef YY_LOCATION_PRINT
+# if YYLTYPE_IS_TRIVIAL
+# define YY_LOCATION_PRINT(File, Loc) \
+ fprintf (File, "%d.%d-%d.%d", \
+ (Loc).first_line, (Loc).first_column, \
+ (Loc).last_line, (Loc).last_column)
+# else
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+# endif
+#endif
+
+
+/* YYLEX -- calling `yylex' with the right arguments. */
+
+#ifdef YYLEX_PARAM
+# define YYLEX yylex (YYLEX_PARAM)
+#else
+# define YYLEX yylex ()
+#endif
+
+/* Enable debugging if requested. */
+#if YYDEBUG
+
+# ifndef YYFPRINTF
+# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
+# define YYFPRINTF fprintf
+# endif
+
+# define YYDPRINTF(Args) \
+do { \
+ if (yydebug) \
+ YYFPRINTF Args; \
+} while (YYID (0))
+
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
+do { \
+ if (yydebug) \
+ { \
+ YYFPRINTF (stderr, "%s ", Title); \
+ yy_symbol_print (stderr, \
+ Type, Value); \
+ YYFPRINTF (stderr, "\n"); \
+ } \
+} while (YYID (0))
+
+
+/*--------------------------------.
+| Print this symbol on YYOUTPUT. |
+`--------------------------------*/
+
+/*ARGSUSED*/
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
+#else
+static void
+yy_symbol_value_print (yyoutput, yytype, yyvaluep)
+ FILE *yyoutput;
+ int yytype;
+ YYSTYPE const * const yyvaluep;
+#endif
+{
+ if (!yyvaluep)
+ return;
+# ifdef YYPRINT
+ if (yytype < YYNTOKENS)
+ YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
+# else
+ YYUSE (yyoutput);
+# endif
+ switch (yytype)
+ {
+ default:
+ break;
+ }
+}
+
+
+/*--------------------------------.
+| Print this symbol on YYOUTPUT. |
+`--------------------------------*/
+
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
+#else
+static void
+yy_symbol_print (yyoutput, yytype, yyvaluep)
+ FILE *yyoutput;
+ int yytype;
+ YYSTYPE const * const yyvaluep;
+#endif
+{
+ if (yytype < YYNTOKENS)
+ YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
+ else
+ YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
+
+ yy_symbol_value_print (yyoutput, yytype, yyvaluep);
+ YYFPRINTF (yyoutput, ")");
+}
+
+/*------------------------------------------------------------------.
+| yy_stack_print -- Print the state stack from its BOTTOM up to its |
+| TOP (included). |
+`------------------------------------------------------------------*/
+
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
+#else
+static void
+yy_stack_print (yybottom, yytop)
+ yytype_int16 *yybottom;
+ yytype_int16 *yytop;
+#endif
+{
+ YYFPRINTF (stderr, "Stack now");
+ for (; yybottom <= yytop; yybottom++)
+ {
+ int yybot = *yybottom;
+ YYFPRINTF (stderr, " %d", yybot);
+ }
+ YYFPRINTF (stderr, "\n");
+}
+
+# define YY_STACK_PRINT(Bottom, Top) \
+do { \
+ if (yydebug) \
+ yy_stack_print ((Bottom), (Top)); \
+} while (YYID (0))
+
+
+/*------------------------------------------------.
+| Report that the YYRULE is going to be reduced. |
+`------------------------------------------------*/
+
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
+#else
+static void
+yy_reduce_print (yyvsp, yyrule)
+ YYSTYPE *yyvsp;
+ int yyrule;
+#endif
+{
+ int yynrhs = yyr2[yyrule];
+ int yyi;
+ unsigned long int yylno = yyrline[yyrule];
+ YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
+ yyrule - 1, yylno);
+ /* The symbols being reduced. */
+ for (yyi = 0; yyi < yynrhs; yyi++)
+ {
+ YYFPRINTF (stderr, " $%d = ", yyi + 1);
+ yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
+ &(yyvsp[(yyi + 1) - (yynrhs)])
+ );
+ YYFPRINTF (stderr, "\n");
+ }
+}
+
+# define YY_REDUCE_PRINT(Rule) \
+do { \
+ if (yydebug) \
+ yy_reduce_print (yyvsp, Rule); \
+} while (YYID (0))
+
+/* Nonzero means print parse trace. It is left uninitialized so that
+ multiple parsers can coexist. */
+int yydebug;
+#else /* !YYDEBUG */
+# define YYDPRINTF(Args)
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
+# define YY_STACK_PRINT(Bottom, Top)
+# define YY_REDUCE_PRINT(Rule)
+#endif /* !YYDEBUG */
+
+
+/* YYINITDEPTH -- initial size of the parser's stacks. */
+#ifndef YYINITDEPTH
+# define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
+ if the built-in stack extension method is used).
+
+ Do not make this value too large; the results are undefined if
+ YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
+ evaluated with infinite-precision integer arithmetic. */
+
+#ifndef YYMAXDEPTH
+# define YYMAXDEPTH 10000
+#endif
+
+
+
+#if YYERROR_VERBOSE
+
+# ifndef yystrlen
+# if defined __GLIBC__ && defined _STRING_H
+# define yystrlen strlen
+# else
+/* Return the length of YYSTR. */
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static YYSIZE_T
+yystrlen (const char *yystr)
+#else
+static YYSIZE_T
+yystrlen (yystr)
+ const char *yystr;
+#endif
+{
+ YYSIZE_T yylen;
+ for (yylen = 0; yystr[yylen]; yylen++)
+ continue;
+ return yylen;
+}
+# endif
+# endif
+
+# ifndef yystpcpy
+# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
+# define yystpcpy stpcpy
+# else
+/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
+ YYDEST. */
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static char *
+yystpcpy (char *yydest, const char *yysrc)
+#else
+static char *
+yystpcpy (yydest, yysrc)
+ char *yydest;
+ const char *yysrc;
+#endif
+{
+ char *yyd = yydest;
+ const char *yys = yysrc;
+
+ while ((*yyd++ = *yys++) != '\0')
+ continue;
+
+ return yyd - 1;
+}
+# endif
+# endif
+
+# ifndef yytnamerr
+/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
+ quotes and backslashes, so that it's suitable for yyerror. The
+ heuristic is that double-quoting is unnecessary unless the string
+ contains an apostrophe, a comma, or backslash (other than
+ backslash-backslash). YYSTR is taken from yytname. If YYRES is
+ null, do not copy; instead, return the length of what the result
+ would have been. */
+static YYSIZE_T
+yytnamerr (char *yyres, const char *yystr)
+{
+ if (*yystr == '"')
+ {
+ YYSIZE_T yyn = 0;
+ char const *yyp = yystr;
+
+ for (;;)
+ switch (*++yyp)
+ {
+ case '\'':
+ case ',':
+ goto do_not_strip_quotes;
+
+ case '\\':
+ if (*++yyp != '\\')
+ goto do_not_strip_quotes;
+ /* Fall through. */
+ default:
+ if (yyres)
+ yyres[yyn] = *yyp;
+ yyn++;
+ break;
+
+ case '"':
+ if (yyres)
+ yyres[yyn] = '\0';
+ return yyn;
+ }
+ do_not_strip_quotes: ;
+ }
+
+ if (! yyres)
+ return yystrlen (yystr);
+
+ return yystpcpy (yyres, yystr) - yyres;
+}
+# endif
+
+/* Copy into YYRESULT an error message about the unexpected token
+ YYCHAR while in state YYSTATE. Return the number of bytes copied,
+ including the terminating null byte. If YYRESULT is null, do not
+ copy anything; just return the number of bytes that would be
+ copied. As a special case, return 0 if an ordinary "syntax error"
+ message will do. Return YYSIZE_MAXIMUM if overflow occurs during
+ size calculation. */
+static YYSIZE_T
+yysyntax_error (char *yyresult, int yystate, int yychar)
+{
+ int yyn = yypact[yystate];
+
+ if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
+ return 0;
+ else
+ {
+ int yytype = YYTRANSLATE (yychar);
+ YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
+ YYSIZE_T yysize = yysize0;
+ YYSIZE_T yysize1;
+ int yysize_overflow = 0;
+ enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+ int yyx;
+
+# if 0
+ /* This is so xgettext sees the translatable formats that are
+ constructed on the fly. */
+ YY_("syntax error, unexpected %s");
+ YY_("syntax error, unexpected %s, expecting %s");
+ YY_("syntax error, unexpected %s, expecting %s or %s");
+ YY_("syntax error, unexpected %s, expecting %s or %s or %s");
+ YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
+# endif
+ char *yyfmt;
+ char const *yyf;
+ static char const yyunexpected[] = "syntax error, unexpected %s";
+ static char const yyexpecting[] = ", expecting %s";
+ static char const yyor[] = " or %s";
+ char yyformat[sizeof yyunexpected
+ + sizeof yyexpecting - 1
+ + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
+ * (sizeof yyor - 1))];
+ char const *yyprefix = yyexpecting;
+
+ /* Start YYX at -YYN if negative to avoid negative indexes in
+ YYCHECK. */
+ int yyxbegin = yyn < 0 ? -yyn : 0;
+
+ /* Stay within bounds of both yycheck and yytname. */
+ int yychecklim = YYLAST - yyn + 1;
+ int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+ int yycount = 1;
+
+ yyarg[0] = yytname[yytype];
+ yyfmt = yystpcpy (yyformat, yyunexpected);
+
+ for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
+ {
+ if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+ {
+ yycount = 1;
+ yysize = yysize0;
+ yyformat[sizeof yyunexpected - 1] = '\0';
+ break;
+ }
+ yyarg[yycount++] = yytname[yyx];
+ yysize1 = yysize + yytnamerr (0, yytname[yyx]);
+ yysize_overflow |= (yysize1 < yysize);
+ yysize = yysize1;
+ yyfmt = yystpcpy (yyfmt, yyprefix);
+ yyprefix = yyor;
+ }
+
+ yyf = YY_(yyformat);
+ yysize1 = yysize + yystrlen (yyf);
+ yysize_overflow |= (yysize1 < yysize);
+ yysize = yysize1;
+
+ if (yysize_overflow)
+ return YYSIZE_MAXIMUM;
+
+ if (yyresult)
+ {
+ /* Avoid sprintf, as that infringes on the user's name space.
+ Don't have undefined behavior even if the translation
+ produced a string with the wrong number of "%s"s. */
+ char *yyp = yyresult;
+ int yyi = 0;
+ while ((*yyp = *yyf) != '\0')
+ {
+ if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
+ {
+ yyp += yytnamerr (yyp, yyarg[yyi++]);
+ yyf += 2;
+ }
+ else
+ {
+ yyp++;
+ yyf++;
+ }
+ }
+ }
+ return yysize;
+ }
+}
+#endif /* YYERROR_VERBOSE */
+
+
+/*-----------------------------------------------.
+| Release the memory associated to this symbol. |
+`-----------------------------------------------*/
+
+/*ARGSUSED*/
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+static void
+yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
+#else
+static void
+yydestruct (yymsg, yytype, yyvaluep)
+ const char *yymsg;
+ int yytype;
+ YYSTYPE *yyvaluep;
+#endif
+{
+ YYUSE (yyvaluep);
+
+ if (!yymsg)
+ yymsg = "Deleting";
+ YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
+
+ switch (yytype)
+ {
+
+ default:
+ break;
+ }
+}
+
+/* Prevent warnings from -Wmissing-prototypes. */
+#ifdef YYPARSE_PARAM
+#if defined __STDC__ || defined __cplusplus
+int yyparse (void *YYPARSE_PARAM);
+#else
+int yyparse ();
+#endif
+#else /* ! YYPARSE_PARAM */
+#if defined __STDC__ || defined __cplusplus
+int yyparse (void);
+#else
+int yyparse ();
+#endif
+#endif /* ! YYPARSE_PARAM */
+
+
+/* The lookahead symbol. */
+int yychar;
+
+/* The semantic value of the lookahead symbol. */
+YYSTYPE yylval;
+
+/* Number of syntax errors so far. */
+int yynerrs;
+
+
+
+/*-------------------------.
+| yyparse or yypush_parse. |
+`-------------------------*/
+
+#ifdef YYPARSE_PARAM
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+int
+yyparse (void *YYPARSE_PARAM)
+#else
+int
+yyparse (YYPARSE_PARAM)
+ void *YYPARSE_PARAM;
+#endif
+#else /* ! YYPARSE_PARAM */
+#if (defined __STDC__ || defined __C99__FUNC__ \
+ || defined __cplusplus || defined _MSC_VER)
+int
+yyparse (void)
+#else
+int
+yyparse ()
+
+#endif
+#endif
+{
+
+
+ int yystate;
+ /* Number of tokens to shift before error messages enabled. */
+ int yyerrstatus;
+
+ /* The stacks and their tools:
+ `yyss': related to states.
+ `yyvs': related to semantic values.
+
+ Refer to the stacks thru separate pointers, to allow yyoverflow
+ to reallocate them elsewhere. */
+
+ /* The state stack. */
+ yytype_int16 yyssa[YYINITDEPTH];
+ yytype_int16 *yyss;
+ yytype_int16 *yyssp;
+
+ /* The semantic value stack. */
+ YYSTYPE yyvsa[YYINITDEPTH];
+ YYSTYPE *yyvs;
+ YYSTYPE *yyvsp;
+
+ YYSIZE_T yystacksize;
+
+ int yyn;
+ int yyresult;
+ /* Lookahead token as an internal (translated) token number. */
+ int yytoken;
+ /* The variables used to return semantic value and location from the
+ action routines. */
+ YYSTYPE yyval;
+
+#if YYERROR_VERBOSE
+ /* Buffer for error messages, and its allocated size. */
+ char yymsgbuf[128];
+ char *yymsg = yymsgbuf;
+ YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
+#endif
+
+#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
+
+ /* The number of symbols on the RHS of the reduced rule.
+ Keep to zero when no symbol should be popped. */
+ int yylen = 0;
+
+ yytoken = 0;
+ yyss = yyssa;
+ yyvs = yyvsa;
+ yystacksize = YYINITDEPTH;
+
+ YYDPRINTF ((stderr, "Starting parse\n"));
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+
+ /* Initialize stack pointers.
+ Waste one element of value and location stack
+ so that they stay on the same level as the state stack.
+ The wasted elements are never initialized. */
+ yyssp = yyss;
+ yyvsp = yyvs;
+
+ goto yysetstate;
+
+/*------------------------------------------------------------.
+| yynewstate -- Push a new state, which is found in yystate. |
+`------------------------------------------------------------*/
+ yynewstate:
+ /* In all cases, when you get here, the value and location stacks
+ have just been pushed. So pushing a state here evens the stacks. */
+ yyssp++;
+
+ yysetstate:
+ *yyssp = yystate;
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ {
+ /* Get the current used size of the three stacks, in elements. */
+ YYSIZE_T yysize = yyssp - yyss + 1;
+
+#ifdef yyoverflow
+ {
+ /* Give user a chance to reallocate the stack. Use copies of
+ these so that the &'s don't force the real ones into
+ memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ yytype_int16 *yyss1 = yyss;
+
+ /* Each stack pointer address is followed by the size of the
+ data in use in that stack, in bytes. This used to be a
+ conditional around just the two extra args, but that might
+ be undefined if yyoverflow is a macro. */
+ yyoverflow (YY_("memory exhausted"),
+ &yyss1, yysize * sizeof (*yyssp),
+ &yyvs1, yysize * sizeof (*yyvsp),
+ &yystacksize);
+
+ yyss = yyss1;
+ yyvs = yyvs1;
+ }
+#else /* no yyoverflow */
+# ifndef YYSTACK_RELOCATE
+ goto yyexhaustedlab;
+# else
+ /* Extend the stack our own way. */
+ if (YYMAXDEPTH <= yystacksize)
+ goto yyexhaustedlab;
+ yystacksize *= 2;
+ if (YYMAXDEPTH < yystacksize)
+ yystacksize = YYMAXDEPTH;
+
+ {
+ yytype_int16 *yyss1 = yyss;
+ union yyalloc *yyptr =
+ (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
+ if (! yyptr)
+ goto yyexhaustedlab;
+ YYSTACK_RELOCATE (yyss_alloc, yyss);
+ YYSTACK_RELOCATE (yyvs_alloc, yyvs);
+# undef YYSTACK_RELOCATE
+ if (yyss1 != yyssa)
+ YYSTACK_FREE (yyss1);
+ }
+# endif
+#endif /* no yyoverflow */
+
+ yyssp = yyss + yysize - 1;
+ yyvsp = yyvs + yysize - 1;
+
+ YYDPRINTF ((stderr, "Stack size increased to %lu\n",
+ (unsigned long int) yystacksize));
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ YYABORT;
+ }
+
+ YYDPRINTF ((stderr, "Entering state %d\n", yystate));
+
+ if (yystate == YYFINAL)
+ YYACCEPT;
+
+ goto yybackup;
+
+/*-----------.
+| yybackup. |
+`-----------*/
+yybackup:
+
+ /* Do appropriate processing given the current state. Read a
+ lookahead token if we need one and don't already have one. */
+
+ /* First try to decide what to do without reference to lookahead token. */
+ yyn = yypact[yystate];
+ if (yyn == YYPACT_NINF)
+ goto yydefault;
+
+ /* Not known => get a lookahead token if don't already have one. */
+
+ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
+ if (yychar == YYEMPTY)
+ {
+ YYDPRINTF ((stderr, "Reading a token: "));
+ yychar = YYLEX;
+ }
+
+ if (yychar <= YYEOF)
+ {
+ yychar = yytoken = YYEOF;
+ YYDPRINTF ((stderr, "Now at end of input.\n"));
+ }
+ else
+ {
+ yytoken = YYTRANSLATE (yychar);
+ YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
+ }
+
+ /* If the proper action on seeing token YYTOKEN is to reduce or to
+ detect an error, take that action. */
+ yyn += yytoken;
+ if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
+ goto yydefault;
+ yyn = yytable[yyn];
+ if (yyn <= 0)
+ {
+ if (yyn == 0 || yyn == YYTABLE_NINF)
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+
+ /* Count tokens shifted since error; after three, turn off error
+ status. */
+ if (yyerrstatus)
+ yyerrstatus--;
+
+ /* Shift the lookahead token. */
+ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
+
+ /* Discard the shifted token. */
+ yychar = YYEMPTY;
+
+ yystate = yyn;
+ *++yyvsp = yylval;
+
+ goto yynewstate;
+
+
+/*-----------------------------------------------------------.
+| yydefault -- do the default action for the current state. |
+`-----------------------------------------------------------*/
+yydefault:
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+ goto yyreduce;
+
+
+/*-----------------------------.
+| yyreduce -- Do a reduction. |
+`-----------------------------*/
+yyreduce:
+ /* yyn is the number of a rule to reduce with. */
+ yylen = yyr2[yyn];
+
+ /* If YYLEN is nonzero, implement the default value of the action:
+ `$$ = $1'.
+
+ Otherwise, the following line sets YYVAL to garbage.
+ This behavior is undocumented and Bison
+ users should not rely upon it. Assigning to YYVAL
+ unconditionally makes the parser a bit smaller, and it avoids a
+ GCC warning that YYVAL may be used uninitialized. */
+ yyval = yyvsp[1-yylen];
+
+
+ YY_REDUCE_PRINT (yyn);
+ switch (yyn)
+ {
+ case 2:
+
+/* Line 1455 of yacc.c */
+#line 132 "/home/Patrick/work/coreboot/util/sconfig/sconfig.y"
+ {
+ root.next_sibling = root.children;
+ root.next_sibling->next_sibling = root.next_sibling->children;
+
+ struct device *dev = &root;
+ while (dev) {
+ /* skip "chip" elements in children chain */
+ while (dev->children && (dev->children->type == chip)) dev->children = dev->children->children;
+ /* skip "chip" elements and functions of the same device in sibling chain */
+ while (dev->sibling && dev->sibling->used) dev->sibling = dev->sibling->sibling;
+ /* If end of chain, and parent is a chip, move on */
+ if (!dev->sibling && (dev->parent->type == chip)) dev->sibling = dev->parent->sibling;
+ /* skip chips */
+ while (dev->sibling && dev->sibling->type == chip) dev->sibling = dev->sibling->children;
+ /* skip duplicate function elements in nextdev chain */
+ while (dev->nextdev && dev->nextdev->used) dev->nextdev = dev->nextdev->nextdev;
+ dev = dev->next_sibling;
+ }
+ ;}
+ break;
+
+ case 11:
+
+/* Line 1455 of yacc.c */
+#line 158 "/home/Patrick/work/coreboot/util/sconfig/sconfig.y"
+ {
+ (yyval.device) = new_dev();
+ (yyval.device)->chiph_exists = 1;
+ (yyval.device)->name = (yyvsp[(2) - (2)].string);
+ (yyval.device)->name_underscore = strdup((yyval.device)->name);
+ char *c;
+ for (c = (yyval.device)->name_underscore; *c; c++) {
+ if (*c == '/') *c = '_';
+ }
+ (yyval.device)->type = chip;
+ (yyval.device)->chip = (yyval.device);
+
+ struct stat st;
+ char *chip_h = malloc(strlen((yyvsp[(2) - (2)].string))+12);
+ sprintf(chip_h, "src/%s/chip.h", (yyvsp[(2) - (2)].string));
+ if ((stat(chip_h, &st) == -1) && (errno == ENOENT))
+ (yyval.device)->chiph_exists = 0;
+
+ if (cur_parent->latestchild) {
+ cur_parent->latestchild->next_sibling = (yyval.device);
+ cur_parent->latestchild->sibling = (yyval.device);
+ }
+ cur_parent->latestchild = (yyval.device);
+ if (!cur_parent->children)
+ cur_parent->children = (yyval.device);
+
+ cur_parent = (yyval.device);
+;}
+ break;
+
+ case 12:
+
+/* Line 1455 of yacc.c */
+#line 186 "/home/Patrick/work/coreboot/util/sconfig/sconfig.y"
+ {
+ cur_parent = (yyvsp[(3) - (5)].device)->parent;
+
+ fold_in((yyvsp[(3) - (5)].device));
+
+ if ((yyvsp[(3) - (5)].device)->chiph_exists) {
+ int include_exists = 0;
+ struct header *h = &headers;
+ while (h->next) {
+ int result = strcmp((yyvsp[(3) - (5)].device)->name, h->next->name);
+ if (result == 0) {
+ include_exists = 1;
+ break;
+ }
+ if (result < 0) break;
+ h = h->next;
+ }
+ if (!include_exists) {
+ struct header *tmp = h->next;
+ h->next = malloc(sizeof(struct header));
+ memset(h->next, 0, sizeof(struct header));
+ h->next->name = (yyvsp[(3) - (5)].device)->name;
+ h->next->next = tmp;
+ break;
+ }
+ }
+;}
+ break;
+
+ case 13:
+
+/* Line 1455 of yacc.c */
+#line 214 "/home/Patrick/work/coreboot/util/sconfig/sconfig.y"
+ {
+ (yyval.device) = new_dev();
+ (yyval.device)->bustype = (yyvsp[(2) - (4)].number);
+
+ char *tmp;
+ (yyval.device)->path_a = strtol(strdup((yyvsp[(3) - (4)].string)), &tmp, 16);
+ if (*tmp == '.') {
+ tmp++;
+ (yyval.device)->path_b = strtol(tmp, NULL, 16);
+ }
+
+ char *name = malloc(10);
+ sprintf(name, "_dev%d", (yyval.device)->id);
+ (yyval.device)->name = name;
+ (yyval.device)->name_underscore = name; // shouldn't be necessary, but avoid 0-ptr
+ (yyval.device)->type = device;
+ (yyval.device)->enabled = (yyvsp[(4) - (4)].number);
+ (yyval.device)->chip = (yyval.device)->parent->chip;
+
+ if (cur_parent->latestchild) {
+ cur_parent->latestchild->next_sibling = (yyval.device);
+ cur_parent->latestchild->sibling = (yyval.device);
+ }
+ cur_parent->latestchild = (yyval.device);
+ if (!cur_parent->children)
+ cur_parent->children = (yyval.device);
+
+ lastdev->nextdev = (yyval.device);
+ lastdev = (yyval.device);
+ if ((yyvsp[(2) - (4)].number) == PCI) {
+ (yyval.device)->path = ".type=DEVICE_PATH_PCI,{.pci={ .devfn = PCI_DEVFN(0x%x,%d)}}";
+ }
+ if ((yyvsp[(2) - (4)].number) == PNP) {
+ (yyval.device)->path = ".type=DEVICE_PATH_PNP,{.pnp={ .port = 0x%x, .device = 0x%x }}";
+ }
+ if ((yyvsp[(2) - (4)].number) == I2C) {
+ (yyval.device)->path = ".type=DEVICE_PATH_I2C,{.i2c={ .device = 0x%x }}";
+ }
+ if ((yyvsp[(2) - (4)].number) == APIC) {
+ (yyval.device)->path = ".type=DEVICE_PATH_APIC,{.apic={ .apic_id = 0x%x }}";
+ }
+ if ((yyvsp[(2) - (4)].number) == APIC_CLUSTER) {
+ (yyval.device)->path = ".type=DEVICE_PATH_APIC_CLUSTER,{.apic_cluster={ .cluster = 0x%x }}";
+ }
+ if ((yyvsp[(2) - (4)].number) == PCI_DOMAIN) {
+ (yyval.device)->path = ".type=DEVICE_PATH_PCI_DOMAIN,{.pci_domain={ .domain = 0x%x }}";
+ }
+ cur_parent = (yyval.device);
+ cur_bus = (yyval.device);
+;}
+ break;
+
+ case 14:
+
+/* Line 1455 of yacc.c */
+#line 264 "/home/Patrick/work/coreboot/util/sconfig/sconfig.y"
+ {
+ cur_parent = (yyvsp[(5) - (7)].device)->parent;
+ cur_bus = (yyvsp[(5) - (7)].device)->bus;
+
+ fold_in((yyvsp[(5) - (7)].device));
+
+ struct device *d = (yyvsp[(5) - (7)].device)->children;
+ while (d) {
+ int link = 0;
+ struct device *cmp = d->next_sibling;
+ while (cmp && (cmp->bus == d->bus) && (cmp->path_a == d->path_a) && (cmp->path_b == d->path_b)) {
+ if (cmp->type==device && !cmp->used) {
+ if (device_match(d, cmp)) {
+ d->multidev = 1;
+
+ cmp->aliased_name = malloc(12);
+ sprintf(cmp->aliased_name, "_dev%d", cmp->id);
+ cmp->id = d->id;
+ cmp->name = d->name;
+ cmp->used = 1;
+ cmp->link = ++link;
+ }
+ }
+ cmp = cmp->next_sibling;
+ }
+ d = d->next_sibling;
+ }
+;}
+ break;
+
+ case 15:
+
+/* Line 1455 of yacc.c */
+#line 294 "/home/Patrick/work/coreboot/util/sconfig/sconfig.y"
+ {
+ struct resource *r = malloc(sizeof(struct resource));
+ memset (r, 0, sizeof(struct resource));
+ r->type = (yyvsp[(1) - (4)].number);
+ r->index = strtol((yyvsp[(2) - (4)].string), NULL, 0);
+ r->base = strtol((yyvsp[(4) - (4)].string), NULL, 0);
+ if (cur_parent->res) {
+ struct resource *head = cur_parent->res;
+ while (head->next) head = head->next;
+ head->next = r;
+ } else {
+ cur_parent->res = r;
+ }
+ cur_parent->rescnt++;
+ ;}
+ break;
+
+ case 16:
+
+/* Line 1455 of yacc.c */
+#line 312 "/home/Patrick/work/coreboot/util/sconfig/sconfig.y"
+ {
+ struct reg *r = malloc(sizeof(struct reg));
+ memset (r, 0, sizeof(struct reg));
+ r->key = (yyvsp[(2) - (4)].string);
+ r->value = (yyvsp[(4) - (4)].string);
+ if (cur_parent->reg) {
+ struct reg *head = cur_parent->reg;
+		// sort registers so the output matches the old (python) sconfig
+ int sort = strcmp(r->key, head->key);
+ if (sort == 0) {
+ printf("ERROR: duplicate 'register' key.\n");
+ exit(1);
+ }
+ if (sort<0) {
+ r->next = head;
+ cur_parent->reg = r;
+ } else {
+ while ((head->next) && (strcmp(head->next->key, r->key)<0)) head = head->next;
+ r->next = head->next;
+ head->next = r;
+ }
+ } else {
+ cur_parent->reg = r;
+ }
+ ;}
+ break;
+
+
+
+/* Line 1455 of yacc.c */
+#line 1715 "/home/Patrick/work/coreboot/util/sconfig/sconfig.tab.c_shipped"
+ default: break;
+ }
+ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
+
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+
+ *++yyvsp = yyval;
+
+ /* Now `shift' the result of the reduction. Determine what state
+ that goes to, based on the state we popped back to and the rule
+ number reduced by. */
+
+ yyn = yyr1[yyn];
+
+ yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
+ if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
+ yystate = yytable[yystate];
+ else
+ yystate = yydefgoto[yyn - YYNTOKENS];
+
+ goto yynewstate;
+
+
+/*------------------------------------.
+| yyerrlab -- here on detecting error |
+`------------------------------------*/
+yyerrlab:
+ /* If not already recovering from an error, report this error. */
+ if (!yyerrstatus)
+ {
+ ++yynerrs;
+#if ! YYERROR_VERBOSE
+ yyerror (YY_("syntax error"));
+#else
+ {
+ YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
+ if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
+ {
+ YYSIZE_T yyalloc = 2 * yysize;
+ if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
+ yyalloc = YYSTACK_ALLOC_MAXIMUM;
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+ yymsg = (char *) YYSTACK_ALLOC (yyalloc);
+ if (yymsg)
+ yymsg_alloc = yyalloc;
+ else
+ {
+ yymsg = yymsgbuf;
+ yymsg_alloc = sizeof yymsgbuf;
+ }
+ }
+
+ if (0 < yysize && yysize <= yymsg_alloc)
+ {
+ (void) yysyntax_error (yymsg, yystate, yychar);
+ yyerror (yymsg);
+ }
+ else
+ {
+ yyerror (YY_("syntax error"));
+ if (yysize != 0)
+ goto yyexhaustedlab;
+ }
+ }
+#endif
+ }
+
+
+
+ if (yyerrstatus == 3)
+ {
+ /* If just tried and failed to reuse lookahead token after an
+ error, discard it. */
+
+ if (yychar <= YYEOF)
+ {
+ /* Return failure if at end of input. */
+ if (yychar == YYEOF)
+ YYABORT;
+ }
+ else
+ {
+ yydestruct ("Error: discarding",
+ yytoken, &yylval);
+ yychar = YYEMPTY;
+ }
+ }
+
+ /* Else will try to reuse lookahead token after shifting the error
+ token. */
+ goto yyerrlab1;
+
+
+/*---------------------------------------------------.
+| yyerrorlab -- error raised explicitly by YYERROR. |
+`---------------------------------------------------*/
+yyerrorlab:
+
+ /* Pacify compilers like GCC when the user code never invokes
+ YYERROR and the label yyerrorlab therefore never appears in user
+ code. */
+ if (/*CONSTCOND*/ 0)
+ goto yyerrorlab;
+
+ /* Do not reclaim the symbols of the rule which action triggered
+ this YYERROR. */
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+ yystate = *yyssp;
+ goto yyerrlab1;
+
+
+/*-------------------------------------------------------------.
+| yyerrlab1 -- common code for both syntax error and YYERROR. |
+`-------------------------------------------------------------*/
+yyerrlab1:
+ yyerrstatus = 3; /* Each real token shifted decrements this. */
+
+ for (;;)
+ {
+ yyn = yypact[yystate];
+ if (yyn != YYPACT_NINF)
+ {
+ yyn += YYTERROR;
+ if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+ {
+ yyn = yytable[yyn];
+ if (0 < yyn)
+ break;
+ }
+ }
+
+ /* Pop the current state because it cannot handle the error token. */
+ if (yyssp == yyss)
+ YYABORT;
+
+
+ yydestruct ("Error: popping",
+ yystos[yystate], yyvsp);
+ YYPOPSTACK (1);
+ yystate = *yyssp;
+ YY_STACK_PRINT (yyss, yyssp);
+ }
+
+ *++yyvsp = yylval;
+
+
+ /* Shift the error token. */
+ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-------------------------------------.
+| yyacceptlab -- YYACCEPT comes here. |
+`-------------------------------------*/
+yyacceptlab:
+ yyresult = 0;
+ goto yyreturn;
+
+/*-----------------------------------.
+| yyabortlab -- YYABORT comes here. |
+`-----------------------------------*/
+yyabortlab:
+ yyresult = 1;
+ goto yyreturn;
+
+#if !defined(yyoverflow) || YYERROR_VERBOSE
+/*-------------------------------------------------.
+| yyexhaustedlab -- memory exhaustion comes here. |
+`-------------------------------------------------*/
+yyexhaustedlab:
+ yyerror (YY_("memory exhausted"));
+ yyresult = 2;
+ /* Fall through. */
+#endif
+
+yyreturn:
+ if (yychar != YYEMPTY)
+ yydestruct ("Cleanup: discarding lookahead",
+ yytoken, &yylval);
+ /* Do not reclaim the symbols of the rule which action triggered
+ this YYABORT or YYACCEPT. */
+ YYPOPSTACK (yylen);
+ YY_STACK_PRINT (yyss, yyssp);
+ while (yyssp != yyss)
+ {
+ yydestruct ("Cleanup: popping",
+ yystos[*yyssp], yyvsp);
+ YYPOPSTACK (1);
+ }
+#ifndef yyoverflow
+ if (yyss != yyssa)
+ YYSTACK_FREE (yyss);
+#endif
+#if YYERROR_VERBOSE
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+#endif
+ /* Make sure YYID is used. */
+ return YYID (yyresult);
+}
+
+
+
+/* Line 1675 of yacc.c */
+#line 339 "/home/Patrick/work/coreboot/util/sconfig/sconfig.y"
+
+void pass0(FILE *fil, struct device *ptr) {
+ if ((ptr->type == device) && (ptr->id != 0) && (!ptr->used))
+ fprintf(fil, "struct device %s;\n", ptr->name);
+ if ((ptr->type == device) && (ptr->id != 0) && ptr->used)
+ fprintf(fil, "struct device %s;\n", ptr->aliased_name);
+}
+
+void pass1(FILE *fil, struct device *ptr) {
+ if (!ptr->used && (ptr->type == device)) {
+ fprintf(fil, "struct device %s = {\n", ptr->name);
+ fprintf(fil, "\t.ops = %s,\n", (ptr->ops)?(ptr->ops):"0");
+ fprintf(fil, "\t.bus = &%s.link[%d],\n", ptr->bus->name, ptr->bus->link);
+ fprintf(fil, "\t.path = {");
+ fprintf(fil, ptr->path, ptr->path_a, ptr->path_b);
+ fprintf(fil, "},\n");
+ fprintf(fil, "\t.enabled = %d,\n", ptr->enabled);
+ fprintf(fil, "\t.on_mainboard = 1,\n");
+ if (ptr->rescnt > 0) {
+ fprintf(fil, "\t.resources = %d,\n", ptr->rescnt);
+ fprintf(fil, "\t.resource = {\n");
+ struct resource *r = ptr->res;
+ while (r) {
+ fprintf(fil, "\t\t{ .flags=IORESOURCE_FIXED | IORESOURCE_ASSIGNED | IORESOURCE_");
+ if (r->type == IRQ) fprintf(fil, "IRQ");
+ if (r->type == DRQ) fprintf(fil, "DRQ");
+ if (r->type == IO) fprintf(fil, "IO");
+ fprintf(fil, ", .index=0x%x, .base=0x%x},\n", r->index, r->base);
+ r = r->next;
+ }
+ fprintf(fil, "\t },\n");
+ }
+ int link = 0;
+ fprintf(fil, "\t.link = {\n");
+ if (ptr->multidev) {
+ struct device *d = ptr;
+ while (d) {
+ if (device_match(d, ptr)) {
+ fprintf(fil, "\t\t[%d] = {\n", d->link);
+ fprintf(fil, "\t\t\t.link = %d,\n", d->link);
+ fprintf(fil, "\t\t\t.dev = &%s,\n", d->name);
+ if (d->children)
+ fprintf(fil, "\t\t\t.children = &%s,\n", d->children->name);
+ fprintf(fil, "\t\t},\n");
+ link++;
+ }
+ d = d->next_sibling;
+ }
+ } else {
+ if (ptr->children) {
+ fprintf(fil, "\t\t[0] = {\n");
+ fprintf(fil, "\t\t\t.link = 0,\n");
+ fprintf(fil, "\t\t\t.dev = &%s,\n", ptr->name);
+ fprintf(fil, "\t\t\t.children = &%s,\n", ptr->children->name);
+ fprintf(fil, "\t\t},\n");
+ link++;
+ }
+ }
+ fprintf(fil, "\t},\n");
+ fprintf(fil, "\t.links = %d,\n", link);
+ if (ptr->sibling)
+ fprintf(fil, "\t.sibling = &%s,\n", ptr->sibling->name);
+ if (ptr->chip->chiph_exists) {
+ fprintf(fil, "\t.chip_ops = &%s_ops,\n", ptr->chip->name_underscore);
+ fprintf(fil, "\t.chip_info = &%s_info_%d,\n", ptr->chip->name_underscore, ptr->chip->id);
+ }
+ if (ptr->nextdev)
+ fprintf(fil, "\t.next=&%s\n", ptr->nextdev->name);
+ fprintf(fil, "};\n");
+ }
+ if ((ptr->type == chip) && (ptr->chiph_exists)) {
+ if (ptr->reg) {
+ fprintf(fil, "struct %s_config %s_info_%d\t= {\n", ptr->name_underscore, ptr->name_underscore, ptr->id);
+ struct reg *r = ptr->reg;
+ while (r) {
+ fprintf(fil, "\t.%s = %s,\n", r->key, r->value);
+ r = r->next;
+ }
+ fprintf(fil, "};\n\n");
+ } else {
+ fprintf(fil, "struct %s_config %s_info_%d;\n", ptr->name_underscore, ptr->name_underscore, ptr->id);
+ }
+ }
+}
+
+void walk_device_tree(FILE *fil, struct device *ptr, void (*func)(FILE *, struct device*), struct device *chips) {
+ do {
+ func(fil, ptr);
+ ptr = ptr->next_sibling;
+ } while (ptr);
+}
+
+struct device mainboard = {
+ .name = "mainboard",
+ .name_underscore = "mainboard",
+ .id = 0,
+ .chip = &mainboard,
+ .type = chip,
+ .chiph_exists = 1,
+ .children = &root
+};
+
+struct device root = {
+ .name = "dev_root",
+ .name_underscore = "dev_root",
+ .id = 0,
+ .chip = &mainboard,
+ .type = device,
+ .path = " .type = DEVICE_PATH_ROOT ",
+ .ops = "&default_dev_ops_root",
+ .parent = &root,
+ .bus = &root,
+ .enabled = 1
+};
+
+int main(int argc, char** argv) {
+ if (argc != 3) {
+ printf("usage: sconfig vendor/mainboard outputdir\n");
+ return 1;
+ }
+ char *mainboard=argv[1];
+ char *outputdir=argv[2];
+ char *devtree=malloc(strlen(mainboard)+30);
+ char *outputc=malloc(strlen(outputdir)+10);
+ sprintf(devtree, "src/mainboard/%s/devicetree.cb", mainboard);
+ sprintf(outputc, "%s/static.c", outputdir);
+
+ headers.next = malloc(sizeof(struct header));
+ headers.next->name = malloc(strlen(mainboard)+12);
+ headers.next->next = 0;
+ sprintf(headers.next->name, "mainboard/%s", mainboard);
+
+ FILE *filec = fopen(devtree, "r");
+ yyrestart(filec);
+
+ FILE *staticc = fopen(outputc, "w");
+
+ cur_bus = cur_parent = lastdev = head = &root;
+ yyparse();
+ fclose(filec);
+
+ if ((head->type == chip) && (!head->chiph_exists)) {
+ struct device *tmp = head;
+ head = &root;
+ while (head->next != tmp) head = head->next;
+ }
+
+ fprintf(staticc, "#include <device/device.h>\n");
+ fprintf(staticc, "#include <device/pci.h>\n");
+ struct header *h = &headers;
+ while (h->next) {
+ h = h->next;
+ fprintf(staticc, "#include \"%s/chip.h\"\n", h->name);
+ }
+ fprintf(staticc, "\n/* pass 0 */\n");
+ walk_device_tree(staticc, &root, pass0, NULL);
+ fprintf(staticc, "\n/* pass 1 */\nstruct mainboard_config mainboard_info_0;\nstruct device **last_dev_p = &%s.next;\n", lastdev->name);
+ walk_device_tree(staticc, &root, pass1, NULL);
+
+ fclose(staticc);
+}
+
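For reference, the pass0() and pass1() functions above are what turn the parsed tree into the static.c that the build later compiles: pass0 forward-declares every device, pass1 emits the initialized struct device and chip-config objects. Below is an abridged sketch of that output, assuming a hypothetical board with one PCI device 1f.0 under a chip at southbridge/acme/example and a single register setting; the names are made up, and a real static.c also contains the fully initialized dev_root and mainboard entries.

    /*
     * Illustrative sketch of the static.c text emitted by pass0()/pass1()
     * above -- abridged, with hypothetical device ids, chip path and
     * register name.
     */
    #include <device/device.h>
    #include <device/pci.h>
    #include "southbridge/acme/example/chip.h"

    /* pass 0: forward declarations for every parsed device */
    struct device _dev2;
    struct device dev_root;        /* fully emitted by pass 1 in real output */

    /* pass 1: chip config objects and the device structs themselves */
    struct mainboard_config mainboard_info_0;
    struct device **last_dev_p = &_dev2.next;

    struct southbridge_acme_example_config southbridge_acme_example_info_1 = {
    	.some_register = 0x10,
    };

    struct device _dev2 = {
    	.ops = 0,
    	.bus = &dev_root.link[0],
    	.path = {.type=DEVICE_PATH_PCI,{.pci={ .devfn = PCI_DEVFN(0x1f,0)}}},
    	.enabled = 1,
    	.on_mainboard = 1,
    	.link = {
    	},
    	.links = 0,
    	.chip_ops = &southbridge_acme_example_ops,
    	.chip_info = &southbridge_acme_example_info_1,
    };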
diff --git a/util/sconfig/sconfig.tab.h_shipped b/util/sconfig/sconfig.tab.h_shipped
new file mode 100644
index 0000000000..906d1200cc
--- /dev/null
+++ b/util/sconfig/sconfig.tab.h_shipped
@@ -0,0 +1,90 @@
+
+/* A Bison parser, made by GNU Bison 2.4.1. */
+
+/* Skeleton interface for Bison's Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+
+/* Tokens. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ /* Put the tokens into the symbol table, so that GDB and other debuggers
+ know about them. */
+ enum yytokentype {
+ CHIP = 258,
+ DEVICE = 259,
+ REGISTER = 260,
+ BOOL = 261,
+ BUS = 262,
+ RESOURCE = 263,
+ END = 264,
+ EQUALS = 265,
+ HEX = 266,
+ STRING = 267,
+ PCI = 268,
+ PNP = 269,
+ I2C = 270,
+ APIC = 271,
+ APIC_CLUSTER = 272,
+ PCI_DOMAIN = 273,
+ IRQ = 274,
+ DRQ = 275,
+ IO = 276,
+ NUMBER = 277
+ };
+#endif
+
+
+
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+typedef union YYSTYPE
+{
+
+/* Line 1676 of yacc.c */
+#line 125 "/home/Patrick/work/coreboot/util/sconfig/sconfig.y"
+
+ struct device *device;
+ char *string;
+ int number;
+
+
+
+/* Line 1676 of yacc.c */
+#line 82 "/home/Patrick/work/coreboot/util/sconfig/sconfig.tab.h_shipped"
+} YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
+# define yystype YYSTYPE /* obsolescent; will be withdrawn */
+# define YYSTYPE_IS_DECLARED 1
+#endif
+
+extern YYSTYPE yylval;
+
+
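The part of this header that matters to the rest of the tool is the contract between scanner and parser: STRING and NUMBER tokens hand the raw text to the parser in yylval.string (the grammar actions run strtol() on NUMBER text themselves), while keyword tokens such as BUS, RESOURCE and BOOL pass a code in yylval.number. The shipped flex-generated scanner is not reproduced in this hunk; the fragment below is only a sketch of that contract, with made-up helper names.

    /*
     * Sketch only -- not the shipped scanner.  Helper names are
     * hypothetical; they just show how a lexer action fills yylval
     * before returning a token from the enum above.
     */
    #include <string.h>
    #include "sconfig.tab.h"        /* yytokentype, YYSTYPE, yylval */

    static int keyword_token(int token, int code)
    {
    	/* BUS carries PCI/PNP/I2C/..., RESOURCE carries IRQ/DRQ/IO,
    	   BOOL carries the enable flag */
    	yylval.number = code;
    	return token;
    }

    static int text_token(int token, const char *text)
    {
    	/* STRING and NUMBER both hand the raw text to the parser;
    	   the grammar actions convert NUMBER text with strtol() */
    	yylval.string = strdup(text);
    	return token;
    }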
diff --git a/util/sconfig/sconfig.y b/util/sconfig/sconfig.y
new file mode 100755
index 0000000000..180ea8a735
--- /dev/null
+++ b/util/sconfig/sconfig.y
@@ -0,0 +1,499 @@
+%{
+/*
+ * sconfig, coreboot device tree compiler
+ *
+ * Copyright (C) 2010 coresystems GmbH
+ * written by Patrick Georgi <patrick.georgi@coresystems.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+
+enum devtype { chip, device };
+
+struct resource;
+struct resource {
+ int type;
+ int index;
+ int base;
+ struct resource *next;
+};
+
+struct reg;
+struct reg {
+ char *key;
+ char *value;
+ struct reg *next;
+};
+
+struct device;
+struct device {
+ int id;
+ int enabled;
+ int used;
+ int multidev;
+ int link;
+ int rescnt;
+ int chiph_exists;
+ char *ops;
+ char *name;
+ char *aliased_name;
+ char *name_underscore;
+ char *path;
+ int path_a;
+ int path_b;
+ int bustype;
+ enum devtype type;
+ struct device *parent;
+ struct device *bus;
+ struct device *next;
+ struct device *nextdev;
+ struct device *children;
+ struct device *latestchild;
+ struct device *next_sibling;
+ struct device *sibling;
+ struct device *chip;
+ struct resource *res;
+ struct reg *reg;
+} *head, *lastdev, *cur_parent, *cur_bus, root;
+
+struct header;
+struct header {
+ char *name;
+ struct header *next;
+} headers;
+
+int devcount = 0;
+
+struct device *new_dev() {
+ struct device *dev = malloc(sizeof(struct device));
+ memset(dev, 0, sizeof(struct device));
+ dev->id = ++devcount;
+ dev->parent = cur_parent;
+ dev->bus = cur_bus;
+ head->next = dev;
+ head = dev;
+ return dev;
+}
+
+int device_match(struct device *a, struct device *b) {
+ if ((a->bustype == b->bustype) && (a->bus == b->bus) && (a->path_a == b->path_a) && (a->path_b == b->path_b))
+ return 1;
+ return 0;
+}
+
+void fold_in(struct device *parent) {
+ struct device *child = parent->children;
+ struct device *latest = 0;
+ while (child != latest) {
+ if (child->children) {
+ if (!latest) latest = child->children;
+ parent->latestchild->next_sibling = child->children;
+ parent->latestchild = child->latestchild;
+ }
+ child = child->next_sibling;
+ }
+}
+
+int yywrap(void) {
+ return 1;
+}
+
+void yyerror (char const *str)
+{
+ fprintf (stderr, "%s\n", str);
+}
+%}
+%union {
+ struct device *device;
+ char *string;
+ int number;
+}
+%token CHIP DEVICE REGISTER BOOL BUS RESOURCE END EQUALS HEX STRING PCI PNP I2C APIC APIC_CLUSTER PCI_DOMAIN IRQ DRQ IO NUMBER
+%%
+devtree: devchip {
+ root.next_sibling = root.children;
+ root.next_sibling->next_sibling = root.next_sibling->children;
+
+ struct device *dev = &root;
+ while (dev) {
+ /* skip "chip" elements in children chain */
+ while (dev->children && (dev->children->type == chip)) dev->children = dev->children->children;
+ /* skip "chip" elements and functions of the same device in sibling chain */
+ while (dev->sibling && dev->sibling->used) dev->sibling = dev->sibling->sibling;
+ /* If end of chain, and parent is a chip, move on */
+ if (!dev->sibling && (dev->parent->type == chip)) dev->sibling = dev->parent->sibling;
+ /* skip chips */
+ while (dev->sibling && dev->sibling->type == chip) dev->sibling = dev->sibling->children;
+ /* skip duplicate function elements in nextdev chain */
+ while (dev->nextdev && dev->nextdev->used) dev->nextdev = dev->nextdev->nextdev;
+ dev = dev->next_sibling;
+ }
+ };
+
+devchip: chip | device ;
+
+devices: devices devchip | devices registers | ;
+
+devicesorresources: devicesorresources devchip | devicesorresources resource | ;
+
+chip: CHIP STRING /* == path */ {
+ $<device>$ = new_dev();
+ $<device>$->chiph_exists = 1;
+ $<device>$->name = $<string>2;
+ $<device>$->name_underscore = strdup($<device>$->name);
+ char *c;
+ for (c = $<device>$->name_underscore; *c; c++) {
+ if (*c == '/') *c = '_';
+ }
+ $<device>$->type = chip;
+ $<device>$->chip = $<device>$;
+
+ struct stat st;
+ char *chip_h = malloc(strlen($<string>2)+12);
+ sprintf(chip_h, "src/%s/chip.h", $<string>2);
+ if ((stat(chip_h, &st) == -1) && (errno == ENOENT))
+ $<device>$->chiph_exists = 0;
+
+ if (cur_parent->latestchild) {
+ cur_parent->latestchild->next_sibling = $<device>$;
+ cur_parent->latestchild->sibling = $<device>$;
+ }
+ cur_parent->latestchild = $<device>$;
+ if (!cur_parent->children)
+ cur_parent->children = $<device>$;
+
+ cur_parent = $<device>$;
+}
+ devices END {
+ cur_parent = $<device>3->parent;
+
+ fold_in($<device>3);
+
+ if ($<device>3->chiph_exists) {
+ int include_exists = 0;
+ struct header *h = &headers;
+ while (h->next) {
+ int result = strcmp($<device>3->name, h->next->name);
+ if (result == 0) {
+ include_exists = 1;
+ break;
+ }
+ if (result < 0) break;
+ h = h->next;
+ }
+ if (!include_exists) {
+ struct header *tmp = h->next;
+ h->next = malloc(sizeof(struct header));
+ memset(h->next, 0, sizeof(struct header));
+ h->next->name = $<device>3->name;
+ h->next->next = tmp;
+ break;
+ }
+ }
+};
+
+device: DEVICE BUS NUMBER /* == devnum */ BOOL {
+ $<device>$ = new_dev();
+ $<device>$->bustype = $<number>2;
+
+ char *tmp;
+ $<device>$->path_a = strtol(strdup($<string>3), &tmp, 16);
+ if (*tmp == '.') {
+ tmp++;
+ $<device>$->path_b = strtol(tmp, NULL, 16);
+ }
+
+ char *name = malloc(10);
+ sprintf(name, "_dev%d", $<device>$->id);
+ $<device>$->name = name;
+ $<device>$->name_underscore = name; // shouldn't be necessary, but avoid 0-ptr
+ $<device>$->type = device;
+ $<device>$->enabled = $<number>4;
+ $<device>$->chip = $<device>$->parent->chip;
+
+ if (cur_parent->latestchild) {
+ cur_parent->latestchild->next_sibling = $<device>$;
+ cur_parent->latestchild->sibling = $<device>$;
+ }
+ cur_parent->latestchild = $<device>$;
+ if (!cur_parent->children)
+ cur_parent->children = $<device>$;
+
+ lastdev->nextdev = $<device>$;
+ lastdev = $<device>$;
+ if ($<number>2 == PCI) {
+ $<device>$->path = ".type=DEVICE_PATH_PCI,{.pci={ .devfn = PCI_DEVFN(0x%x,%d)}}";
+ }
+ if ($<number>2 == PNP) {
+ $<device>$->path = ".type=DEVICE_PATH_PNP,{.pnp={ .port = 0x%x, .device = 0x%x }}";
+ }
+ if ($<number>2 == I2C) {
+ $<device>$->path = ".type=DEVICE_PATH_I2C,{.i2c={ .device = 0x%x }}";
+ }
+ if ($<number>2 == APIC) {
+ $<device>$->path = ".type=DEVICE_PATH_APIC,{.apic={ .apic_id = 0x%x }}";
+ }
+ if ($<number>2 == APIC_CLUSTER) {
+ $<device>$->path = ".type=DEVICE_PATH_APIC_CLUSTER,{.apic_cluster={ .cluster = 0x%x }}";
+ }
+ if ($<number>2 == PCI_DOMAIN) {
+ $<device>$->path = ".type=DEVICE_PATH_PCI_DOMAIN,{.pci_domain={ .domain = 0x%x }}";
+ }
+ cur_parent = $<device>$;
+ cur_bus = $<device>$;
+}
+ devicesorresources END {
+ cur_parent = $<device>5->parent;
+ cur_bus = $<device>5->bus;
+
+ fold_in($<device>5);
+
+ struct device *d = $<device>5->children;
+ while (d) {
+ int link = 0;
+ struct device *cmp = d->next_sibling;
+ while (cmp && (cmp->bus == d->bus) && (cmp->path_a == d->path_a) && (cmp->path_b == d->path_b)) {
+ if (cmp->type==device && !cmp->used) {
+ if (device_match(d, cmp)) {
+ d->multidev = 1;
+
+ cmp->aliased_name = malloc(12);
+ sprintf(cmp->aliased_name, "_dev%d", cmp->id);
+ cmp->id = d->id;
+ cmp->name = d->name;
+ cmp->used = 1;
+ cmp->link = ++link;
+ }
+ }
+ cmp = cmp->next_sibling;
+ }
+ d = d->next_sibling;
+ }
+};
+
+resource: RESOURCE NUMBER /* == resnum */ EQUALS NUMBER /* == resval */
+ {
+ struct resource *r = malloc(sizeof(struct resource));
+ memset (r, 0, sizeof(struct resource));
+ r->type = $<number>1;
+ r->index = strtol($<string>2, NULL, 0);
+ r->base = strtol($<string>4, NULL, 0);
+ if (cur_parent->res) {
+ struct resource *head = cur_parent->res;
+ while (head->next) head = head->next;
+ head->next = r;
+ } else {
+ cur_parent->res = r;
+ }
+ cur_parent->rescnt++;
+ }
+ ;
+
+registers: REGISTER STRING /* == regname */ EQUALS STRING /* == regval */
+ {
+ struct reg *r = malloc(sizeof(struct reg));
+ memset (r, 0, sizeof(struct reg));
+ r->key = $<string>2;
+ r->value = $<string>4;
+ if (cur_parent->reg) {
+ struct reg *head = cur_parent->reg;
+		// sort registers so the output matches the old (python) sconfig
+ int sort = strcmp(r->key, head->key);
+ if (sort == 0) {
+ printf("ERROR: duplicate 'register' key.\n");
+ exit(1);
+ }
+ if (sort<0) {
+ r->next = head;
+ cur_parent->reg = r;
+ } else {
+ while ((head->next) && (strcmp(head->next->key, r->key)<0)) head = head->next;
+ r->next = head->next;
+ head->next = r;
+ }
+ } else {
+ cur_parent->reg = r;
+ }
+ }
+ ;
+
+%%
+void pass0(FILE *fil, struct device *ptr) {
+ if ((ptr->type == device) && (ptr->id != 0) && (!ptr->used))
+ fprintf(fil, "struct device %s;\n", ptr->name);
+ if ((ptr->type == device) && (ptr->id != 0) && ptr->used)
+ fprintf(fil, "struct device %s;\n", ptr->aliased_name);
+}
+
+void pass1(FILE *fil, struct device *ptr) {
+ if (!ptr->used && (ptr->type == device)) {
+ fprintf(fil, "struct device %s = {\n", ptr->name);
+ fprintf(fil, "\t.ops = %s,\n", (ptr->ops)?(ptr->ops):"0");
+ fprintf(fil, "\t.bus = &%s.link[%d],\n", ptr->bus->name, ptr->bus->link);
+ fprintf(fil, "\t.path = {");
+ fprintf(fil, ptr->path, ptr->path_a, ptr->path_b);
+ fprintf(fil, "},\n");
+ fprintf(fil, "\t.enabled = %d,\n", ptr->enabled);
+ fprintf(fil, "\t.on_mainboard = 1,\n");
+ if (ptr->rescnt > 0) {
+ fprintf(fil, "\t.resources = %d,\n", ptr->rescnt);
+ fprintf(fil, "\t.resource = {\n");
+ struct resource *r = ptr->res;
+ while (r) {
+ fprintf(fil, "\t\t{ .flags=IORESOURCE_FIXED | IORESOURCE_ASSIGNED | IORESOURCE_");
+ if (r->type == IRQ) fprintf(fil, "IRQ");
+ if (r->type == DRQ) fprintf(fil, "DRQ");
+ if (r->type == IO) fprintf(fil, "IO");
+ fprintf(fil, ", .index=0x%x, .base=0x%x},\n", r->index, r->base);
+ r = r->next;
+ }
+ fprintf(fil, "\t },\n");
+ }
+ int link = 0;
+ fprintf(fil, "\t.link = {\n");
+ if (ptr->multidev) {
+ struct device *d = ptr;
+ while (d) {
+ if (device_match(d, ptr)) {
+ fprintf(fil, "\t\t[%d] = {\n", d->link);
+ fprintf(fil, "\t\t\t.link = %d,\n", d->link);
+ fprintf(fil, "\t\t\t.dev = &%s,\n", d->name);
+ if (d->children)
+ fprintf(fil, "\t\t\t.children = &%s,\n", d->children->name);
+ fprintf(fil, "\t\t},\n");
+ link++;
+ }
+ d = d->next_sibling;
+ }
+ } else {
+ if (ptr->children) {
+ fprintf(fil, "\t\t[0] = {\n");
+ fprintf(fil, "\t\t\t.link = 0,\n");
+ fprintf(fil, "\t\t\t.dev = &%s,\n", ptr->name);
+ fprintf(fil, "\t\t\t.children = &%s,\n", ptr->children->name);
+ fprintf(fil, "\t\t},\n");
+ link++;
+ }
+ }
+ fprintf(fil, "\t},\n");
+ fprintf(fil, "\t.links = %d,\n", link);
+ if (ptr->sibling)
+ fprintf(fil, "\t.sibling = &%s,\n", ptr->sibling->name);
+ if (ptr->chip->chiph_exists) {
+ fprintf(fil, "\t.chip_ops = &%s_ops,\n", ptr->chip->name_underscore);
+ fprintf(fil, "\t.chip_info = &%s_info_%d,\n", ptr->chip->name_underscore, ptr->chip->id);
+ }
+ if (ptr->nextdev)
+ fprintf(fil, "\t.next=&%s\n", ptr->nextdev->name);
+ fprintf(fil, "};\n");
+ }
+ if ((ptr->type == chip) && (ptr->chiph_exists)) {
+ if (ptr->reg) {
+ fprintf(fil, "struct %s_config %s_info_%d\t= {\n", ptr->name_underscore, ptr->name_underscore, ptr->id);
+ struct reg *r = ptr->reg;
+ while (r) {
+ fprintf(fil, "\t.%s = %s,\n", r->key, r->value);
+ r = r->next;
+ }
+ fprintf(fil, "};\n\n");
+ } else {
+ fprintf(fil, "struct %s_config %s_info_%d;\n", ptr->name_underscore, ptr->name_underscore, ptr->id);
+ }
+ }
+}
+
+void walk_device_tree(FILE *fil, struct device *ptr, void (*func)(FILE *, struct device*), struct device *chips) {
+ do {
+ func(fil, ptr);
+ ptr = ptr->next_sibling;
+ } while (ptr);
+}
+
+struct device mainboard = {
+ .name = "mainboard",
+ .name_underscore = "mainboard",
+ .id = 0,
+ .chip = &mainboard,
+ .type = chip,
+ .chiph_exists = 1,
+ .children = &root
+};
+
+struct device root = {
+ .name = "dev_root",
+ .name_underscore = "dev_root",
+ .id = 0,
+ .chip = &mainboard,
+ .type = device,
+ .path = " .type = DEVICE_PATH_ROOT ",
+ .ops = "&default_dev_ops_root",
+ .parent = &root,
+ .bus = &root,
+ .enabled = 1
+};
+
+int main(int argc, char** argv) {
+ if (argc != 3) {
+ printf("usage: sconfig vendor/mainboard outputdir\n");
+ return 1;
+ }
+ char *mainboard=argv[1];
+ char *outputdir=argv[2];
+ char *devtree=malloc(strlen(mainboard)+30);
+ char *outputc=malloc(strlen(outputdir)+10);
+ sprintf(devtree, "src/mainboard/%s/devicetree.cb", mainboard);
+ sprintf(outputc, "%s/static.c", outputdir);
+
+ headers.next = malloc(sizeof(struct header));
+ headers.next->name = malloc(strlen(mainboard)+12);
+ headers.next->next = 0;
+ sprintf(headers.next->name, "mainboard/%s", mainboard);
+
+ FILE *filec = fopen(devtree, "r");
+ yyrestart(filec);
+
+ FILE *staticc = fopen(outputc, "w");
+
+ cur_bus = cur_parent = lastdev = head = &root;
+ yyparse();
+ fclose(filec);
+
+ if ((head->type == chip) && (!head->chiph_exists)) {
+ struct device *tmp = head;
+ head = &root;
+ while (head->next != tmp) head = head->next;
+ }
+
+ fprintf(staticc, "#include <device/device.h>\n");
+ fprintf(staticc, "#include <device/pci.h>\n");
+ struct header *h = &headers;
+ while (h->next) {
+ h = h->next;
+ fprintf(staticc, "#include \"%s/chip.h\"\n", h->name);
+ }
+ fprintf(staticc, "\n/* pass 0 */\n");
+ walk_device_tree(staticc, &root, pass0, NULL);
+ fprintf(staticc, "\n/* pass 1 */\nstruct mainboard_config mainboard_info_0;\nstruct device **last_dev_p = &%s.next;\n", lastdev->name);
+ walk_device_tree(staticc, &root, pass1, NULL);
+
+ fclose(staticc);
+}
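Taken together, the grammar above accepts the coreboot devicetree.cb format: chip blocks that may contain register settings and nested devices, and device blocks that may contain resources and further chips. A minimal input sketch follows; the chip paths and values are made up, and the exact keyword spellings (on/off, pci, pnp, io, irq, ...) are defined by the flex scanner, which is not part of this hunk.

    chip northbridge/acme/example
    	device pci_domain 0 on
    		device pci 1f.0 on
    			chip superio/acme/example
    				register "dummy_setting" = "0x10"
    				device pnp 2e.0 on
    					io 0x60 = 0x3f0
    					irq 0x70 = 6
    				end
    			end
    		end
    	end
    end

Note how the devices and devicesorresources productions split the two contexts: register lines are only valid directly inside a chip block, while io/irq/drq resources are only valid directly inside a device block.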
diff --git a/util/sconfig/test.config b/util/sconfig/test.config
deleted file mode 100644
index c492f200fa..0000000000
--- a/util/sconfig/test.config
+++ /dev/null
@@ -1,6 +0,0 @@
-target x
-mainboard amd/solo
-# option X=1
-# makerule x y "z"
- payload /dev/null
-end
diff --git a/util/sconfig/yapps2.py b/util/sconfig/yapps2.py
deleted file mode 100644
index b408cbcfb2..0000000000
--- a/util/sconfig/yapps2.py
+++ /dev/null
@@ -1,779 +0,0 @@
-# Yapps 2.0 - yet another python parser system
-# Amit J Patel, January 1999
-# See http://theory.stanford.edu/~amitp/Yapps/ for documentation and updates
-
-# v2.0.1 changes (October 2001):
-# * The exceptions inherit the standard Exception class (thanks Rich Salz)
-# * The scanner can use either a different set of regular expressions
-# per instance, or allows the subclass to define class fields with
-# the patterns. This improves performance when many Scanner objects
-# are being created, because the regular expressions don't have to
-# be recompiled each time. (thanks Amaury Forgeot d'Arc)
-# v2.0.2 changes (April 2002)
-# * Fixed a bug in generating the 'else' clause when the comment was too
-# long. v2.0.1 was missing a newline. (thanks Steven Engelhardt)
-# v2.0.3 changes (August 2002)
-# * Fixed a bug with inline tokens using the r"" syntax.
-
-from string import *
-from yappsrt import *
-import re
-import os.path
-
-INDENT = " "*4
-
-class Generator:
- def __init__(self, name, options, tokens, rules):
- self.change_count = 0
- self.name = name
- self.options = options
- self.preparser = ''
- self.postparser = None
-
- self.tokens = {} # Map from tokens to regexps
- self.ignore = [] # List of token names to ignore in parsing
- self.terminals = [] # List of token names (to maintain ordering)
- for n,t in tokens:
- if n == '#ignore':
- n = t
- self.ignore.append(n)
- if n in self.tokens.keys() and self.tokens[n] != t:
- print 'Warning: token', n, 'multiply defined.'
- self.tokens[n] = t
- self.terminals.append(n)
-
- self.rules = {} # Map from rule names to parser nodes
- self.params = {} # Map from rule names to parameters
- self.goals = [] # List of rule names (to maintain ordering)
- for n,p,r in rules:
- self.params[n] = p
- self.rules[n] = r
- self.goals.append(n)
-
- import sys
- self.output = sys.stdout
-
- def __getitem__(self, name):
- # Get options
- return self.options.get(name, 0)
-
- def non_ignored_tokens(self):
- return filter(lambda x, i=self.ignore: x not in i, self.terminals)
-
- def changed(self):
- self.change_count = 1+self.change_count
-
- def subset(self, a, b):
- "See if all elements of a are inside b"
- for x in a:
- if x not in b: return 0
- return 1
-
- def equal_set(self, a, b):
- "See if a and b have the same elements"
- if len(a) != len(b): return 0
- if a == b: return 1
- return self.subset(a, b) and self.subset(b, a)
-
- def add_to(self, parent, additions):
- "Modify parent to include all elements in additions"
- for x in additions:
- if x not in parent:
- parent.append(x)
- self.changed()
-
- def equate(self, a, b):
- self.add_to(a, b)
- self.add_to(b, a)
-
- def write(self, *args):
- for a in args:
- self.output.write(a)
-
- def in_test(self, x, full, b):
- if not b: return '0'
- if len(b)==1: return '%s == %s' % (x, `b[0]`)
- if full and len(b) > len(full)/2:
- # Reverse the sense of the test.
- not_b = filter(lambda x, b=b: x not in b, full)
- return self.not_in_test(x, full, not_b)
- return '%s in %s' % (x, `b`)
-
- def not_in_test(self, x, full, b):
- if not b: return '1'
- if len(b)==1: return '%s != %s' % (x, `b[0]`)
- return '%s not in %s' % (x, `b`)
-
- def peek_call(self, a):
- a_set = (`a`[1:-1])
- if self.equal_set(a, self.non_ignored_tokens()): a_set = ''
- if self['context-insensitive-scanner']: a_set = ''
- return 'self._peek(%s)' % a_set
-
- def peek_test(self, a, b):
- if self.subset(a, b): return '1'
- if self['context-insensitive-scanner']: a = self.non_ignored_tokens()
- return self.in_test(self.peek_call(a), a, b)
-
- def not_peek_test(self, a, b):
- if self.subset(a, b): return '0'
- return self.not_in_test(self.peek_call(a), a, b)
-
- def calculate(self):
- while 1:
- for r in self.goals:
- self.rules[r].setup(self, r)
- if self.change_count == 0: break
- self.change_count = 0
-
- while 1:
- for r in self.goals:
- self.rules[r].update(self)
- if self.change_count == 0: break
- self.change_count = 0
-
- def dump_information(self):
- self.calculate()
- for r in self.goals:
- print ' _____' + '_'*len(r)
- print ('___/Rule '+r+'\\' + '_'*80)[:79]
- queue = [self.rules[r]]
- while queue:
- top = queue[0]
- del queue[0]
-
- print `top`
- top.first.sort()
- top.follow.sort()
- eps = []
- if top.accepts_epsilon: eps = ['(null)']
- print ' FIRST:', join(top.first+eps, ', ')
- print ' FOLLOW:', join(top.follow, ', ')
- for x in top.get_children(): queue.append(x)
-
- def generate_output(self):
- self.calculate()
- self.write(self.preparser)
- self.write("from string import *\n")
- self.write("import re\n")
- self.write("from yappsrt import *\n")
- self.write("\n")
- self.write("class ", self.name, "Scanner(Scanner):\n")
- self.write(" patterns = [\n")
- for p in self.terminals:
- self.write(" (%s, re.compile(%s)),\n" % (
- `p`, `self.tokens[p]`))
- self.write(" ]\n")
- self.write(" def __init__(self, str):\n")
- self.write(" Scanner.__init__(self,None,%s,str)\n" %
- `self.ignore`)
- self.write("\n")
-
- self.write("class ", self.name, "(Parser):\n")
- for r in self.goals:
- self.write(INDENT, "def ", r, "(self")
- if self.params[r]: self.write(", ", self.params[r])
- self.write("):\n")
- self.rules[r].output(self, INDENT+INDENT)
- self.write("\n")
-
- self.write("\n")
- self.write("def parse(rule, text):\n")
- self.write(" P = ", self.name, "(", self.name, "Scanner(text))\n")
- self.write(" return wrap_error_reporter(P, rule)\n")
- self.write("\n")
- if self.postparser is not None:
- self.write(self.postparser)
- else:
- self.write("if __name__=='__main__':\n")
- self.write(INDENT, "from sys import argv, stdin\n")
- self.write(INDENT, "if len(argv) >= 2:\n")
- self.write(INDENT*2, "if len(argv) >= 3:\n")
- self.write(INDENT*3, "f = open(argv[2],'r')\n")
- self.write(INDENT*2, "else:\n")
- self.write(INDENT*3, "f = stdin\n")
- self.write(INDENT*2, "print parse(argv[1], f.read())\n")
- self.write(INDENT, "else: print 'Args: <rule> [<filename>]'\n")
-
-######################################################################
-class Node:
- def __init__(self):
- self.first = []
- self.follow = []
- self.accepts_epsilon = 0
- self.rule = '?'
-
- def setup(self, gen, rule):
- # Setup will change accepts_epsilon,
- # sometimes from 0 to 1 but never 1 to 0.
- # It will take a finite number of steps to set things up
- self.rule = rule
-
- def used(self, vars):
- "Return two lists: one of vars used, and the other of vars assigned"
- return vars, []
-
- def get_children(self):
- "Return a list of sub-nodes"
- return []
-
- def __repr__(self):
- return str(self)
-
- def update(self, gen):
- if self.accepts_epsilon:
- gen.add_to(self.first, self.follow)
-
- def output(self, gen, indent):
- "Write out code to _gen_ with _indent_:string indentation"
- gen.write(indent, "assert 0 # Invalid parser node\n")
-
-class Terminal(Node):
- def __init__(self, token):
- Node.__init__(self)
- self.token = token
- self.accepts_epsilon = 0
-
- def __str__(self):
- return self.token
-
- def update(self, gen):
- Node.update(self, gen)
- if self.first != [self.token]:
- self.first = [self.token]
- gen.changed()
-
- def output(self, gen, indent):
- gen.write(indent)
- if re.match('[a-zA-Z_]+$', self.token):
- gen.write(self.token, " = ")
- gen.write("self._scan(%s)\n" % `self.token`)
-
-class Eval(Node):
- def __init__(self, expr):
- Node.__init__(self)
- self.expr = expr
-
- def setup(self, gen, rule):
- Node.setup(self, gen, rule)
- if not self.accepts_epsilon:
- self.accepts_epsilon = 1
- gen.changed()
-
- def __str__(self):
- return '{{ %s }}' % strip(self.expr)
-
- def output(self, gen, indent):
- gen.write(indent, strip(self.expr), '\n')
-
-class NonTerminal(Node):
- def __init__(self, name, args):
- Node.__init__(self)
- self.name = name
- self.args = args
-
- def setup(self, gen, rule):
- Node.setup(self, gen, rule)
- try:
- self.target = gen.rules[self.name]
- if self.accepts_epsilon != self.target.accepts_epsilon:
- self.accepts_epsilon = self.target.accepts_epsilon
- gen.changed()
- except KeyError: # Oops, it's nonexistent
- print 'Error: no rule <%s>' % self.name
- self.target = self
-
- def __str__(self):
- return '<%s>' % self.name
-
- def update(self, gen):
- Node.update(self, gen)
- gen.equate(self.first, self.target.first)
- gen.equate(self.follow, self.target.follow)
-
- def output(self, gen, indent):
- gen.write(indent)
- gen.write(self.name, " = ")
- gen.write("self.", self.name, "(", self.args, ")\n")
-
-class Sequence(Node):
- def __init__(self, *children):
- Node.__init__(self)
- self.children = children
-
- def setup(self, gen, rule):
- Node.setup(self, gen, rule)
- for c in self.children: c.setup(gen, rule)
-
- if not self.accepts_epsilon:
- # If it's not already accepting epsilon, it might now do so.
- for c in self.children:
- # any non-epsilon means all is non-epsilon
- if not c.accepts_epsilon: break
- else:
- self.accepts_epsilon = 1
- gen.changed()
-
- def get_children(self):
- return self.children
-
- def __str__(self):
- return '( %s )' % join(map(lambda x: str(x), self.children))
-
- def update(self, gen):
- Node.update(self, gen)
- for g in self.children:
- g.update(gen)
-
- empty = 1
- for g_i in range(len(self.children)):
- g = self.children[g_i]
-
- if empty: gen.add_to(self.first, g.first)
- if not g.accepts_epsilon: empty = 0
-
- if g_i == len(self.children)-1:
- next = self.follow
- else:
- next = self.children[1+g_i].first
- gen.add_to(g.follow, next)
-
- if self.children:
- gen.add_to(self.follow, self.children[-1].follow)
-
- def output(self, gen, indent):
- if self.children:
- for c in self.children:
- c.output(gen, indent)
- else:
- # Placeholder for empty sequences, just in case
- gen.write(indent, 'pass\n')
-
-class Choice(Node):
- def __init__(self, *children):
- Node.__init__(self)
- self.children = children
-
- def setup(self, gen, rule):
- Node.setup(self, gen, rule)
- for c in self.children: c.setup(gen, rule)
-
- if not self.accepts_epsilon:
- for c in self.children:
- if c.accepts_epsilon:
- self.accepts_epsilon = 1
- gen.changed()
-
- def get_children(self):
- return self.children
-
- def __str__(self):
- return '( %s )' % join(map(lambda x: str(x), self.children), ' | ')
-
- def update(self, gen):
- Node.update(self, gen)
- for g in self.children:
- g.update(gen)
-
- for g in self.children:
- gen.add_to(self.first, g.first)
- gen.add_to(self.follow, g.follow)
- for g in self.children:
- gen.add_to(g.follow, self.follow)
- if self.accepts_epsilon:
- gen.add_to(self.first, self.follow)
-
- def output(self, gen, indent):
- test = "if"
- gen.write(indent, "_token_ = ", gen.peek_call(self.first), "\n")
- tokens_seen = []
- tokens_unseen = self.first[:]
- if gen['context-insensitive-scanner']:
- # Context insensitive scanners can return ANY token,
- # not only the ones in first.
- tokens_unseen = gen.non_ignored_tokens()
- for c in self.children:
- testset = c.first[:]
- removed = []
- for x in testset:
- if x in tokens_seen:
- testset.remove(x)
- removed.append(x)
- if x in tokens_unseen: tokens_unseen.remove(x)
- tokens_seen = tokens_seen + testset
- if removed:
- if not testset:
- print 'Error in rule', self.rule+':', c, 'never matches.'
- else:
- print 'Warning:', self
- print ' * These tokens are being ignored:', join(removed, ', ')
- print ' due to previous choices using them.'
-
- if testset:
- if not tokens_unseen: # context sensitive scanners only!
- if test=='if':
- # if it's the first AND last test, then
- # we can simply put the code without an if/else
- c.output(gen, indent)
- else:
- gen.write(indent, "else: ")
- t = gen.in_test('', [], testset)
- if len(t) < 70-len(indent):
- gen.write("#", t)
- gen.write("\n")
- c.output(gen, indent+INDENT)
- else:
- gen.write(indent, test, " ",
- gen.in_test('_token_', tokens_unseen, testset),
- ":\n")
- c.output(gen, indent+INDENT)
- test = "elif"
-
- if gen['context-insensitive-scanner'] and tokens_unseen:
- gen.write(indent, "else:\n")
- gen.write(indent, INDENT, "raise SyntaxError(self._pos, ")
- gen.write("'Could not match ", self.rule, "')\n")
-
-class Wrapper(Node):
- def __init__(self, child):
- Node.__init__(self)
- self.child = child
-
- def setup(self, gen, rule):
- Node.setup(self, gen, rule)
- self.child.setup(gen, rule)
-
- def get_children(self):
- return [self.child]
-
- def update(self, gen):
- Node.update(self, gen)
- self.child.update(gen)
- gen.add_to(self.first, self.child.first)
- gen.equate(self.follow, self.child.follow)
-
-class Option(Wrapper):
- def setup(self, gen, rule):
- Wrapper.setup(self, gen, rule)
- if not self.accepts_epsilon:
- self.accepts_epsilon = 1
- gen.changed()
-
- def __str__(self):
- return '[ %s ]' % str(self.child)
-
- def output(self, gen, indent):
- if self.child.accepts_epsilon:
- print 'Warning in rule', self.rule+': contents may be empty.'
- gen.write(indent, "if %s:\n" %
- gen.peek_test(self.first, self.child.first))
- self.child.output(gen, indent+INDENT)
-
-class Plus(Wrapper):
- def setup(self, gen, rule):
- Wrapper.setup(self, gen, rule)
- if self.accepts_epsilon != self.child.accepts_epsilon:
- self.accepts_epsilon = self.child.accepts_epsilon
- gen.changed()
-
- def __str__(self):
- return '%s+' % str(self.child)
-
- def update(self, gen):
- Wrapper.update(self, gen)
- gen.add_to(self.follow, self.first)
-
- def output(self, gen, indent):
- if self.child.accepts_epsilon:
- print 'Warning in rule', self.rule+':'
- print ' * The repeated pattern could be empty. The resulting'
- print ' parser may not work properly.'
- gen.write(indent, "while 1:\n")
- self.child.output(gen, indent+INDENT)
- union = self.first[:]
- gen.add_to(union, self.follow)
- gen.write(indent+INDENT, "if %s: break\n" %
- gen.not_peek_test(union, self.child.first))
-
-class Star(Plus):
- def setup(self, gen, rule):
- Wrapper.setup(self, gen, rule)
- if not self.accepts_epsilon:
- self.accepts_epsilon = 1
- gen.changed()
-
- def __str__(self):
- return '%s*' % str(self.child)
-
- def output(self, gen, indent):
- if self.child.accepts_epsilon:
- print 'Warning in rule', self.rule+':'
- print ' * The repeated pattern could be empty. The resulting'
- print ' parser probably will not work properly.'
- gen.write(indent, "while %s:\n" %
- gen.peek_test(self.follow, self.child.first))
- self.child.output(gen, indent+INDENT)
-
-######################################################################
-# The remainder of this file is from parsedesc.{g,py}
-
-def append(lst, x):
- "Imperative append"
- lst.append(x)
- return lst
-
-def add_inline_token(tokens, str):
- tokens.insert( 0, (str, eval(str, {}, {})) )
- return Terminal(str)
-
-def cleanup_choice(lst):
- if len(lst) == 0: return Sequence([])
- if len(lst) == 1: return lst[0]
- return apply(Choice, tuple(lst))
-
-def cleanup_sequence(lst):
- if len(lst) == 1: return lst[0]
- return apply(Sequence, tuple(lst))
-
-def cleanup_rep(node, rep):
- if rep == 'star': return Star(node)
- elif rep == 'plus': return Plus(node)
- else: return node
-
-def resolve_name(tokens, id, args):
- if id in map(lambda x: x[0], tokens):
- # It's a token
- if args:
- print 'Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args)
- return Terminal(id)
- else:
- # It's a name, so assume it's a nonterminal
- return NonTerminal(id, args)
-
-
-from string import *
-import re
-from yappsrt import *
-
-class ParserDescriptionScanner(Scanner):
- def __init__(self, str):
- Scanner.__init__(self,[
- ('"rule"', 'rule'),
- ('"ignore"', 'ignore'),
- ('"token"', 'token'),
- ('"option"', 'option'),
- ('":"', ':'),
- ('"parser"', 'parser'),
- ('[ \011\015\012]+', '[ \011\015\012]+'),
- ('#.*?\015?\012', '#.*?\015?\012'),
- ('END', '$'),
- ('ATTR', '<<.+?>>'),
- ('STMT', '{{.+?}}'),
- ('ID', '[a-zA-Z_][a-zA-Z_0-9]*'),
- ('STR', '[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"'),
- ('LP', '\\('),
- ('RP', '\\)'),
- ('LB', '\\['),
- ('RB', '\\]'),
- ('OR', '[|]'),
- ('STAR', '[*]'),
- ('PLUS', '[+]'),
- ], ['[ \011\015\012]+', '#.*?\015?\012'], str)
-
-class ParserDescription(Parser):
- def Parser(self):
- self._scan('"parser"')
- ID = self._scan('ID')
- self._scan('":"')
- Options = self.Options()
- Tokens = self.Tokens()
- Rules = self.Rules(Tokens)
- END = self._scan('END')
- return Generator(ID,Options,Tokens,Rules)
-
- def Options(self):
- opt = {}
- while self._peek('"option"', '"token"', '"ignore"', 'END', '"rule"') == '"option"':
- self._scan('"option"')
- self._scan('":"')
- Str = self.Str()
- opt[Str] = 1
- return opt
-
- def Tokens(self):
- tok = []
- while self._peek('"token"', '"ignore"', 'END', '"rule"') in ['"token"', '"ignore"']:
- _token_ = self._peek('"token"', '"ignore"')
- if _token_ == '"token"':
- self._scan('"token"')
- ID = self._scan('ID')
- self._scan('":"')
- Str = self.Str()
- tok.append( (ID,Str) )
- else: # == '"ignore"'
- self._scan('"ignore"')
- self._scan('":"')
- Str = self.Str()
- tok.append( ('#ignore',Str) )
- return tok
-
- def Rules(self, tokens):
- rul = []
- while self._peek('"rule"', 'END') == '"rule"':
- self._scan('"rule"')
- ID = self._scan('ID')
- OptParam = self.OptParam()
- self._scan('":"')
- ClauseA = self.ClauseA(tokens)
- rul.append( (ID,OptParam,ClauseA) )
- return rul
-
- def ClauseA(self, tokens):
- ClauseB = self.ClauseB(tokens)
- v = [ClauseB]
- while self._peek('OR', 'RP', 'RB', '"rule"', 'END') == 'OR':
- OR = self._scan('OR')
- ClauseB = self.ClauseB(tokens)
- v.append(ClauseB)
- return cleanup_choice(v)
-
- def ClauseB(self, tokens):
- v = []
- while self._peek('STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'END') in ['STR', 'ID', 'LP', 'LB', 'STMT']:
- ClauseC = self.ClauseC(tokens)
- v.append(ClauseC)
- return cleanup_sequence(v)
-
- def ClauseC(self, tokens):
- ClauseD = self.ClauseD(tokens)
- _token_ = self._peek('PLUS', 'STAR', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'END')
- if _token_ == 'PLUS':
- PLUS = self._scan('PLUS')
- return Plus(ClauseD)
- elif _token_ == 'STAR':
- STAR = self._scan('STAR')
- return Star(ClauseD)
- else:
- return ClauseD
-
- def ClauseD(self, tokens):
- _token_ = self._peek('STR', 'ID', 'LP', 'LB', 'STMT')
- if _token_ == 'STR':
- STR = self._scan('STR')
- t = (STR, eval(STR,{},{}))
- if t not in tokens: tokens.insert( 0, t )
- return Terminal(STR)
- elif _token_ == 'ID':
- ID = self._scan('ID')
- OptParam = self.OptParam()
- return resolve_name(tokens, ID, OptParam)
- elif _token_ == 'LP':
- LP = self._scan('LP')
- ClauseA = self.ClauseA(tokens)
- RP = self._scan('RP')
- return ClauseA
- elif _token_ == 'LB':
- LB = self._scan('LB')
- ClauseA = self.ClauseA(tokens)
- RB = self._scan('RB')
- return Option(ClauseA)
- else: # == 'STMT'
- STMT = self._scan('STMT')
- return Eval(STMT[2:-2])
-
- def OptParam(self):
- if self._peek('ATTR', '":"', 'PLUS', 'STAR', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'END') == 'ATTR':
- ATTR = self._scan('ATTR')
- return ATTR[2:-2]
- return ''
-
- def Str(self):
- STR = self._scan('STR')
- return eval(STR,{},{})
-
-
-
-
-
-# This replaces the default main routine
-
-yapps_options = [
- ('context-insensitive-scanner', 'context-insensitive-scanner',
- 'Scan all tokens (see docs)')
- ]
-
-def generate(inputfilename, outputfilename='', dump=0, **flags):
- """Generate a grammar, given an input filename (X.g)
- and an output filename (defaulting to X.py)."""
-
- if not outputfilename:
- if inputfilename[-2:]=='.g': outputfilename = inputfilename[:-2]+'.py'
- else: raise "Invalid Filename", outputfilename
-
- print ' SCONFIG ', join(outputfilename.split('/')[-4:], '/')
-
- DIVIDER = '\n%%\n' # This pattern separates the pre/post parsers
- preparser, postparser = None, None # Code before and after the parser desc
-
- # Read the entire file
- s = open(inputfilename,'r').read()
-
- # See if there's a separation between the pre-parser and parser
- f = find(s, DIVIDER)
- if f >= 0: preparser, s = s[:f]+'\n\n', s[f+len(DIVIDER):]
-
- # See if there's a separation between the parser and post-parser
- f = find(s, DIVIDER)
- if f >= 0: s, postparser = s[:f], '\n\n'+s[f+len(DIVIDER):]
-
- # Create the parser and scanner
- p = ParserDescription(ParserDescriptionScanner(s))
- if not p: return
-
- # Now parse the file
- t = wrap_error_reporter(p, 'Parser')
- if not t: return # Error
- if preparser is not None: t.preparser = preparser
- if postparser is not None: t.postparser = postparser
-
- # Check the options
- for f in t.options.keys():
- for opt,_,_ in yapps_options:
- if f == opt: break
- else:
- print 'Warning: unrecognized option', f
- # Add command line options to the set
- for f in flags.keys(): t.options[f] = flags[f]
-
- # Generate the output
- if dump:
- t.dump_information()
- else:
- t.output = open(outputfilename, 'w')
- t.generate_output()
-
-if __name__=='__main__':
- import sys, getopt
- optlist, args = getopt.getopt(sys.argv[1:], 'f:', ['dump'])
- if not args or len(args) > 2:
- print 'Usage:'
- print ' python', sys.argv[0], '[flags] input.g [output.py]'
- print 'Flags:'
- print (' --dump' + ' '*40)[:35] + 'Dump out grammar information'
- for flag, _, doc in yapps_options:
- print (' -f' + flag + ' '*40)[:35] + doc
- else:
- # Read in the options and create a list of flags
- flags = {}
- for opt in optlist:
- for flag, name, _ in yapps_options:
- if opt == ('-f', flag):
- flags[name] = 1
- break
- else:
- if opt == ('--dump', ''):
- flags['dump'] = 1
- else:
- print 'Warning - unrecognized option: ', opt[0], opt[1]
-
- apply(generate, tuple(args), flags)
diff --git a/util/sconfig/yapps2.tex b/util/sconfig/yapps2.tex
deleted file mode 100644
index 9d2bddf19c..0000000000
--- a/util/sconfig/yapps2.tex
+++ /dev/null
@@ -1,1225 +0,0 @@
-\documentclass[10pt]{article}
-\usepackage{palatino}
-\usepackage{html}
-\usepackage{color}
-
-\setlength{\headsep}{0in}
-\setlength{\headheight}{0in}
-\setlength{\textheight}{8.5in}
-\setlength{\textwidth}{5.9in}
-\setlength{\oddsidemargin}{0.25in}
-
-\definecolor{darkblue}{rgb}{0,0,0.6}
-\definecolor{darkerblue}{rgb}{0,0,0.3}
-
-%% \newcommand{\mysection}[1]{\section{\textcolor{darkblue}{#1}}}
-%% \newcommand{\mysubsection}[1]{\subsection{\textcolor{darkerblue}{#1}}}
-\newcommand{\mysection}[1]{\section{#1}}
-\newcommand{\mysubsection}[1]{\subsection{#1}}
-
-\bodytext{bgcolor=white text=black link=#004080 vlink=#006020}
-
-\newcommand{\first}{\textsc{first}}
-\newcommand{\follow}{\textsc{follow}}
-
-\begin{document}
-
-\begin{center}
-\hfill \begin{tabular}{c}
-{\Large The \emph{Yapps} Parser Generator System}\\
-\verb|http://theory.stanford.edu/~amitp/Yapps/|\\
- Version 2\\
-\\
-Amit J. Patel\\
-\htmladdnormallink{http://www-cs-students.stanford.edu/~amitp/}
-{http://www-cs-students.stanford.edu/~amitp/}
-
-\end{tabular} \hfill \rule{0in}{0in}
-\end{center}
-
-\mysection{Introduction}
-
-\emph{Yapps} (\underline{Y}et \underline{A}nother \underline{P}ython
-\underline{P}arser \underline{S}ystem) is an easy to use parser
-generator that is written in Python and generates Python code. There
-are several parser generator systems already available for Python,
-including \texttt{PyLR, kjParsing, PyBison,} and \texttt{mcf.pars,}
-but I had different goals for my parser. Yapps is simple, is easy to
-use, and produces human-readable parsers. It is not the fastest or
-most powerful parser. Yapps is designed to be used when regular
-expressions are not enough and other parser systems are too much:
-situations where you may write your own recursive descent parser.
-
-Some unusual features of Yapps that may be of interest are:
-
-\begin{enumerate}
-
- \item Yapps produces recursive descent parsers that are readable by
- humans, as opposed to table-driven parsers that are difficult to
- read. A Yapps parser for a simple calculator looks similar to the
- one that Mark Lutz wrote by hand for \emph{Programming Python.}
-
- \item Yapps also allows for rules that accept parameters and pass
- arguments to be used while parsing subexpressions. Grammars that
- allow for arguments to be passed to subrules and for values to be
- passed back are often called \emph{attribute grammars.} In many
- cases parameterized rules can be used to perform actions at ``parse
- time'' that are usually delayed until later. For example,
- information about variable declarations can be passed into the
- rules that parse a procedure body, so that undefined variables can
- be detected at parse time. The types of defined variables can be
- used in parsing as well---for example, if the type of {\tt X} is
- known, we can determine whether {\tt X(1)} is an array reference or
- a function call.
-
- \item Yapps grammars are fairly easy to write, although there are
- some inconveniences having to do with ELL(1) parsing that have to be
- worked around. For example, rules have to be left factored and
- rules may not be left recursive. However, neither limitation seems
- to be a problem in practice.
-
- Yapps grammars look similar to the notation used in the Python
- reference manual, with operators like \verb:*:, \verb:+:, \verb:|:,
- \verb:[]:, and \verb:(): for patterns, names ({\tt tim}) for rules,
- regular expressions (\verb:"[a-z]+":) for tokens, and \verb:#: for
- comments.
-
- \item The Yapps parser generator is written as a single Python module
- with no C extensions. Yapps produces parsers that are written
- entirely in Python, and require only the Yapps run-time module (5k)
- for support.
-
- \item Yapps's scanner is context-sensitive, picking tokens based on
- the types of the tokens accepted by the parser. This can be
- helpful when implementing certain kinds of parsers, such as for a
- preprocessor.
-
-\end{enumerate}
-
-There are several disadvantages of using Yapps over another parser system:
-
-\begin{enumerate}
-
- \item Yapps parsers are \texttt{ELL(1)} (Extended LL(1)), which is
- less powerful than \texttt{LALR} (used by \texttt{PyLR}) or
- \texttt{SLR} (used by \texttt{kjParsing}), so Yapps would not be a
- good choice for parsing complex languages. For example, allowing
- both \texttt{x := 5;} and \texttt{x;} as statements is difficult
- because we must distinguish based on only one token of lookahead.
- Seeing only \texttt{x}, we cannot decide whether we have an
- assignment statement or an expression statement. (Note however
- that this kind of grammar can be matched with backtracking; see
- section \ref{sec:future}.)
-
- \item The scanner that Yapps provides can only read from strings, not
- files, so an entire file has to be read in before scanning can
- begin. It is possible to build a custom scanner, though, so in
- cases where stream input is needed (from the console, a network, or
-   a large file, for example), the Yapps parser can be given a custom
- scanner that reads from a stream instead of a string.
-
- \item Yapps is not designed with efficiency in mind.
-
-\end{enumerate}
-
-Yapps provides an easy to use parser generator that produces parsers
-similar to what you might write by hand. It is not meant to be a
-solution for all parsing problems, but instead an aid for those times
-you would write a parser by hand rather than using one of the more
-powerful parsing packages available.
-
-Yapps 2.0 is easier to use than Yapps 1.0. New features include a
-less restrictive input syntax, which allows mixing of sequences,
-choices, terminals, and nonterminals; optional matching; the ability
-to insert single-line statements into the generated parser; and
-looping constructs \verb|*| and \verb|+| similar to the repetitive
-matching constructs in regular expressions. Unfortunately, the
-addition of these constructs has made Yapps 2.0 incompatible with
-Yapps 1.0, so grammars will have to be rewritten. See section
-\ref{sec:Upgrading} for tips on changing Yapps 1.0 grammars for use
-with Yapps 2.0.
-
-\mysection{Examples}
-
-In this section are several examples that show the use of Yapps.
-First, an introduction shows how to construct grammars and write them
-in Yapps form. This example can be skipped by someone familiar with
-grammars and parsing. Next is a Lisp expression grammar that produces
-a parse tree as output. This example demonstrates the use of tokens
-and rules, as well as returning values from rules. The third example
-is an expression evaluation grammar that evaluates during parsing
-(instead of producing a parse tree).
-
-\mysubsection{Introduction to Grammars}
-
-A \emph{grammar} for a natural language specifies how words can be put
-together to form large structures, such as phrases and sentences. A
-grammar for a computer language is similar in that it specifies how
-small components (called \emph{tokens}) can be put together to form
-larger structures. In this section we will write a grammar for a tiny
-subset of English.
-
-Simple English sentences can be described as being a noun phrase
-followed by a verb followed by a noun phrase. For example, in the
-sentence, ``Jack sank the blue ship,'' the word ``Jack'' is the first
-noun phrase, ``sank'' is the verb, and ``the blue ship'' is the second
-noun phrase. In addition we should say what a noun phrase is; for
-this example we shall say that a noun phrase is an optional article
-(a, an, the) followed by any number of adjectives followed by a noun.
-The tokens in our language are the articles, nouns, verbs, and
-adjectives. The \emph{rules} in our language will tell us how to
-combine the tokens together to form lists of adjectives, noun phrases,
-and sentences:
-
-\begin{itemize}
- \item \texttt{sentence: noun\_phrase verb noun\_phrase}
- \item \texttt{noun\_phrase: [article] adjective* noun}
-\end{itemize}
-
-Notice that some things that we said easily in English, such as
-``optional article,'' are expressed using special syntax, such as
-brackets. When we said, ``any number of adjectives,'' we wrote
-\texttt{adjective*}, where the \texttt{*} means ``zero or more of the
-preceding pattern''.
-
-The grammar given above is close to a Yapps grammar. We also have to
-specify what the tokens are, and what to do when a pattern is matched.
-For this example, we will do nothing when patterns are matched; the
-next example will explain how to perform match actions.
-
-\begin{verbatim}
-parser TinyEnglish:
- ignore: "\\W+"
- token noun: "(Jack|spam|ship)"
- token verb: "(sank|threw)"
- token article: "(a|an|the)"
- token adjective: "(blue|red|green)"
-
- rule sentence: noun_phrase verb noun_phrase
- rule noun_phrase: [article] adjective* noun
-\end{verbatim}
-
-The tokens are specified as Python \emph{regular expressions}. Since
-Yapps produces Python code, you can write any regular expression that
-would be accepted by Python. (\emph{Note:} These are Python 1.5
-regular expressions from the \texttt{re} module, not Python 1.4
-regular expressions from the \texttt{regex} module.) In addition to
-tokens that you want to see (which are given names), you can also
-specify tokens to ignore, marked by the \texttt{ignore} keyword. In
-this parser we want to ignore whitespace.
-
-The TinyEnglish grammar shows how you define tokens and rules, but it
-does not specify what should happen once we've matched the rules. In
-the next example, we will take a grammar and produce a \emph{parse
-tree} from it.
-
-\mysubsection{Lisp Expressions}
-
-Lisp syntax, although hated by many, has a redeeming quality: it is
-simple to parse. In this section we will construct a Yapps grammar to
-parse Lisp expressions and produce a parse tree as output.
-
-\subsubsection*{Defining the Grammar}
-
-The syntax of Lisp is simple. It has expressions, which are
-identifiers, strings, numbers, and lists. A list is a left
-parenthesis followed by some number of expressions (separated by
-spaces) followed by a right parenthesis. For example, \verb|5|,
-\verb|"ni"|, and \verb|(print "1+2 = " (+ 1 2))| are Lisp expressions.
-Written as a grammar,
-
-\begin{verbatim}
- expr: ID | STR | NUM | list
- list: ( expr* )
-\end{verbatim}
-
-In addition to having a grammar, we need to specify what to do every
-time something is matched. For the tokens, which are strings, we just
-want to get the ``value'' of the token, attach its type (identifier,
-string, or number) in some way, and return it. For the lists, we want
-to construct and return a Python list.
-
-Once some pattern is matched, we add a return statement enclosed
-in \verb|{{...}}|. The braces allow us to insert any one-line
-statement into the parser. Within this statement, we can refer to the
-values returned by matching each part of the rule. After matching a
-token such as \texttt{ID}, ``ID'' will be bound to the text of the
-matched token. Let's take a look at the rule:
-
-\begin{verbatim}
- rule expr: ID {{ return ('id', ID) }}
- ...
-\end{verbatim}
-
-In a rule, tokens return the text that was matched. For identifiers,
-we just return the identifier, along with a ``tag'' telling us that
-this is an identifier and not a string or some other value. Sometimes
-we may need to convert this text to a different form. For example, if
-a string is matched, we want to remove quotes and handle special forms
-like \verb|\n|. If a number is matched, we want to convert it into a
-number. Let's look at the return values for the other tokens:
-
-\begin{verbatim}
- ...
- | STR {{ return ('str', eval(STR)) }}
- | NUM {{ return ('num', atoi(NUM)) }}
- ...
-\end{verbatim}
-
-If we get a string, we want to remove the quotes and process any
-special backslash codes, so we run \texttt{eval} on the quoted string.
-If we get a number, we convert it to an integer with \texttt{atoi} and
-then return the number along with its type tag.
-
-For matching a list, we need to do something slightly more
-complicated. If we match a Lisp list of expressions, we want to
-create a Python list with those values.
-
-\begin{verbatim}
- rule list: "\\(" # Match the opening parenthesis
- {{ result = [] }} # Create a Python list
- (
- expr # When we match an expression,
- {{ result.append(expr) }} # add it to the list
- )* # * means repeat this if needed
- "\\)" # Match the closing parenthesis
- {{ return result }} # Return the Python list
-\end{verbatim}
-
-In this rule we first match the opening parenthesis, then go into a
-loop. In this loop we match expressions and add them to the list.
-When there are no more expressions to match, we match the closing
-parenthesis and return the resulting list. Note that \verb:#: is used for
-comments, just as in Python.
-
-The complete grammar is specified as follows:
-\begin{verbatim}
-parser Lisp:
- ignore: '\\s+'
- token NUM: '[0-9]+'
- token ID: '[-+*/!@%^&=.a-zA-Z0-9_]+'
- token STR: '"([^\\"]+|\\\\.)*"'
-
- rule expr: ID {{ return ('id', ID) }}
- | STR {{ return ('str', eval(STR)) }}
- | NUM {{ return ('num', atoi(NUM)) }}
- | list {{ return list }}
- rule list: "\\(" {{ result = [] }}
- ( expr {{ result.append(expr) }}
- )*
- "\\)" {{ return result }}
-\end{verbatim}
-
-One thing you may have noticed is that \verb|"\\("| and \verb|"\\)"|
-appear in the \texttt{list} rule. These are \emph{inline tokens}:
-they appear in the rules without being given a name with the
-\texttt{token} keyword. Inline tokens are more convenient to use, but
-since they do not have a name, the text that is matched cannot be used
-in the return value. They are best used for short simple patterns
-(usually punctuation or keywords).
-
-Another thing to notice is that the number and identifier tokens
-overlap. For example, ``487'' matches both NUM and ID. In Yapps, the
-scanner only tries to match tokens that are acceptable to the parser.
-This rule doesn't help here, since both NUM and ID can appear in the
-same place in the grammar. There are two rules used to pick tokens if
-more than one matches. One is that the \emph{longest} match is
-preferred. For example, ``487x'' will match as an ID (487x) rather
-than as a NUM (487) followed by an ID (x). The second rule is that if
-the two matches are the same length, the \emph{first} one listed in
-the grammar is preferred. For example, ``487'' will match as an NUM
-rather than an ID because NUM is listed first in the grammar. Inline
-tokens have preference over any tokens you have listed.
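-
-To make the selection rules concrete, here is a small Python sketch
-(it is \emph{not} the actual \texttt{yappsrt} scanner, only an
-illustration of the preferences described above; the
-\texttt{pick\_token} helper is invented for this example, while the
-patterns are the \texttt{NUM} and \texttt{ID} tokens from the Lisp
-grammar):
-
-\begin{verbatim}
-import re
-
-def pick_token(patterns, text, pos):
-    # Prefer the longest match; on a tie, keep the pattern listed first.
-    best = None
-    for name, regexp in patterns:
-        m = re.compile(regexp).match(text, pos)
-        if m and (best is None or len(m.group(0)) > len(best[1])):
-            best = (name, m.group(0))
-    return best
-
-patterns = [('NUM', '[0-9]+'), ('ID', '[-+*/!@%^&=.a-zA-Z0-9_]+')]
-print pick_token(patterns, '487x', 0)  # ('ID', '487x'): longest match wins
-print pick_token(patterns, '487', 0)   # ('NUM', '487'): tie, first listed wins
-\end{verbatim}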
-
-Now that our grammar is defined, we can run Yapps to produce a parser,
-and then run the parser to produce a parse tree.
-
-\subsubsection*{Running Yapps}
-
-In the Yapps module is a function \texttt{generate} that takes an
-input filename and writes a parser to another file. We can use this
-function to generate the Lisp parser, which is assumed to be in
-\texttt{lisp.g}.
-
-\begin{verbatim}
-% python
-Python 1.5.1 (#1, Sep 3 1998, 22:51:17) [GCC 2.7.2.3] on linux-i386
-Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam
->>> import yapps
->>> yapps.generate('lisp.g')
-\end{verbatim}
-
-At this point, Yapps has written a file \texttt{lisp.py} that contains
-the parser. In that file are two classes (one scanner and one parser)
-and a function (called \texttt{parse}) that puts things together for
-you.
-
-Alternatively, we can run Yapps from the command line to generate the
-parser file:
-
-\begin{verbatim}
-% python yapps.py lisp.g
-\end{verbatim}
-
-After running Yapps either from within Python or from the command
-line, we can use the Lisp parser by calling the \texttt{parse}
-function. The first parameter should be the rule we want to match,
-and the second parameter should be the string to parse.
-
-\begin{verbatim}
->>> import lisp
->>> lisp.parse('expr', '(+ 3 4)')
-[('id', '+'), ('num', 3), ('num', 4)]
->>> lisp.parse('expr', '(print "3 = " (+ 1 2))')
-[('id', 'print'), ('str', '3 = '), [('id', '+'), ('num', 1), ('num', 2)]]
-\end{verbatim}
-
-The \texttt{parse} function is not the only way to use the parser;
-section \ref{sec:Parser-Objects} describes how to access parser objects
-directly.
-
-We've now gone through the steps in creating a grammar, writing a
-grammar file for Yapps, producing a parser, and using the parser. In
-the next example we'll see how rules can take parameters and also how
-to do computations instead of just returning a parse tree.
-
-\mysubsection{Calculator}
-
-A common example parser given in many textbooks is that for simple
-expressions, with numbers, addition, subtraction, multiplication,
-division, and parenthesization of subexpressions. We'll write this
-example in Yapps, evaluating the expression as we parse.
-
-Unlike \texttt{yacc}, Yapps does not have any way to specify
-precedence rules, so we have to do it ourselves. We say that an
-expression is the sum of terms, and that a term is the product of
-factors, and that a factor is a number or a parenthesized expression:
-
-\begin{verbatim}
- expr: factor ( ("+"|"-") factor )*
- factor: term ( ("*"|"/") term )*
- term: NUM | "(" expr ")"
-\end{verbatim}
-
-In order to evaluate the expression as we go, we should keep along an
-accumulator while evaluating the lists of terms or factors. Just as
-we kept a ``result'' variable to build a parse tree for Lisp
-expressions, we will use a variable to evaluate numerical
-expressions. The full grammar is given below:
-
-\begin{verbatim}
-parser Calculator:
- token END: "$" # $ means end of string
- token NUM: "[0-9]+"
-
- rule goal: expr END {{ return expr }}
-
- # An expression is the sum and difference of factors
- rule expr: factor {{ v = factor }}
- ( "[+]" factor {{ v = v+factor }}
- | "-" factor {{ v = v-factor }}
- )* {{ return v }}
-
- # A factor is the product and division of terms
- rule factor: term {{ v = term }}
- ( "[*]" term {{ v = v*term }}
- | "/" term {{ v = v/term }}
- )* {{ return v }}
-
- # A term is either a number or an expression surrounded by parentheses
- rule term: NUM {{ return atoi(NUM) }}
- | "\\(" expr "\\)" {{ return expr }}
-\end{verbatim}
-
-The top-level rule is \emph{goal}, which says that we are looking for
-an expression followed by the end of the string. The \texttt{END}
-token is needed because without it, it isn't clear when to stop
-parsing. For example, the string ``1+3'' could be parsed either as
-the expression ``1'' followed by the string ``+3'' or it could be
-parsed as the expression ``1+3''. By requiring expressions to end
-with \texttt{END}, the parser is forced to take ``1+3''.
-
-In the two rules with repetition, the accumulator is named \texttt{v}.
-After reading in one expression, we initialize the accumulator. Each
-time through the loop, we modify the accumulator by adding,
-subtracting, multiplying by, or dividing the previous accumulator by
-the expression that has been parsed. At the end of the rule, we
-return the accumulator.
-
-The calculator example shows how to process lists of elements using
-loops, as well as how to handle precedence of operators.
-
-\emph{Note:} It's often important to put the \texttt{END} token in, so
-put it in unless you are sure that your grammar has some other
-non-ambiguous token marking the end of the program.
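-
-For instance, if the Calculator grammar above is saved as
-\texttt{calc.g} and run through Yapps to produce \texttt{calc.py} (the
-file names here are only for the sake of the example), the generated
-\texttt{parse} function evaluates while it parses:
-
-\begin{verbatim}
->>> import calc
->>> calc.parse('goal', '1+2*3')
-7
->>> calc.parse('goal', '(1+2)*3')
-9
-\end{verbatim}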
-
-\mysubsection{Calculator with Memory}
-
-In the previous example we learned how to write a calculator that
-evaluates simple numerical expressions. In this section we will
-extend the example to support both local and global variables.
-
-To support global variables, we will add assignment statements to the
-``goal'' rule.
-
-\begin{verbatim}
- rule goal: expr END {{ return expr }}
- | 'set' ID expr END {{ global_vars[ID] = expr }}
- {{ return expr }}
-\end{verbatim}
-
-To use these variables, we need a new kind of terminal:
-
-\begin{verbatim}
- rule term: ... | ID {{ return global_vars[ID] }}
-\end{verbatim}
-
-So far, these changes are straightforward. We simply have a global
-dictionary \texttt{global\_vars} that stores the variables and values,
-we modify it when there is an assignment statement, and we look up
-variables in it when we see a variable name.
-
-To support local variables, we will add variable declarations to the
-set of allowed expressions.
-
-\begin{verbatim}
- rule term: ... | 'let' VAR '=' expr 'in' expr ...
-\end{verbatim}
-
-This is where it becomes tricky. Local variables should be stored in
-a local dictionary, not in the global one. One trick would be to save
-a copy of the global dictionary, modify it, and then restore it
-later. In this example we will instead use \emph{attributes} to
-create local information and pass it to subrules.
-
-A rule can optionally take parameters. When we invoke the rule, we
-must pass in arguments. For local variables, let's use a single
-parameter, \texttt{local\_vars}:
-
-\begin{verbatim}
- rule expr<<local_vars>>: ...
- rule factor<<local_vars>>: ...
- rule term<<local_vars>>: ...
-\end{verbatim}
-
-Each time we want to match \texttt{expr}, \texttt{factor}, or
-\texttt{term}, we will pass the local variables in the current rule to
-the subrule. One interesting case is when we pass as an argument
-something \emph{other} than \texttt{local\_vars}:
-
-\begin{verbatim}
- rule term<<local_vars>>: ...
- | 'let' VAR '=' expr<<local_vars>>
- {{ local_vars = [(VAR, expr)] + local_vars }}
- 'in' expr<<local_vars>>
- {{ return expr }}
-\end{verbatim}
-
-Note that the assignment to the local variables list does not modify
-the original list. This is important to keep local variables from
-being seen outside the ``let''.
-
-The other interesting case is when we find a variable:
-
-\begin{verbatim}
-global_vars = {}
-
-def lookup(map, name):
-    for x,v in map:
-        if x==name: return v
- return global_vars[name]
-%%
- ...
-    rule term<<local_vars>>: ...
- | VAR {{ return lookup(local_vars, VAR) }}
-\end{verbatim}
-
-The lookup function will search through the local variable list, and
-if it cannot find the name there, it will look it up in the global
-variable dictionary.
-
-A complete grammar for this example, including a read-eval-print loop
-for interacting with the calculator, can be found in the examples
-subdirectory included with Yapps.
-
-In this section we saw how to insert code before the parser. We also
-saw how to use attributes to transmit local information from one rule
-to its subrules.
-
-\mysection{Grammars}
-
-Each Yapps grammar has a name, a list of tokens, and a set of
-production rules. A grammar named \texttt{X} will be used to produce
-a parser named \texttt{X} and a scanner named \texttt{XScanner}. As
-in Python, names are case sensitive, start with a letter, and contain
-letters, numbers, and underscores (\_).
-
-There are three kinds of tokens in Yapps: named, inline, and ignored.
-As their name implies, named tokens are given a name, using the token
-construct: \texttt{token \emph{name} : \emph{regexp}}. In a rule, the
-token can be matched by using the name. Inline tokens are regular
-expressions that are used in rules without being declared. Ignored
-tokens are declared using the ignore construct: \texttt{ignore:
- \emph{regexp}}. These tokens are ignored by the scanner, and are
-not seen by the parser. Often whitespace is an ignored token. The
-regular expressions used to define tokens should use the syntax
-defined in the \texttt{re} module, so some symbols may have to be
-backslashed.
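-
-As a compact (made-up) illustration, the following Python snippet
-writes out a tiny grammar that uses all three kinds of tokens and runs
-it through the generator; it assumes the generator module is
-importable as \texttt{yapps2}, the file name used in this tree:
-
-\begin{verbatim}
-grammar = r"""
-parser Pair:
-    ignore:   '[ \t]+'            # ignored token: whitespace
-    token ID: '[a-zA-Z_0-9]+'     # named token
-
-    rule pair: ID {{ key = ID }}  # save the first match for later
-               '=' ID             # '=' is an inline token
-               {{ return (key, ID) }}
-"""
-
-open('pair.g', 'w').write(grammar)
-
-import yapps2
-yapps2.generate('pair.g')         # writes pair.py next to the grammar
-
-# Afterwards:  import pair; pair.parse('pair', 'name = value')
-\end{verbatim}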
-
-Production rules in Yapps have a name and a pattern to match. If the
-rule is parameterized, the name should be followed by a list of
-parameter names in \verb|<<...>>|. A pattern can be a simple pattern
-or a compound pattern. Simple patterns are the name of a named token,
-a regular expression in quotes (inline token), the name of a
-production rule (followed by arguments in \verb|<<...>>|, if the rule
-has parameters), and single line Python statements (\verb|{{...}}|).
-Compound patterns are sequences (\verb|A B C ...|), choices (
-\verb:A | B | C | ...:), options (\verb|[...]|), zero-or-more repetitions
-(\verb|...*|), and one-or-more repetitions (\verb|...+|). Like
-regular expressions, repetition operators have a higher precedence
-than sequences, and sequences have a higher precedence than choices.
-
-Whenever \verb|{{...}}| is used, a legal one-line Python statement
-should be put inside the braces. The token \verb|}}| should not
-appear within the \verb|{{...}}| section, even within a string, since
-Yapps does not attempt to parse the Python statement. A workaround
-for strings is to put two strings together (\verb|"}" "}"|), or to use
-backslashes (\verb|"}\}"|). At the end of a rule you should use a
-\verb|{{ return X }}| statement to return a value. However, you
-should \emph{not} use any control statements (\texttt{return},
-\texttt{continue}, \texttt{break}) in the middle of a rule. Yapps
-needs to make assumptions about the control flow to generate a parser,
-and any changes to the control flow will confuse Yapps.
-
-The \verb|<<...>>| form can occur in two places: to define parameters
-to a rule and to give arguments when matching a rule. Parameters use
-the syntax used for Python functions, so they can include default
-arguments and the special forms (\verb|*args| and \verb|**kwargs|).
-Arguments use the syntax for Python function call arguments, so they
-can include normal arguments and keyword arguments. The token
-\verb|>>| should not appear within the \verb|<<...>>| section.
-
-In both the statements and rule arguments, you can use names defined
-by the parser to refer to matched patterns. You can refer to the text
-matched by a named token by using the token name. You can use the
-value returned by a production rule by using the name of that rule.
-If a name \texttt{X} is matched more than once (such as in loops), you
-will have to save the earlier value(s) in a temporary variable, and
-then use that temporary variable in the return value. The next
-section has an example of a name that occurs more than once.
-
-\mysubsection{Left Factoring}
-\label{sec:Left-Factoring}
-
-Yapps produces ELL(1) parsers, which determine which clause to match
-based on the first token available. Sometimes the leftmost tokens of
-several clauses may be the same. The classic example is the
-\emph{if/then/else} construct in Pascal:
-
-\begin{verbatim}
-rule stmt: "if" expr "then" stmt {{ then_part = stmt }}
- "else" stmt {{ return ('If',expr,then_part,stmt) }}
- | "if" expr "then" stmt {{ return ('If',expr,stmt,[]) }}
-\end{verbatim}
-
-(Note that we have to save the first \texttt{stmt} into a variable
-because there is another \texttt{stmt} that will be matched.) The
-left portions of the two clauses are the same, which presents a
-problem for the parser. The solution is \emph{left-factoring}: the
-common parts are put together, and \emph{then} a choice is made about
-the remaining part:
-
-\begin{verbatim}
-rule stmt: "if" expr
- "then" stmt {{ then_part = stmt }}
- {{ else_part = [] }}
- [ "else" stmt {{ else_part = stmt }} ]
- {{ return ('If', expr, then_part, else_part) }}
-\end{verbatim}
-
-Unfortunately, the classic \emph{if/then/else} situation is
-\emph{still} ambiguous when you left-factor. Yapps can deal with this
-situation, but will report a warning; see section
-\ref{sec:Ambiguous-Grammars} for details.
-
-In general, replace rules of the form:
-
-\begin{verbatim}
-rule A: a b1 {{ return E1 }}
- | a b2 {{ return E2 }}
- | c3 {{ return E3 }}
- | c4 {{ return E4 }}
-\end{verbatim}
-
-with rules of the form:
-
-\begin{verbatim}
-rule A: a ( b1 {{ return E1 }}
- | b2 {{ return E2 }}
- )
- | c3 {{ return E3 }}
- | c4 {{ return E4 }}
-\end{verbatim}
-
-\mysubsection{Left Recursion}
-
-A common construct in grammars is for matching a list of patterns,
-sometimes separated with delimiters such as commas or semicolons. In
-LR-based parser systems, we can parse a list with something like this:
-
-\begin{verbatim}
-rule sum: NUM {{ return NUM }}
- | sum "+" NUM {{ return (sum, NUM) }}
-\end{verbatim}
-
-Parsing \texttt{1+2+3+4} would produce the output
-\texttt{(((1,2),3),4)}, which is what we want from a left-associative
-addition operator. Unfortunately, this grammar is \emph{left
-recursive,} because the \texttt{sum} rule contains a clause that
-begins with \texttt{sum}. (The recursion occurs at the left side of
-the clause.)
-
-We must restructure this grammar to be \emph{right recursive} instead:
-
-\begin{verbatim}
-rule sum: NUM {{ return NUM }}
- | NUM "+" sum {{ return (NUM, sum) }}
-\end{verbatim}
-
-Unfortunately, using this grammar, \texttt{1+2+3+4} would be parsed as
-\texttt{(1,(2,(3,4)))}, which no longer follows left associativity.
-The rule also needs to be left-factored. Instead, we write the
-pattern as a loop:
-
-\begin{verbatim}
-rule sum: NUM {{ v = NUM }}
- ( "[+]" NUM {{ v = (v,NUM) }} )*
- {{ return v }}
-\end{verbatim}
-
-In general, replace rules of the form:
-
-\begin{verbatim}
-rule A: A a1 -> << E1 >>
- | A a2 -> << E2 >>
- | b3 -> << E3 >>
- | b4 -> << E4 >>
-\end{verbatim}
-
-with rules of the form:
-
-\begin{verbatim}
-rule A: ( b3 {{ A = E3 }}
- | b4 {{ A = E4 }} )
- ( a1 {{ A = E1 }}
- | a2 {{ A = E2 }} )*
- {{ return A }}
-\end{verbatim}
-
-We have taken a rule that proved problematic with left recursion and
-turned it into a rule that works well with looping constructs.
-
-\mysubsection{Ambiguous Grammars}
-\label{sec:Ambiguous-Grammars}
-
-In section \ref{sec:Left-Factoring} we saw the classic if/then/else
-ambiguity, which occurs because the ``else \ldots'' portion of an ``if
-\ldots then \ldots else \ldots'' construct is optional. Programs with
-nested if/then/else constructs can be ambiguous when one of the else
-clauses is missing:
-\begin{verbatim}
-if 1 then if 1 then
- if 5 then if 5 then
- x := 1; x := 1;
- else else
- y := 9; y := 9;
-\end{verbatim}
-
-The indentation shows that the program can be parsed in two different
-ways. (Of course, if we all would adopt Python's indentation-based
-structuring, this would never happen!) Usually we want the parsing on
-the left: the ``else'' should be associated with the closest ``if''
-statement. In section \ref{sec:Left-Factoring} we ``solved'' the
-problem by using the following grammar:
-
-\begin{verbatim}
-rule stmt: "if" expr
- "then" stmt {{ then_part = stmt }}
- {{ else_part = [] }}
- [ "else" stmt {{ else_part = stmt }} ]
- {{ return ('If', expr, then_part, else_part) }}
-\end{verbatim}
-
-Here, we have an optional match of ``else'' followed by a statement.
-The ambiguity is that if an ``else'' is present, it is not clear
-whether you want it parsed immediately or if you want it to be parsed
-by the outer ``if''.
-
-Yapps will deal with the situation by matching the else pattern
-when it can. The parser will work in this case because it prefers the
-\emph{first} matching clause, which tells Yapps to parse the ``else''.
-That is exactly what we want!
-
-For ambiguity cases with choices, Yapps will choose the \emph{first}
-matching choice. However, remember that Yapps only looks at the first
-token to determine its decision, so {\tt (a b | a c)} will result in
-Yapps choosing {\tt a b} even when the input is {\tt a c}. It only
-looks at the first token, {\tt a}, to make its decision.
-
-\mysection{Customization}
-
-Both the parsers and the scanners can be customized. The parser is
-usually extended by subclassing, and the scanner can either be
-subclassed or completely replaced.
-
-\mysubsection{Customizing Parsers}
-
-If additional fields and methods are needed in order for a parser to
-work, Python subclassing can be used. (This is unlike parser classes
-written in static languages, in which these fields and methods must be
-defined in the generated parser class.) We simply subclass the
-generated parser, and add any fields or methods required. Expressions
-in the grammar can call methods of the subclass to perform any actions
-that cannot be expressed as a simple expression. For example,
-consider this simple grammar:
-
-\begin{verbatim}
-parser X:
- rule goal: "something" {{ self.printmsg() }}
-\end{verbatim}
-
-The \texttt{printmsg} function need not be implemented in the parser
-class \texttt{X}; it can be implemented in a subclass:
-
-\begin{verbatim}
-import Xparser
-
-class MyX(Xparser.X):
- def printmsg(self):
- print "Hello!"
-\end{verbatim}
-
-\mysubsection{Customizing Scanners}
-
-The generated parser class is not dependent on the generated scanner
-class. A scanner object is passed to the parser object's constructor
-in the \texttt{parse} function. To use a different scanner, write
-your own function to construct parser objects, with an instance of a
-different scanner. Scanner objects must have a \texttt{token} method
-that accepts an integer \texttt{N} as well as a list of allowed token
-types, and returns the Nth token, as a tuple. The default scanner
-raises \texttt{NoMoreTokens} if no tokens are available, and
-\texttt{SyntaxError} if no token could be matched. However, the
-parser does not rely on these exceptions; only the \texttt{parse}
-convenience function (which calls \texttt{wrap\_error\_reporter}) and
-the \texttt{print\_error} error display function use those exceptions.
-
-The tuples representing tokens have four elements. The first two are
-the beginning and ending indices of the matched text in the input
-string. The third element is the type tag, matching either the name
-of a named token or the quoted regexp of an inline or ignored token.
-The fourth element of the token tuple is the matched text. If the
-input string is \texttt{s}, and the token tuple is
-\texttt{(b,e,type,val)}, then \texttt{val} should be equal to
-\texttt{s[b:e]}.
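-
-For instance, a replacement scanner that simply hands the parser a
-list of ready-made tokens only needs a \texttt{token} method with the
-interface just described (the \texttt{ListScanner} class below is
-invented for this sketch, and \texttt{X} stands for any generated
-parser class):
-
-\begin{verbatim}
-class ListScanner:
-    def __init__(self, tokens):
-        # tokens is a list of (begin, end, type, value) tuples
-        self.tokens = tokens
-    def token(self, i, restrict=None):
-        # A real scanner should raise NoMoreTokens when i runs past
-        # the end of the available input.
-        return self.tokens[i]
-
-# Use it in place of the generated scanner:
-#   P = X(ListScanner(list_of_token_tuples))
-\end{verbatim}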
-
-The generated parsers do not use the beginning or ending index. They use
-only the token type and value. However, the default error reporter
-uses the beginning and ending index to show the user where the error
-is.
-
-\mysection{Parser Mechanics}
-
-The base parser class (Parser) defines two methods, \texttt{\_scan}
-and \texttt{\_peek}, and two fields, \texttt{\_pos} and
-\texttt{\_scanner}. The generated parser inherits from the base
-parser, and contains one method for each rule in the grammar. To
-avoid name clashes, do not use names that begin with an underscore
-(\texttt{\_}).
-
-\mysubsection{Parser Objects}
-\label{sec:Parser-Objects}
-
-Yapps produces as output two exception classes, a scanner class, a
-parser class, and a function \texttt{parse} that puts everything
-together. The \texttt{parse} function does not have to be used;
-instead, one can create a parser and scanner object and use them
-together for parsing.
-
-\begin{verbatim}
- def parse(rule, text):
- P = X(XScanner(text))
- return wrap_error_reporter(P, rule)
-\end{verbatim}
-
-The \texttt{parse} function takes a name of a rule and an input string
-as input. It creates a scanner and parser object, then calls
-\texttt{wrap\_error\_reporter} to execute the method in the parser
-object named \texttt{rule}. The wrapper function will call the
-appropriate parser rule and report any parsing errors to standard
-output.
-
-There are several situations in which the \texttt{parse} function
-would not be useful. If a different parser or scanner is being used,
-or exceptions are to be handled differently, a new \texttt{parse}
-function would be required. The supplied \texttt{parse} function can
-be used as a template for writing a function for your own needs. An
-example of a custom parse function is the \texttt{generate} function
-in \texttt{Yapps.py}.
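-
-As a small example of such a variant, a \texttt{parse\_file} helper
-can read a whole file up front (the default scanner only accepts
-strings) and then follow exactly the same pattern as the generated
-\texttt{parse} function; \texttt{X} and \texttt{XScanner} stand for a
-generated parser/scanner pair:
-
-\begin{verbatim}
-def parse_file(rule, filename):
-    text = open(filename).read()
-    P = X(XScanner(text))
-    return wrap_error_reporter(P, rule)
-\end{verbatim}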
-
-\mysubsection{Context Sensitive Scanner}
-
-Unlike most scanners, the scanner produced by Yapps can take into
-account the context in which tokens are needed, and try to match only
-good tokens. For example, in the grammar:
-
-\begin{verbatim}
-parser IniFile:
- token ID: "[a-zA-Z_0-9]+"
- token VAL: ".*"
-
- rule pair: ID "[ \t]*=[ \t]*" VAL "\n"
-\end{verbatim}
-
-we would like to scan lines of text and pick out a name/value pair.
-In a conventional scanner, the input string \texttt{shell=progman.exe}
-would be turned into a single token of type \texttt{VAL}. The Yapps
-scanner, however, knows that at the beginning of the line, an
-\texttt{ID} is expected, so it will return \texttt{"shell"} as a token
-of type \texttt{ID}. Later, it will return \texttt{"progman.exe"} as
-a token of type \texttt{VAL}.
-
-Context sensitivity decreases the separation between scanner and
-parser, but it is useful in parsers like \texttt{IniFile}, where the
-tokens themselves are not unambiguous, but \emph{are} unambiguous
-given a particular stage in the parsing process.
-
-Unfortunately, context sensitivity can make it more difficult to
-detect errors in the input. For example, in parsing a Pascal-like
-language with ``begin'' and ``end'' as keywords, a context sensitive
-scanner would only match ``end'' as the END token if the parser is in
-a place that will accept the END token. If not, then the scanner
-would match ``end'' as an identifier. To disable the context
-sensitive scanner in Yapps, add the
-\texttt{context-insensitive-scanner} option to the grammar:
-
-\begin{verbatim}
-Parser X:
- option: "context-insensitive-scanner"
-\end{verbatim}
-
-Context-insensitive scanning makes the parser look cleaner as well.
-
-\mysubsection{Internal Variables}
-
-There are two internal fields that may be of use. The parser object
-has two fields, \texttt{\_pos}, which is the index of the current
-token being matched, and \texttt{\_scanner}, which is the scanner
-object. The token itself can be retrieved by accessing the scanner
-object and calling the \texttt{token} method with the token index.
-However, if you call \texttt{token} before the token has been
-requested by the parser, it may mess up a context-sensitive
-scanner.\footnote{When using a context-sensitive scanner, the parser
-tells the scanner what the valid token types are at each point. If
-you call \texttt{token} before the parser can tell the scanner the
-valid token types, the scanner will attempt to match without
-considering the context.} A potentially useful combination of these
-fields is to extract the portion of the input matched by the current
-rule. To do this, just save the scanner state
-(\texttt{\_scanner.pos}) before the text is matched and then again
-after the text is matched:
-
-\begin{verbatim}
- rule R:
- {{ start = self._scanner.pos }}
- a b c
- {{ end = self._scanner.pos }}
- {{ print 'Text is', self._scanner.input[start:end] }}
-\end{verbatim}
-
-\mysubsection{Pre- and Post-Parser Code}
-
-Sometimes the parser code needs to rely on helper variables,
-functions, and classes. A Yapps grammar can optionally be surrounded
-by double percent signs, to separate the grammar from Python code.
-
-\begin{verbatim}
-... Python code ...
-%%
-... Yapps grammar ...
-%%
-... Python code ...
-\end{verbatim}
-
-The second \verb|%%| can be omitted if there is no Python code at the
-end, and the first \verb|%%| can be omitted if there is no extra
-Python code at all. (To have code only at the end, both separators
-are required.)
-
-If the second \verb|%%| is omitted, Yapps will insert testing code
-that allows you to use the generated parser to parse a file.
-
-The extended calculator example in the Yapps examples subdirectory
-includes both pre-parser and post-parser code.
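-
-As a small made-up illustration (the grammar, token names, and helper
-function below are invented for this sketch and are not taken from
-the calculator example; the driver assumes the generated class names
-\texttt{Line} and \texttt{LineScanner}, following the
-\texttt{X}/\texttt{XScanner} convention above), a grammar file with
-both sections might look like this:
-
-\begin{verbatim}
-# Python code before the grammar: helpers the rules may call
-import string
-
-def clean(s): return string.strip(s)
-
-%%
-parser Line:
-    token ID:  "[a-zA-Z_0-9]+"
-    token EOL: "\r?\n"
-    ignore:    "[ \t]+"
-
-    rule line: ID {{ name = clean(ID) }} EOL {{ return name }}
-%%
-# Python code after the grammar: a tiny driver built directly on
-# the generated classes
-if __name__ == '__main__':
-    import sys
-    P = Line(LineScanner(sys.stdin.readline()))
-    print wrap_error_reporter(P, 'line')
-\end{verbatim}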
-
-\mysubsection{Representation of Grammars}
-
-For each kind of pattern there is a class derived from Pattern. Yapps
-has classes for Terminal, NonTerminal, Sequence, Choice, Option, Plus,
-Star, and Eval. Each of these classes has the following interface:
-
-\begin{itemize}
- \item[setup(\emph{gen})] Set accepts-$\epsilon$, and call
- \emph{gen.changed()} if it changed. This function can change the
- flag from false to true but \emph{not} from true to false.
-  \item[update(\emph{gen})] Set \first and \follow, and call
- \emph{gen.changed()} if either changed. This function can add to
- the sets but \emph{not} remove from them.
- \item[output(\emph{gen}, \emph{indent})] Generate code for matching
- this rule, using \emph{indent} as the current indentation level.
- Writes are performed using \emph{gen.write}.
- \item[used(\emph{vars})] Given a list of variables \emph{vars},
- return two lists: one containing the variables that are used, and
- one containing the variables that are assigned. This function is
- used for optimizing the resulting code.
-\end{itemize}
-
-Both \emph{setup} and \emph{update} monotonically increase the
-variables they modify. Since the variables can only increase a finite
-number of times, we can repeatedly call these functions until the
-variables stabilize. The \emph{used} function is not currently
-implemented.
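-
-The driver loop implied by the paragraph above is an ordinary
-fixpoint iteration. A minimal sketch (the \texttt{\_changed} flag and
-the \texttt{patterns} list here are hypothetical bookkeeping, not the
-generator's actual code):
-
-\begin{verbatim}
- # run setup() to a fixpoint, then update() to a fixpoint
- while 1:
-     gen._changed = 0            # cleared here, set by gen.changed()
-     for p in patterns: p.setup(gen)
-     if not gen._changed: break
- while 1:
-     gen._changed = 0
-     for p in patterns: p.update(gen)
-     if not gen._changed: break
-\end{verbatim}
-
-Because the sets and flags only grow, and can only grow finitely
-often, both loops terminate.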
-
-With each pattern in the grammar Yapps associates three pieces of
-information: the \first set, the \follow set, and the
-accepts-$\epsilon$ flag.
-
-The \first set contains the tokens that can appear as we start
-matching the pattern. The \follow set contains the tokens that can
-appear immediately after we match the pattern. The accepts-$\epsilon$
-flag is true if the pattern can match no tokens. In this case, \first
-will contain all the elements in \follow. The \follow set is not
-needed when accepts-$\epsilon$ is false, and may not be accurate in
-those cases.
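-
-As a small hand-worked example (not output from the tool), consider:
-
-\begin{verbatim}
- rule A: | 'x'
- rule B: A 'y'
-\end{verbatim}
-
-Here {\tt A} accepts $\epsilon$ and its \follow set is {\tt y}
-(because of {\tt B}), so its \first set is {\tt x, y}. {\tt B} does
-not accept $\epsilon$, and its \first set is also {\tt x, y}.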
-
-Yapps does not compute these sets precisely. Its approximation can
-miss certain cases, such as this one:
-
-\begin{verbatim}
- rule C: ( A* | B )
- rule B: C [A]
-\end{verbatim}
-
-Yapps will calculate {\tt C}'s \follow set to include {\tt A}.
-However, {\tt C} will always match all the {\tt A}'s, so {\tt A} will
-never follow it. Yapps 2.0 does not properly handle this construct,
-but if it seems important, I may add support for it in a future
-version.
-
-Yapps also cannot handle constructs that depend on the calling
-sequence. For example:
-
-\begin{verbatim}
- rule R: U | 'b'
- rule S: | 'c'
- rule T: S 'b'
- rule U: S 'a'
-\end{verbatim}
-
-The \follow set for {\tt S} includes {\tt a} and {\tt b}. Since {\tt
- S} can be empty, the \first set for {\tt S} should include {\tt a},
-{\tt b}, and {\tt c}. However, when parsing {\tt R}, if the lookahead
-is {\tt b} we should \emph{not} parse {\tt U}. That's because in {\tt
- U}, {\tt S} is followed by {\tt a} and not {\tt b}. Therefore in
-{\tt R}, we should choose rule {\tt U} only if there is an {\tt a} or
-{\tt c}, but not if there is a {\tt b}. Yapps and many other LL(1)
-systems do not distinguish {\tt S b} and {\tt S a}, making {\tt
- S}'s \follow set {\tt a, b}, and making {\tt R} always try to match
-{\tt U}. In this case we can solve the problem by changing {\tt R} to
-\verb:'b' | U: but it may not always be possible to solve all such
-problems in this way.
-
-\appendix
-
-\mysection{Grammar for Parsers}
-
-This is the grammar for parsers, without any Python code mixed in.
-The complete grammar can be found in \texttt{parsedesc.g} in the Yapps
-distribution.
-
-\begin{verbatim}
-parser ParserDescription:
- ignore: "\\s+"
- ignore: "#.*?\r?\n"
- token END: "$" # $ means end of string
- token ATTR: "<<.+?>>"
- token STMT: "{{.+?}}"
- token ID: '[a-zA-Z_][a-zA-Z_0-9]*'
- token STR: '[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"'
-
- rule Parser: "parser" ID ":"
- Options
- Tokens
- Rules
- END
-
- rule Options: ( "option" ":" STR )*
- rule Tokens: ( "token" ID ":" STR | "ignore" ":" STR )*
- rule Rules: ( "rule" ID OptParam ":" ClauseA )*
-
- rule ClauseA: ClauseB ( '[|]' ClauseB )*
- rule ClauseB: ClauseC*
- rule ClauseC: ClauseD [ '[+]' | '[*]' ]
- rule ClauseD: STR | ID [ATTR] | STMT
-               | '\\(' ClauseA '\\)' | '\\[' ClauseA '\\]'
-\end{verbatim}
-
-\mysection{Upgrading}
-
-Yapps 2.0 is not backwards compatible with Yapps 1.0. In this section
-are some tips for upgrading:
-
-\begin{enumerate}
- \item Yapps 1.0 was distributed as a single file. Yapps 2.0 is
- instead distributed as two Python files: a \emph{parser generator}
- (26k) and a \emph{parser runtime} (5k). You need both files to
- create parsers, but you need only the runtime (\texttt{yappsrt.py})
- to use the parsers.
-
- \item Yapps 1.0 supported Python 1.4 regular expressions from the
- \texttt{regex} module. Yapps 2.0 uses Python 1.5 regular
- expressions from the \texttt{re} module. \emph{The new syntax for
- regular expressions is not compatible with the old syntax.}
- Andrew Kuchling has a \htmladdnormallink{guide to converting
- regular
- expressions}{http://www.python.org/doc/howto/regex-to-re/} on his
- web page.
-
-  \item Yapps 1.0 expects a pattern and then a return value, written
-    \verb|->| \verb|<<...>>|. Yapps 2.0 allows patterns and Python
-    statements to be mixed. To convert a rule like this:
-
-\begin{verbatim}
-rule R: A B C -> << E1 >>
- | X Y Z -> << E2 >>
-\end{verbatim}
-
- to Yapps 2.0 form, replace the return value specifiers with return
- statements:
-
-\begin{verbatim}
-rule R: A B C {{ return E1 }}
- | X Y Z {{ return E2 }}
-\end{verbatim}
-
- \item Yapps 2.0 does not perform tail recursion elimination. This
- means any recursive rules you write will be turned into recursive
- methods in the parser. The parser will work, but may be slower.
-    It can be made faster by rewriting recursive rules to use the
-    looping operators \verb|*| and \verb|+| provided in Yapps 2.0
-    (see the sketch after this list).
-
-\end{enumerate}
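-
-For example (a sketch with invented rule and token names, not taken
-from any shipped grammar), a recursive list rule such as
-
-\begin{verbatim}
-rule items: ITEM items {{ return [ITEM] + items }}
-          |            {{ return [] }}
-\end{verbatim}
-
-can be rewritten with the \verb|*| operator as
-
-\begin{verbatim}
-rule items: {{ result = [] }}
-            ( ITEM {{ result.append(ITEM) }} )*
-            {{ return result }}
-\end{verbatim}
-
-which turns the chain of recursive calls into a loop.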
-
-\mysection{Troubleshooting}
-
-\begin{itemize}
- \item A common error is to write a grammar that doesn't have an END
- token. End tokens are needed when it is not clear when to stop
- parsing. For example, when parsing the expression {\tt 3+5}, it is
- not clear after reading {\tt 3} whether to treat it as a complete
- expression or whether the parser should continue reading.
-    Therefore the grammar for numeric expressions should include an
-    end token (see the sketch after this list). Another example is
-    the grammar for Lisp expressions. In
- Lisp, it is always clear when you should stop parsing, so you do
- \emph{not} need an end token. In fact, it may be more useful not
- to have an end token, so that you can read in several Lisp expressions.
- \item If there is a chance of ambiguity, make sure to put the choices
- in the order you want them checked. Usually the most specific
- choice should be first. Empty sequences should usually be last.
-  \item The context-sensitive scanner is not appropriate for all
-    grammars. You might try the context-insensitive scanner by adding
-    the {\tt context-insensitive-scanner} option to the grammar.
- \item If performance turns out to be a problem, try writing a custom
- scanner. The Yapps scanner is rather slow (but flexible and easy
- to understand).
-\end{itemize}
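-
-For the first point, the usual pattern is to declare an end-of-input
-token and have the top-level rule consume it (a sketch; the rule
-names are invented):
-
-\begin{verbatim}
-  token END: "$"
-
-  rule goal: expr END {{ return expr }}
-\end{verbatim}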
-
-\mysection{History}
-
-Yapps 1 had several limitations that bothered me while writing
-parsers:
-
-\begin{enumerate}
- \item It was not possible to insert statements into the generated
-    parser. A common workaround was to write an auxiliary function
- that executed those statements, and to call that function as part
- of the return value calculation. For example, several of my
- parsers had an ``append(x,y)'' function that existed solely to call
- ``x.append(y)''.
- \item The way in which grammars were specified was rather
- restrictive: a rule was a choice of clauses. Each clause was a
- sequence of tokens and rule names, followed by a return value.
- \item Optional matching had to be put into a separate rule because
- choices were only made at the beginning of a rule.
- \item Repetition had to be specified in terms of recursion. Not only
- was this awkward (sometimes requiring additional rules), I had to
- add a tail recursion optimization to Yapps to transform the
- recursion back into a loop.
-\end{enumerate}
-
-Yapps 2 addresses each of these limitations.
-
-\begin{enumerate}
- \item Statements can occur anywhere within a rule. (However, only
- one-line statements are allowed; multiline blocks marked by
- indentation are not.)
- \item Grammars can be specified using any mix of sequences, choices,
- tokens, and rule names. To allow for complex structures,
- parentheses can be used for grouping.
- \item Given choices and parenthesization, optional matching can be
- expressed as a choice between some pattern and nothing. In
- addition, Yapps 2 has the convenience syntax \verb|[A B ...]| for
- matching \verb|A B ...| optionally.
- \item Repetition operators \verb|*| for zero or more and \verb|+| for
- one or more make it easy to specify repeating patterns.
-\end{enumerate}
-
-It is my hope that Yapps 2 will be flexible enough to meet my needs
-for another year, yet simple enough that I do not hesitate to use it.
-
-\mysection{Future Extensions}
-\label{sec:future}
-
-I am still investigating the possibility of LL(2) and higher
-lookahead. However, it looks like the resulting parsers will be
-somewhat ugly.
-
-It would be nice to control choices with user-defined predicates.
-
-The most likely future extension is backtracking. A grammar pattern
-like \verb|(VAR ':=' expr)? {{ return Assign(VAR,expr) }} : expr {{ return expr }}|
-would turn into code that attempted to match \verb|VAR ':=' expr|. If
-it succeeded, it would run \verb|{{ return ... }}|. If it failed, it
-would match \verb|expr {{ return expr }}|. Backtracking may make it
-less necessary to write LL(2) grammars.
-
-\mysection{References}
-
-\begin{enumerate}
- \item The \htmladdnormallink{Python-Parser
- SIG}{http://www.python.org/sigs/parser-sig/} is the first place
- to look for a list of parser systems for Python.
-
- \item ANTLR/PCCTS, by Terrence Parr, is available at
- \htmladdnormallink{The ANTLR Home Page}{http://www.antlr.org/}.
-
- \item PyLR, by Scott Cotton, is at \htmladdnormallink{his Starship
- page}{http://starship.skyport.net/crew/scott/PyLR.html}.
-
- \item John Aycock's \htmladdnormallink{Compiling Little Languages
-    Framework}{http://www.csr.UVic.CA/\~{}aycock/python/}.
-
- \item PyBison, by Scott Hassan, can be found at
- \htmladdnormallink{his Python Projects
- page}{http://coho.stanford.edu/\~{}hassan/Python/}.
-
- \item mcf.pars, by Mike C. Fletcher, is available at
- \htmladdnormallink{his web
- page}{http://www.golden.net/\~{}mcfletch/programming/}.
-
- \item kwParsing, by Aaron Watters, is available at
- \htmladdnormallink{his Starship
- page}{http://starship.skyport.net/crew/aaron_watters/kwParsing/}.
-\end{enumerate}
-
-\end{document}
diff --git a/util/sconfig/yappsrt.py b/util/sconfig/yappsrt.py
deleted file mode 100644
index 2ce2480f08..0000000000
--- a/util/sconfig/yappsrt.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# Yapps 2.0 Runtime
-#
-# This module is needed to run generated parsers.
-
-from string import *
-import exceptions
-import re
-
-class SyntaxError(Exception):
- """When we run into an unexpected token, this is the exception to use"""
- def __init__(self, pos=-1, msg="Bad Token"):
- self.pos = pos
- self.msg = msg
- def __repr__(self):
- if self.pos < 0: return "#<syntax-error>"
- else: return "SyntaxError[@ char " + `self.pos` + ": " + self.msg + "]"
-
-class NoMoreTokens(Exception):
- """Another exception object, for when we run out of tokens"""
- pass
-
-class Scanner:
- def __init__(self, patterns, ignore, input):
- """Patterns is [(terminal,regex)...]
- Ignore is [terminal,...];
- Input is a string"""
- self.tokens = []
- self.restrictions = []
- self.input = input
- self.pos = 0
- self.ignore = ignore
- # The stored patterns are a pair (compiled regex,source
- # regex). If the patterns variable passed in to the
- # constructor is None, we assume that the class already has a
- # proper .patterns list constructed
- if patterns is not None:
- self.patterns = []
- for k,r in patterns:
- self.patterns.append( (k, re.compile(r)) )
-
- def token(self, i, restrict=0):
- """Get the i'th token, and if i is one past the end, then scan
- for another token; restrict is a list of tokens that
- are allowed, or 0 for any token."""
- if i == len(self.tokens): self.scan(restrict)
- if i < len(self.tokens):
- # Make sure the restriction is more restricted
- if restrict and self.restrictions[i]:
- for r in restrict:
- if r not in self.restrictions[i]:
- raise "Unimplemented: restriction set changed"
- return self.tokens[i]
- raise NoMoreTokens()
-
- def __repr__(self):
- """Print the last 10 tokens that have been scanned in"""
- output = ''
- for t in self.tokens[-10:]:
- output = '%s\n (@%s) %s = %s' % (output,t[0],t[2],`t[3]`)
- return output
-
- def scan(self, restrict):
- """Should scan another token and add it to the list, self.tokens,
- and add the restriction to self.restrictions"""
- # Keep looking for a token, ignoring any in self.ignore
- while 1:
- # Search the patterns for the longest match, with earlier
- # tokens in the list having preference
- best_match = -1
- best_pat = '(error)'
- for p, regexp in self.patterns:
- # First check to see if we're ignoring this token
- if restrict and p not in restrict and p not in self.ignore:
- continue
- m = regexp.match(self.input, self.pos)
- if m and len(m.group(0)) > best_match:
- # We got a match that's better than the previous one
- best_pat = p
- best_match = len(m.group(0))
-
- # If we didn't find anything, raise an error
- if best_pat == '(error)' and best_match < 0:
- msg = "Bad Token"
- if restrict:
- msg = "Trying to find one of "+join(restrict,", ")
- raise SyntaxError(self.pos, msg)
-
- # If we found something that isn't to be ignored, return it
- if best_pat not in self.ignore:
- # Create a token with this data
- token = (self.pos, self.pos+best_match, best_pat,
- self.input[self.pos:self.pos+best_match])
- self.pos = self.pos + best_match
- # Only add this token if it's not in the list
- # (to prevent looping)
- if not self.tokens or token != self.tokens[-1]:
- self.tokens.append(token)
- self.restrictions.append(restrict)
- return
- else:
- # This token should be ignored ..
- self.pos = self.pos + best_match
-
-class Parser:
- def __init__(self, scanner):
- self._scanner = scanner
- self._pos = 0
-
- def _peek(self, *types):
- """Returns the token type for lookahead; if there are any args
- then the list of args is the set of token types to allow"""
- tok = self._scanner.token(self._pos, types)
- return tok[2]
-
- def _scan(self, type):
- """Returns the matched text, and moves to the next token"""
- tok = self._scanner.token(self._pos, [type])
- if tok[2] != type:
- raise SyntaxError(tok[0], 'Trying to find '+type)
- self._pos = 1+self._pos
- return tok[3]
-
-
-
-def print_error(input, err, scanner):
- """This is a really dumb long function to print error messages nicely."""
- p = err.pos
- # Figure out the line number
- line = count(input[:p], '\n')
- print err.msg+" on line "+`line+1`+":"
- # Now try printing part of the line
- text = input[max(p-80,0):p+80]
- p = p - max(p-80,0)
-
- # Strip to the left
- i = rfind(text[:p],'\n')
- j = rfind(text[:p],'\r')
- if i < 0 or (j < i and j >= 0): i = j
- if i >= 0 and i < p:
- p = p - i - 1
- text = text[i+1:]
-
- # Strip to the right
- i = find(text,'\n',p)
- j = find(text,'\r',p)
- if i < 0 or (j < i and j >= 0): i = j
- if i >= 0:
- text = text[:i]
-
- # Now shorten the text
- while len(text) > 70 and p > 60:
- # Cut off 10 chars
- text = "..." + text[10:]
- p = p - 7
-
- # Now print the string, along with an indicator
- print '> ',text
- print '> ',' '*p + '^'
- print 'List of nearby tokens:', scanner
-
-def wrap_error_reporter(parser, rule):
- try: return getattr(parser, rule)()
- except SyntaxError, s:
- input = parser._scanner.input
- try:
- print_error(input, s, parser._scanner)
- except ImportError:
- print 'Syntax Error',s.msg,'on line',1+count(input[:s.pos], '\n')
- except NoMoreTokens:
- print 'Could not complete parsing; stopped around here:'
- print parser._scanner
-