_____________________________________________________________________________
Immediately
-// use 'a'-'z' or 'a-z' instead of [a-z]?
-// EOF token?
-// #include (with renaming?)
-
+ - use 'a'-'z' or 'a-z' instead of [a-z]?
+ - EOF token?
- de-genericize?
-
- better toString() methods all around...
-
- - cleanup: Forest, ParseFailed, Parser, Union, (just a bit: GSS,Node)
-
 - - circular grammars?
- s = A
- A = A | "b"
- foo.add(x)
foo.add(y.andnot(x)) ==> this is broken
- - Annotation Tutorial
+ - distinguish Conjunct from Sequence?
+ => !(Conjunct instanceof Reducible)
+ - document the assumption that Sequences that match epsilon
+ must have tag, and that ONLY that tag is returned
+ when the sequence matches epsilon
+ - try to avoid building the parts of the tree that end up getting
+ dropped
+ - double-check all the region logic
..................................................
+ - paper/techreport opportunities
+ - interaction between RNGLR and follow restrictions
+ - "doomed node" optimization
+
+ - automatically collect time statistics and display
- serializable parse tables?
- - Treewalker code compiler?
+ - better ambiguity reporting
+ - colorized tree-diffs?
+ - graphviz?
______________________________________________________________________________
v1.1
+ - Treewalker code compiler?
+ - circular grammars?
+ s = A
+ A = A | "b"
+ - skeleton generator?
- precedes restrictions ("<-")
- MUST HAVE BETTER ERROR MESSAGES
- use for developing java15.g
- More topology untangling [later]
- grammar highlighting?
- Forest needs a "manual access" API
- - the unwrap bit in Forest makes it really hard to expose an API for forests
-
-
+ - the unwrap bit in Forest makes it really hard
+ to expose an API for forests
+ - rewriting language? multiple passes?
______________________________________________________________________________
v1.2
-// Copyright 2006 all rights reserved; see LICENSE file for BSD-style license
+// Copyright 2006-2007 all rights reserved; see LICENSE file for BSD-style license
package edu.berkeley.sbp;
-import edu.berkeley.sbp.*;
import edu.berkeley.sbp.util.*;
-import edu.berkeley.sbp.Sequence.Position;
-import java.io.*;
import java.util.*;
-/** if ambiguity checking is enabled, this exception is thrown to signal that the parse was ambiguous */
+/** thrown to signal that a parse was ambiguous */
public class Ambiguous extends Exception {
- final Forest<?> ambiguity;
- private final HashSet<Tree<?>> ht;
+ private final Forest<?> ambiguity;
+ private final HashSet<Tree<?>> possibilities;
/**
- * @param ht a specially-constructed set of trees with shared nodes replaced by '*'
+     * @param possibilities a specially-constructed set of trees with shared nodes replaced by '*'
*/
- Ambiguous(Forest<?> ambiguity, HashSet<Tree<?>> ht) {
+ Ambiguous(Forest<?> ambiguity, HashSet<Tree<?>> possibilities) {
this.ambiguity = ambiguity;
- this.ht = ht;
+ this.possibilities = possibilities;
}
- public Forest<?> getAmbiguity() { return ambiguity; }
-
- /** WARNING: this method is not considered part of the "stable API"; it may be removed in the future */
- public Input.Region getRegion() { return ambiguity.getRegion(); }
+ public Forest<?> getForest() { return ambiguity; }
public String toString() {
StringBuffer sb = new StringBuffer();
sb.append("unresolved ambiguity at "+ambiguity.getRegion()+"; shared subtrees are shown as \"*\" ");
- for(Tree<?> result : ht) {
+ for(Tree<?> result : possibilities) {
sb.append("\n possibility: ");
StringBuffer sb2 = new StringBuffer();
result.toPrettyString(sb2);
-// Copyright 2006 all rights reserved; see LICENSE file for BSD-style license
+// (C) 2006-2007 all rights reserved; see LICENSE file for BSD-style license
package edu.berkeley.sbp;
-import java.io.*;
-import java.util.*;
-import java.lang.reflect.*;
-import java.lang.ref.*;
import edu.berkeley.sbp.util.*;
-import edu.berkeley.sbp.*;
-import edu.berkeley.sbp.*;
/**
- * <font color=green>an element which matches some set of one-token-long input strings</font>.
+ * <font color=green>
+ * an element which matches some set of one-token-long input strings
+ * </font>.
*
* <p>
- * This class is a topology over itself (yes, that's sort of <a
- * href=http://en.wikipedia.org/wiki/Frege>Frege'd</a> up) so that
- * Atoms can be intersected and unioned with each other to result in
- * other Atom<Token>'s (rather than raw Topology<Token>'s, which are
- * not Elements). If you want the latter, use the getTokenTopology()
- * method.
+ * This class is a topology over itself (yes, this is impredicative).
+ * This means that you can call Atom.union(Atom) and get back an Atom.
+ * If you are interested in the topology of <i>tokens</i> which this
+ * Atom can match, use the <tt>getTokenTopology()</tt> method.
* </p>
*/
-public abstract class Atom<Token> extends Element implements Topology<Atom<Token>> {
+public abstract class Atom<Token>
+ extends Element
+ implements Topology<Atom<Token>> {
/** the set (topology) of tokens that can match this element */
public abstract Topology<Token> getTokenTopology();
import java.lang.reflect.*;
import java.lang.ref.*;
-class Cache {
- private Union root;
- private Topology top;
+abstract class Cache<Token> {
+ protected Union rootUnion;
public HashMap<Sequence, Topology> follow = new HashMap<Sequence, Topology>();
public HashSet<Sequence> followEof = new HashSet<Sequence>();
public final HashMap<SequenceOrElement,Boolean> possiblyEpsilon = new HashMap<SequenceOrElement,Boolean>();
public HashSet<SequenceOrElement> all = new HashSet<SequenceOrElement>();
- public Cache(Union root, Topology top) {
- this.root = root;
- this.top = top;
+ abstract Topology<Token> emptyTopology();
+ public Cache(Union root) {
+ this.rootUnion = root;
+ if (root != null)
+ for(Sequence s : root)
+ buildFollowSet(s, emptyTopology(), true);
}
// Follow //////////////////////////////////////////////////////////////////////////////
}
public Topology epsilonFollowSet(Union u) {
- Topology ret = top.empty();
+ Topology ret = emptyTopology();
for(Sequence s : u)
ret = ret.union(epsilonFollowSet(s, new HashSet<Sequence>()));
return ret;
}
public Topology epsilonFollowSet(Sequence seq, HashSet<Sequence> seen) {
- Topology ret = seq.follow==null ? top.empty().complement() : seq.follow.getTokenTopology();
+ Topology ret = seq.follow==null ? emptyTopology().complement() : seq.follow.getTokenTopology();
if (seen.contains(seq)) return ret;
seen.add(seq);
for(Position p = seq.firstp(); p!=null && !p.isLast(); p = p.next()) {
if (!(p.element() instanceof Union)) continue;
- Topology t = top.empty();
+ Topology t = emptyTopology();
for(Sequence s : ((Union)p.element()))
if (possiblyEpsilon(s))
t = t.union(epsilonFollowSet(s, seen));
public Topology first(SequenceOrElement e, HashSet<Sequence> seen) {
if (e instanceof Atom) return ((Atom)e).getTokenTopology();
- Topology ret = top.empty();
+ Topology ret = emptyTopology();
if (e instanceof Union) {
for(Sequence s : ((Union)e)) ret = ret.union(first(s, seen));
} else {
Sequence seq = (Sequence)e;
- if (seen.contains(seq)) return top.empty();
+ if (seen.contains(seq)) return emptyTopology();
seen.add(seq);
for(Position p = seq.firstp(); p!=null && !p.isLast(); p = p.next()) {
ret = ret.union(first(p.element(), seen));
-// Copyright 2006 all rights reserved; see LICENSE file for BSD-style license
+// (C) 2006-2007 all rights reserved; see LICENSE file for BSD-style license
package edu.berkeley.sbp;
-import edu.berkeley.sbp.util.*;
-import edu.berkeley.sbp.*;
-import edu.berkeley.sbp.*;
-import java.io.*;
import java.util.*;
-import java.lang.reflect.*;
-import java.lang.ref.*;
-/** <font color=green>the root superclass for all components of the grammar (terminals, nonterminals, literals, etc)</font> */
+/**
+ * <font color=green>
+ * the root superclass for all components of the grammar (terminals,
+ * nonterminals, literals, etc)
+ * </font>
+ */
public abstract class Element implements SequenceOrElement {
/** sorry, you can't make up new, custom elements */
Element() { }
- /** a more verbose version of toString() which should show the entire grammar */
+ /** a more verbose version of toString() for displaying whole grammars */
abstract StringBuffer toString(StringBuffer sb);
- /** returns the Forest resulting from matching this element against the empty string */
- Forest<?> epsilonForm(Input.Region loc) { throw new Error("element " + this + " has no epsilon form"); }
-
}
// Copyright 2006 all rights reserved; see LICENSE file for BSD-style license
package edu.berkeley.sbp;
-import edu.berkeley.sbp.*;
-import edu.berkeley.sbp.Sequence.Position;
import edu.berkeley.sbp.util.*;
import java.io.*;
import java.util.*;
-import java.lang.reflect.*;
/**
* <font color=blue>
public abstract Tree<NodeType> expand1() throws Ambiguous;
/** expand this forest into a set of trees */
- public void expand(HashSet<Tree<NodeType>> ht) { expand(ht, new HashSet<Forest<NodeType>>(), null); }
-
- static <NodeType> Forest<NodeType> create(Input.Region loc, NodeType head, Forest<NodeType>[] children, boolean lift) {
- if (loc == null) throw new RuntimeException("invoked Forest.create(null,,,,) -- this should never happen");
- return new One<NodeType>(loc, head, children, lift);
+ public Iterable<Tree<NodeType>> expand() {
+ HashSet<Tree<NodeType>> ht = new HashSet<Tree<NodeType>>();
+ expand(ht, new HashSet<Forest<NodeType>>(), null);
+ return ht;
}
- /** create a new forest */
- public static <NodeType> Forest<NodeType> create(Input.Region loc, NodeType head, Forest<NodeType>[] children) {
- return Forest.create(loc, head, children, false); }
+ /** returns the input Region which this Forest was parsed from */
+ public abstract Input.Region getRegion();
// Package-Private //////////////////////////////////////////////////////////////////////////////
+ static <NodeType> Forest<NodeType> create(Input.Region region, NodeType head, Forest<NodeType>[] children, boolean lift) {
+ if (region == null) throw new RuntimeException("invoked Forest.create(region=null) -- this should never happen");
+ return new One<NodeType>(region, head, children, lift);
+ }
+
+ /** create a new forest */
+ public static <NodeType> Forest<NodeType> create(Input.Region region, NodeType head, Forest<NodeType>[] children) {
+ return Forest.create(region, head, children, false); }
+
abstract void expand(HashSet<Tree<NodeType>> ht, HashSet<Forest<NodeType>> ignore, Tree<NodeType> bogus);
abstract void gather(HashSet<Forest<NodeType>> ignore);
abstract void edges(GraphViz.Node n);
boolean ambiguous() { return false; }
- abstract Input.Region getRegion();
-
// One //////////////////////////////////////////////////////////////////////////////
/** A "single" forest with a head and child subforests */
/** if true, the last child's children are considered children of this node */
private final boolean lift;
- Input.Region getRegion() { return location; }
+ public Input.Region getRegion() { return location; }
private One(Input.Region loc, NodeType head, Forest<NodeType>[] children, boolean lift) {
this.location = loc;
if (ignore.contains(this)) { ht.add(bogus); return; }
expand(0, new Tree[children.length], ht, ignore, bogus);
}
- private void expand(final int i, Tree<NodeType>[] ta, HashSet<Tree<NodeType>> ht, HashSet<Forest<NodeType>> ignore, Tree<NodeType> bogus) {
+ private void expand(final int i, Tree<NodeType>[] ta, HashSet<Tree<NodeType>> ht, HashSet<Forest<NodeType>> ignore,
+ Tree<NodeType> bogus) {
if (i==children.length) {
ht.add(new Tree<NodeType>(location, head, ta, lift));
} else {
public Many() { }
- Input.Region getRegion() { return hp.iterator().next().getRegion(); } // all should be identical
+ public Input.Region getRegion() { return hp.iterator().next().getRegion(); } // all should be identical
public Tree<NodeType> expand1() throws Ambiguous {
touched();
public Forest.Many finalResult;
private PriorityQueue<Reduction> reductionQueue = new PriorityQueue<Reduction>();
+ Parser parser() { return parser; }
public void addReduction(Reduction r) {
//System.out.println("+ " + r);
parser.spin();
newNode(primordealResult, startState, true);
}
public Phase(Phase prev, Forest forest) throws ParseFailed, IOException {
- this.location = input.getLocation();
+ this.prevLocation = input.getLocation();
this.token = (Tok)input.next();
- this.prevLocation = prev==null ? location : prev.getLocation();
+ this.location = input.getLocation();
this.prev = prev;
this.forest = forest;
this.pos = prev==null ? 0 : prev.pos+1;
public Input.Location getPrevLocation() { return prevLocation; }
public Input.Location getLocation() { return location; }
- public Input.Region getRegion() { return getPrevLocation().createRegion(getLocation()); }
+ public Input.Region getRegion() { return prevLocation.createRegion(location); }
public Input.Location getNextLocation() { return nextLocation; }
public boolean isFrontier() { return hash!=null; }
if (!state.canReduce(token)) return false;
} while(false);
Node n = new Node(Phase.this, result, state, fromEmptyReduction); // ALLOC
- for(Object s : state.also)
+ for(Object s : state.conjunctStates)
newNode(new Result(null, n, null), (State)s, fromEmptyReduction);
return true;
}
-// Copyright 2006 all rights reserved; see LICENSE file for BSD-style license
+// (C) 2006-2007 all rights reserved; see LICENSE file for BSD-style license
package edu.berkeley.sbp;
import java.io.*;
import java.util.*;
-import java.lang.reflect.*;
-import java.lang.ref.*;
-import edu.berkeley.sbp.*;
import edu.berkeley.sbp.util.*;
+// FEATURE: Region implements Topology<Location<Tok>>
+
/** <font color=purple>a stream of <tt>Token</tt>s to be parsed</font> */
public interface Input<Token> {
- /** returns the token just beyond the current location and advances beyond it */
- public Token next() throws IOException;
-
- /** returns the location the input stream is currently at */
+ /** the current location within the input stream */
public Location<Token> getLocation();
-
- /** should return a short string describing where the input is coming from */
+
+ /** returns the token just beyond the current location and advances beyond it */
+ public Token next() throws IOException;
+
+ /** a short string describing where the input is coming from, such as a filename */
public String getName();
/**
*/
public abstract String showRegion(Region<Token> r, int maxLength);
- /** <font color=purple>a location (position) in the input stream -- <i>between tokens</i></font> */
+ /** <font color=purple>a location (position) in the input stream <i>between tokens</i></font> */
public static interface Location<Token> extends Comparable<Location> {
/** return the region between this location and <tt>loc</tt> */
}
/** <font color=purple>a contiguous set of <tt>Location</tt>s</font> */
- public static interface Region<Token> /* implements Topology<Location<Tok>> */ {
+ public static interface Region<Token> {
- /**
- * the toString() method of Region should return a <80char
- * "rendition" of the input region, if possible
- */
+ /** should return less than 80 chars if possible */
public abstract String toString();
/** The location of the start of this region */
public final void invoke(Position r, Result only) {
boolean emptyProductions = only==null;
if (emptyProductions != (r.pos==0)) return;
- if (r.pos==0) new Result(r.zero(phase().getLocation().createRegion(phase().getLocation())), this, r, phase());
- else reduce(r, r.pos-1, phase(), only);
+ if (r.pos!=0) reduce(r, r.pos-1, phase(), only);
+ else {
+ Input.Region region = phase().getLocation().createRegion(phase().getLocation());
+ new Result(r.rewrite(region, phase().parser().cache()), this, r, phase());
+ }
}
private void reduce(Position r, int pos, GSS.Phase target, Result only) {
Node child = res.parent();
holder[pos] = res.getForest();
if (pos>0) child.reduce(r, pos-1, target, null);
- else new Reduction(child, r, r.rewrite(child.phase().getLocation().createRegion(target.getLocation())), target);
+ else {
+ Input.Region region = child.phase().getLocation().createRegion(target.getLocation());
+ new Reduction(child, r, r.rewrite(region, phase().parser().cache()), target);
+ }
}
holder[pos] = old;
}
if (results.size()==0) return null;
if (gv.hasNode(this)) return gv.createNode(this);
GraphViz.Node n = gv.createNode(this);
- n.label = ""+state.toStringx();
+ n.label = "state["+state.toInt()+"]";
n.shape = "rectangle";
boolean hasparents = false;
for(Result r : results) n.edge(r, "");
/*
else if (p.pos-raise > 0)
barf(sb, n, indent, false, 1);
- */
if (!new Cache(null, null).possiblyEpsilon(p.element()))
break;
+ */
p = p.next();
raise++;
if (p.isLast()) {
ret.append('\n');
ret.append(" text: ");
int budget = 60;
- String second = input.showRegion(region);
+ String second = input.showRegion(region, 60);
budget -= second.length();
Input.Location after = region.getEnd();
for(int i=0; i<10; i++) after = after.next() == null ? after : after.next();
- String third = input.showRegion(region.getEnd().createRegion(after));
+ String third = input.showRegion(region.getEnd().createRegion(after), 60);
budget -= third.length();
Input.Location before = region.getStart();
for(int i=0; i<budget; i++) before = before.prev() == null ? before : before.prev();
- String first = input.showRegion(before.createRegion(region.getStart()));
+ String first = input.showRegion(before.createRegion(region.getStart()), 60);
ret.append(ANSI.cyan(first));
ret.append(ANSI.invert(ANSI.red(second)));
ret.append(ANSI.cyan(third));
ret.append("\n or " + ANSI.purple(s));
}
Input.Region reg = loc2.createRegion(region.getEnd());
- ret.append(" to match \"" + ANSI.cyan(input.showRegion(reg)) + "\" at " + ANSI.yellow(reg));
+ ret.append(" to match \"" + ANSI.cyan(input.showRegion(reg, 60)) + "\" at " + ANSI.yellow(reg));
i++;
}
}
// Copyright 2006 all rights reserved; see LICENSE file for BSD-style license
package edu.berkeley.sbp;
-import edu.berkeley.sbp.*;
import edu.berkeley.sbp.util.*;
import edu.berkeley.sbp.Sequence.Position;
import java.io.*;
import java.util.*;
+// FEATURE: try harder to "fuse" states together along two dimensions:
+// - identical (equivalent) states, or states that subsume each other
+// - unnecessary intermediate states ("short cut" GLR)
+
/** a parser which translates an Input<Token> into a Forest<NodeType> */
public abstract class Parser<Token, NodeType> {
- protected final Table<Token> pt;
+ final Table pt;
/** create a parser to parse the grammar with start symbol <tt>u</tt> */
- public Parser(Union u, Topology<Token> top) { this.pt = new Table<Token>(u, top); }
+ public Parser(Union u) { this.pt = new Table(u); }
/** implement this method to create the output forest corresponding to a lone shifted input token */
- public abstract Forest<NodeType> shiftToken(Token t, Input.Location newloc);
+ public abstract Forest<NodeType> shiftToken(Token t, Input.Region region);
- public String toString() { return pt.toString(); }
+ public abstract Topology<Token> emptyTopology();
- private boolean verbose = false;;
- private static final char[] spin = new char[] { '-', '\\', '|', '/' };
- private int spinpos = 0;
- private long last = 0;
- void spin() {
- if (verbose) {
- long now = System.currentTimeMillis();
- if (now-last < 70) return;
- last = now;
- System.err.print("\r " + spin[spinpos++ % (spin.length)]+"\r");
- }
- }
+ public String toString() { return pt.toString(); }
+ Cache cache() { return pt; }
/** parse <tt>input</tt>, and return the shared packed parse forest (or throw an exception) */
public Forest<NodeType> parse(Input<Token> input) throws IOException, ParseFailed {
for(GSS.Phase current = gss.new Phase<Token>(pt.start); ;) {
if (verbose) {
+ // FIXME: clean this up
String s;
s = " " + spin[spinpos++ % (spin.length)]+" parsing ";
s += input.getName();
String y = "@"+gss.viewPos+" ";
while(y.length() < 9) y = " " + y;
s += y;
- //s += " doom="+Node.doomedNodes;
- //while(s.length() < 40) s = s + " ";
s += " nodes="+gss.numOldNodes;
while(s.length() < 50) s = s + " ";
s += " shifted="+gss.numNewNodes;
System.err.print("\r"+s+ANSI.clreol()+"\r");
}
- // FIXME: make sure all the locations line up properly in here
if (current.isDone()) return (Forest<NodeType>)current.finalResult;
- Forest forest = shiftToken((Token)current.token, input.getLocation());
+ Forest forest = shiftToken((Token)current.token, current.getRegion());
current = gss.new Phase<Token>(current, forest);
}
- } finally {
- if (verbose)
- System.err.print("\r \r");
- }
+ } finally { if (verbose) System.err.print("\r"+ANSI.clreol()); }
}
+ // Spinner //////////////////////////////////////////////////////////////////////////////
+
+ private boolean verbose = false;
+ private static final char[] spin = new char[] { '-', '\\', '|', '/' };
+ private int spinpos = 0;
+ private long last = 0;
+ void spin() {
+ if (!verbose) return;
+ long now = System.currentTimeMillis();
+ if (now-last < 70) return;
+ last = now;
+ System.err.print("\r " + spin[spinpos++ % (spin.length)]+"\r");
+ }
// Table //////////////////////////////////////////////////////////////////////////////
/** an SLR(1) parse table which may contain conflicts */
- static class Table<Token> extends Cache {
+ class Table extends Cache<Token> {
/** the start state */
- public final State<Token> start;
+ final State<Token> start;
- /** the state from which no reductions can be done */
+ /** a dummy state from which no reductions can be performed */
private final State<Token> dead_state;
/** used to generate unique values for State.idx */
private int master_state_idx = 0;
- HashSet<State<Token>> all_states = new HashSet<State<Token>>();
+
+ /** all the states for this table */
+ HashSet<State<Token>> all_states = new HashSet<State<Token>>();
+
+ /** all the doomed states in this table */
HashMap<HashSet<Position>,State<Token>> doomed_states = new HashMap<HashSet<Position>,State<Token>>();
+
+ /** all the non-doomed states in this table */
HashMap<HashSet<Position>,State<Token>> normal_states = new HashMap<HashSet<Position>,State<Token>>();
+ Topology<Token> emptyTopology() { return Parser.this.emptyTopology(); }
+
/** construct a parse table for the given grammar */
- public Table(Topology top) { this("s", top); }
- public Table(String startSymbol, Topology top) { this(new Union(startSymbol), top); }
- public Table(Union ux, Topology top) {
- super(ux, top);
- Union start0 = new Union("0");
- Sequence seq0 = new Sequence.Singleton(ux);
- start0.add(seq0);
- buildFollowSet(seq0, top, true);
-
- // construct the set of states
- HashSet<Position> hp = new HashSet<Position>();
- reachable(start0, hp);
+ Table(Union ux) {
+ super(new Union("0", Sequence.create(ux), true));
+ // create the "dead state"
this.dead_state = new State<Token>(new HashSet<Position>(), true);
- this.start = new State<Token>(hp);
+ // construct the start state; this will recursively create *all* the states
+ this.start = new State<Token>(reachable(rootUnion), false);
+
+ buildReductions();
+ sortReductions();
+ }
+
+ /** fill in the reductions table */
+ private void buildReductions() {
// for each state, fill in the corresponding "row" of the parse table
for(State<Token> state : all_states)
for(Position p : state.hs) {
- // the Grammar's designated "last position" is the only accepting state
- if (start0.contains(p.owner()) && p.next()==null && !state.doomed)
- state.accept = true;
-
- if (isRightNullable(p)) {
- Topology<Token> follow = (Topology<Token>)follow(p.owner());
- for(Position p2 = p; p2 != null && p2.element() != null; p2 = p2.next()) {
- if (!(p2.element() instanceof Union)) throw new Error("impossible");
- Union u = (Union)p2.element();
- Atom set = new edu.berkeley.sbp.chr.CharAtom(new edu.berkeley.sbp.chr.CharTopology((Topology<Character>)epsilonFollowSet(u)));
- Element p2e = p2.element();
- if (p2e instanceof Union)
- for(Sequence p2es : ((Union)p2e))
- follow = follow.intersect(follow(p2es));
- if (set != null) follow = follow.intersect(set.getTokenTopology());
- }
- state.reductions.put(follow, p);
- if (followEof.contains(p.owner())) state.eofReductions.add(p);
- }
-
// if the element following this position is an atom, copy the corresponding
// set of rows out of the "master" goto table and into this state's shift table
if (p.element() != null && p.element() instanceof Atom)
state.shifts.addAll(state.gotoSetTerminals.subset(((Atom)p.element()).getTokenTopology()));
+
+ // RNGLR: we can potentially reduce from any "right-nullable" position -- that is,
+ // any position for which all Elements after it in the Sequence are capable of
+ // matching the empty string.
+ if (!isRightNullable(p)) continue;
+ Topology<Token> follow = follow(p.owner());
+ for(Position p2 = p; p2 != null && p2.element() != null; p2 = p2.next()) {
+ if (!(p2.element() instanceof Union))
+ throw new Error("impossible -- only Unions can be nullable");
+
+ // interesting RNGLR-followRestriction interaction: we must intersect
+ // not just the follow-set of the last non-nullable element, but the
+ // follow-sets of the nulled elements as well.
+ for(Sequence s : ((Union)p2.element()))
+ follow = follow.intersect(follow(s));
+ Topology<Token> set = epsilonFollowSet((Union)p2.element());
+ if (set != null) follow = follow.intersect(set);
+ }
+
+ // indicate that when the next token is in the set "follow", nodes in this
+ // state should reduce according to Position "p"
+ state.reductions.put(follow, p);
+ if (followEof.contains(p.owner())) state.eofReductions.add(p);
}
- if (top instanceof IntegerTopology)
+ // optimize the reductions table
+ if (emptyTopology() instanceof IntegerTopology)
for(State<Token> state : all_states) {
- state.oreductions = state.reductions.optimize(((IntegerTopology)top).functor());
- state.oshifts = state.shifts.optimize(((IntegerTopology)top).functor());
+ // FIXME: this is pretty ugly
+ state.oreductions = state.reductions.optimize(((IntegerTopology)emptyTopology()).functor());
+ state.oshifts = state.shifts.optimize(((IntegerTopology)emptyTopology()).functor());
}
+ }
+ // FIXME: this method needs to be cleaned up and documented
+ private void sortReductions() {
// crude algorithm to assing an ordinal ordering to every position
// al will be sorted in DECREASING order (al[0] >= al[1])
ArrayList<Sequence.Position> al = new ArrayList<Sequence.Position>();
}
al.get(i).ord = j;
}
-
- /*
- for(int i=0; i<al.size(); i++)
- if (isRightNullable(al.get(i)))
- System.out.println(al.get(i).ord + " " + al.get(i));
- */
- //mastercache = this;
}
- /** a single state in the LR table and the transitions possible from it */
+ /**
+ * A single state in the LR table and the transitions
+ * possible from it
+ *
+ * A state corresponds to a set of Sequence.Position's. Each
+ * Node in the GSS has a State; the Node represents a set of
+ * possible parses, one for each Position in the State.
+ *
+ * Every state is either "doomed" or "normal". If a Position
+ * is part of a Sequence which is a conjunct (that is, it was
+ * passed to Sequence.{and(),andnot()}), then that Position
+ * will appear only in doomed States. Furthermore, any set
+ * of Positions reachable from a doomed State also forms a
+ * doomed State. Note that in this latter case, a doomed
+ * state might have exactly the same set of Positions as a
+ * non-doomed state.
+ *
+ * Nodes with non-doomed states represent nodes which
+ * contribute to actual valid parses. Nodes with doomed
+ * States exist for no other purpose than to enable/disable
+ * some future reduction from a non-doomed Node. Because of
+ * this, we "garbage-collect" Nodes with doomed states if
+ * there are no more non-doomed Nodes which they could
+ * affect (see Result, Reduction, and Node for details).
+ *
+ * Without this optimization, many seemingly-innocuous uses
+ * of positive and negative conjuncts can trigger O(n^2)
+ * space+time complexity in otherwise simple grammars. There
+ * is an example of this in the regression suite.
+ */
class State<Token> implements IntegerMappable, Iterable<Position> {
public final int idx = master_state_idx++;
private final HashSet<Position> hs;
- public HashSet<State<Token>> also = new HashSet<State<Token>>();
+ public HashSet<State<Token>> conjunctStates = new HashSet<State<Token>>();
- public transient HashMap<Sequence,State<Token>> gotoSetNonTerminals = new HashMap<Sequence,State<Token>>();
- private transient TopologicalBag<Token,State<Token>> gotoSetTerminals = new TopologicalBag<Token,State<Token>>();
+ HashMap<Sequence,State<Token>> gotoSetNonTerminals = new HashMap<Sequence,State<Token>>();
+ private transient TopologicalBag<Token,State<Token>> gotoSetTerminals = new TopologicalBag<Token,State<Token>>();
private TopologicalBag<Token,Position> reductions = new TopologicalBag<Token,Position>();
private HashSet<Position> eofReductions = new HashSet<Position>();
private VisitableMap<Token,State<Token>> oshifts = null;
private VisitableMap<Token,Position> oreductions = null;
+ public final boolean doomed;
// Interface Methods //////////////////////////////////////////////////////////////////////////////
* for non-Atom Elements.
* </ul>
*/
- public State(HashSet<Position> hs) { this(hs, false); }
- public boolean doomed;
public State(HashSet<Position> hs, boolean doomed) {
this.hs = hs;
this.doomed = doomed;
- // register ourselves in the all_states hash so that no
- // two states are ever created with an identical position set
+ // register ourselves so that no two states are ever
+ // created with an identical position set (termination depends on this)
((HashMap)(doomed ? doomed_states : normal_states)).put(hs, this);
((HashSet)all_states).add(this);
-
+
for(Position p : hs) {
+ // Step 1a: take note if we are an accepting state
+ // (last position of the root Union's sequence)
+ if (p.next()==null && !doomed && rootUnion.contains(p.owner()))
+ accept = true;
+
+ // Step 1b: If any Position in the set is the first position of its sequence, then this
+ // state is responsible for spawning the "doomed" states for each of the
+ // Sequence's conjuncts. This obligation is recorded by adding the to-be-spawned
+ // states to conjunctStates.
if (!p.isFirst()) continue;
- for(Sequence s : p.owner().needs()) {
- if (hs.contains(s.firstp())) continue;
- HashSet<Position> h2 = new HashSet<Position>();
- reachable(s, h2);
- also.add(mkstate(h2, true));
- }
- for(Sequence s : p.owner().hates()) {
- if (hs.contains(s.firstp())) continue;
- HashSet<Position> h2 = new HashSet<Position>();
- reachable(s, h2);
- also.add(mkstate(h2, true));
- }
+ for(Sequence s : p.owner().needs())
+ if (!hs.contains(s.firstp()))
+ conjunctStates.add(mkstate(reachable(s.firstp()), true));
+ for(Sequence s : p.owner().hates())
+ if (!hs.contains(s.firstp()))
+ conjunctStates.add(mkstate(reachable(s.firstp()), true));
}
- // Step 1a: examine all Position's in this state and compute the mappings from
+ // Step 2a: examine all Position's in this state and compute the mappings from
// sets of follow tokens (tokens which could follow this position) to sets
// of _new_ positions (positions after shifting). These mappings are
// collectively known as the _closure_
bag0.addAll(a.getTokenTopology(), hp);
}
- // Step 1b: for each _minimal, contiguous_ set of characters having an identical next-position
+ // Step 2b: for each _minimal, contiguous_ set of characters having an identical next-position
// set, add that character set to the goto table (with the State corresponding to the
// computed next-position set).
((TopologicalBag)gotoSetTerminals).put(r, mkstate(h, doomed));
}
- // Step 2: for every Sequence, compute the closure over every position in this set which
+ // Step 3: for every Sequence, compute the closure over every position in this set which
// is followed by a symbol which could yield the Sequence.
//
// "yields" [in one or more step] is used instead of "produces" [in exactly one step]
}
private State<Token> mkstate(HashSet<Position> h, boolean b) {
- if (b) return doomed_states.get(h) == null ? (State)new State<Token>(h,b) : (State)doomed_states.get(h);
- else return normal_states.get(h) == null ? (State)new State<Token>(h,b) : (State)normal_states.get(h);
- }
-
- public String toStringx() {
- StringBuffer st = new StringBuffer();
- for(Position p : this) {
- if (st.length() > 0) st.append("\n");
- st.append(p);
- }
- return st.toString();
- }
-
- public String toString() {
- StringBuffer ret = new StringBuffer();
- ret.append("state["+idx+"]: ");
- for(Position p : this) ret.append("{"+p+"} ");
- return ret.toString();
+ State ret = (b?doomed_states:normal_states).get(h);
+ if (ret==null) ret = new State<Token>(h,b);
+ return ret;
}
public int toInt() { return idx; }
}
- public String toString() {
- StringBuffer sb = new StringBuffer();
- sb.append("parse table");
- for(State<Token> state : all_states) {
- sb.append(" " + state + "\n");
- for(Topology<Token> t : state.shifts) {
- sb.append(" shift \""+
- new edu.berkeley.sbp.chr.CharTopology((IntegerTopology<Character>)t)+"\" => ");
- for(State st : state.shifts.getAll(t))
- sb.append(st.idx+" ");
- sb.append("\n");
- }
- for(Topology<Token> t : state.reductions)
- sb.append(" reduce \""+
- new edu.berkeley.sbp.chr.CharTopology((IntegerTopology<Character>)t)+"\" => " +
- state.reductions.getAll(t) + "\n");
- for(Sequence s : state.gotoSetNonTerminals.keySet())
- sb.append(" goto "+state.gotoSetNonTerminals.get(s)+" from " + s + "\n");
- }
- return sb.toString();
- }
}
// Helpers //////////////////////////////////////////////////////////////////////////////
- private static void reachable(Sequence s, HashSet<Position> h) {
- reachable(s.firstp(), h);
- //for(Sequence ss : s.needs()) reachable(ss, h);
- //for(Sequence ss : s.hates()) reachable(ss, h);
+ private static HashSet<Position> reachable(Element e) {
+ HashSet<Position> h = new HashSet<Position>();
+ reachable(e, h);
+ return h;
}
private static void reachable(Element e, HashSet<Position> h) {
if (e instanceof Atom) return;
for(Sequence s : ((Union)e))
- reachable(s, h);
+ reachable(s.firstp(), h);
}
private static void reachable(Position p, HashSet<Position> h) {
if (h.contains(p)) return;
h.add(p);
if (p.element() != null) reachable(p.element(), h);
}
- //public static Cache mastercache = null;
+ private static HashSet<Position> reachable(Position p) {
+ HashSet<Position> ret = new HashSet<Position>();
+ reachable(p, ret);
+ return ret;
+ }
+
}
this.firstp = new Position(0, null);
}
- // DO NOT MESS WITH THE FOLLOWING LINE!!!
- private Forest.Many epsilonForm = null;
- Forest epsilonForm(Input.Region loc) {
- if (epsilonForm!=null) return epsilonForm;
- epsilonForm = new Forest.Many();
- epsilonForm.merge(firstp().rewrite(loc, false));
- return epsilonForm;
- }
+ abstract Forest epsilonForm(Input.Region loc, Cache cache);
protected abstract <T> Forest<T> postReduce(Input.Region loc, Forest<T>[] args, Position p);
public int ord = -1;
private Forest zero = null;
+ /*
public Forest zero(Input.Region reg) {
if (zero != null) return zero;
if (pos > 0) throw new RuntimeException("Position.zero(): pos>0");
return zero = rewrite(reg);
}
-
+ */
final int pos;
private final Position next;
private final Position prev;
// Position /////////////////////////////////////////////////////////////////////////////////
-        final <T> Forest<T> rewrite(Input.Region loc) { return rewrite(loc, true); }
-        private final <T> Forest<T> rewrite(Input.Region loc, boolean epsilonCheck) {
-            if (epsilonCheck && this==firstp()) return epsilonForm(loc);
+        final <T> Forest<T> rewrite(Input.Region loc, Cache cache) {
+            // the first position matches only epsilon, so the sequence's
+            // epsilon form IS its forest -- return it rather than discarding it
+            if (this==firstp()) return epsilonForm(loc, cache);
             for(int i=0; i<pos; i++) if (holder[i]==null) throw new Error("realbad " + i);
             for(int i=pos; i<elements.length; i++) {
-                if (holder[i]==null) holder[i] = elements[i].epsilonForm(loc);
-                if (holder[i]==null) throw new Error("bad " + i);
+                if (holder[i]==null) holder[i] = ((Union)elements[i]).epsilonForm(loc, cache);
+                if (holder[i]==null) throw new Error("bad " + i);
             }
             return Sequence.this.postReduce(loc, holder, this);
         }
public <T> Forest<T> postReduce(Input.Region loc, Forest<T>[] args, Position p) {
return (Forest<T>)Forest.create(loc, result, null, false);
}
+ Forest epsilonForm(Input.Region loc, Cache cache) {
+ return Forest.create(loc, result, null, false);
+ }
}
static class Singleton extends Sequence {
public Singleton(Element[] e, int idx) { super(e); this.idx = idx; }
public <T> Forest<T> postReduce(Input.Region loc, Forest<T>[] args, Position p) { return args[idx]; }
Sequence _clone() { return new Singleton(elements,idx); }
+ Forest epsilonForm(Input.Region loc, Cache cache) {
+ return ((Union)elements[idx]).epsilonForm(loc, cache);
+ }
}
static class Unwrap extends Sequence {
for(int i=0; i<args.length; i++) if (!drops[i]) args2[j++] = args[i];
return Forest.create(loc, (T)tag, args2, true);
}
+ Forest epsilonForm(Input.Region loc, Cache cache) {
+ return Forest.create(loc, tag, new Forest[0], false);
+ }
}
static class RewritingSequence extends Sequence {
if (spacing) for(int i=0; i<50-len; i++) sb.append(' ');
return sb;
}
+ Forest epsilonForm(Input.Region loc, Cache cache) {
+ return Forest.create(loc, tag, new Forest[0], false);
+ }
}
}
private final List<Sequence> alternatives = new ArrayList<Sequence>();
public Union(String name) { this(name, false); }
- public Union(String name, Sequence s) { this(name, false); add(s); }
+ public Union(String name, Sequence s) { this(name, s, false); }
+ public Union(String name, Sequence s, boolean synthetic) { this(name, synthetic); add(s); }
/**
* Since every cycle in a non-degenerate grammar contains at
add(Sequence.create(e));
}
-
- // Epsilon Form //////////////////////////////////////////////////////////////////////////////
-
- private Forest.Many epsilonForm = null;
- Forest epsilonForm(Input.Region loc) {
- // FIXME: this is pretty ugly...
+ /** the Forest which results from matching this Union against the empty string at region <tt>region</tt> */
+ Forest epsilonForm(Input.Region region, Cache cache) {
viewed = true;
- if (epsilonForm != null) return epsilonForm;
Forest.Many epsilonForm = new Forest.Many();
- Cache cache = new Cache(null, null);
for(Sequence s : this)
if (cache.possiblyEpsilon(s))
- epsilonForm.merge(s.epsilonForm(loc));
+ epsilonForm.merge(s.epsilonForm(region, cache));
return epsilonForm;
}
package edu.berkeley.sbp.chr;
import java.io.*;
import java.util.*;
-import java.lang.reflect.*;
-import java.lang.ref.*;
import edu.berkeley.sbp.*;
import edu.berkeley.sbp.util.*;
-import edu.berkeley.sbp.misc.*;
-import edu.berkeley.sbp.Input.Location;
public class CharParser extends Parser<Character,String> {
public Forest<String> parse(Reader r) throws IOException, ParseFailed { return super.parse(new CharInput(r)); }
public Forest<String> parse(String s) throws IOException, ParseFailed { return parse(new StringReader(s)); }
- public CharParser(Union u) { super(u, new CharTopology()); }
+ public CharParser(Union u) { super(u); }
- private Location oldloc;
-
- public Forest<String> shiftToken(Character ct, Location newloc) {
- if (oldloc==null) oldloc = newloc;
- Forest<String> ret = Forest.create(oldloc.createRegion(newloc), ct.toString(), null);
- oldloc = newloc;
- return ret;
- }
+ public Topology<Character> emptyTopology() { return new CharTopology(); }
+ public Forest<String> shiftToken(Character ct, Input.Region region) {
+ return Forest.create(region, ct.toString(), null); }
}
+++ /dev/null
-// Copyright 2006 all rights reserved; see LICENSE file for BSD-style license
-
-package edu.berkeley.sbp.meta;
-import edu.berkeley.sbp.util.*;
-import edu.berkeley.sbp.*;
-import edu.berkeley.sbp.chr.*;
-import edu.berkeley.sbp.misc.*;
-import java.util.*;
-import java.lang.annotation.*;
-import java.lang.reflect.*;
-import java.io.*;
-
-public class Grammar {
-
- /**
- * Create a grammar from a parse tree and binding resolver
- *
- * @param t a tree produced by parsing a grammar using the metagrammar
- * @param s the name of the "start symbol"
- * @param gbr a GrammarBindingResolver that resolves grammatical reductions into tree-node-heads
- */
- public static Union create(Tree t, String s) {
- return new GrammarBuilder("tests/" /*FIXME*/, "").buildGrammar(t, s);
- }
-
-}
/** The java classes typically used to represent a parsed grammar AST; each inner class is a type of AST node. */
public class GrammarBuilder {
+    /**
+     * Create a grammar from a parse tree produced by the metagrammar.
+     *
+     * @param grammarAST a tree produced by parsing a grammar using the metagrammar
+     * @param startingNonterminal the name of the "start symbol"
+     * @param includes the directories to be searched for #include'd grammar files
+     */
+ public static Union buildFromAST(Tree grammarAST, String startingNonterminal, File[] includes) {
+ return new GrammarBuilder(includes, "").buildGrammar(grammarAST, startingNonterminal);
+ }
+
public static Object illegalTag = ""; // this is the tag that should never appear in the non-dropped output FIXME
private final String prefix;
- private final String path;
+ private final File[] includes;
//public GrammarBuilder(String path) { this(path, ""); }
- public GrammarBuilder(String path, String prefix) {
+ public GrammarBuilder(File[] includes, String prefix) {
this.prefix = prefix;
- this.path = path;
+ this.includes = includes;
}
public Union buildGrammar(Tree t, String rootNonTerminal) {
if (head.equals("\n")) return "\n";
if (head.equals("\r")) return "\r";
if (head.equals("grammar.Grammar")) return walkChildren(t);
- if (head.equals("SubGrammar")) return Grammar.create(t.child(0), "s");
+ if (head.equals("SubGrammar")) return GrammarBuilder.buildFromAST(t.child(0), "s", includes);
if (head.equals("NonTerminal"))
return new NonTerminalNode((String)walk(t.child(0)),
(Seq[][])walkChildren(t.child(1)), false, null, false);
false,
false);
if (head.equals("#import")) {
- String fileName = path+(String)stringifyChildren(t.child(0));
- try {
- String newPrefix = (String)walk(t.child(1));
- if (newPrefix.length() > 0) newPrefix += ".";
- FileInputStream fis = new FileInputStream(fileName);
- Tree tr = new CharParser(MetaGrammar.newInstance()).parse(fis).expand1();
- return (GrammarNode)new GrammarBuilder(path, newPrefix).walk(tr);
- } catch (Exception e) {
- throw new RuntimeException("while parsing " + fileName, e);
+ String fileName = (String)stringifyChildren(t.child(0));
+ for(File f : includes) {
+ File file = new File(f.getAbsolutePath()+File.separatorChar+fileName);
+ if (!file.exists()) continue;
+ try {
+ String newPrefix = (String)walk(t.child(1));
+ if (newPrefix.length() > 0) newPrefix += ".";
+ FileInputStream fis = new FileInputStream(file);
+ Tree tr = new CharParser(MetaGrammar.newInstance()).parse(fis).expand1();
+ return (GrammarNode)new GrammarBuilder(includes, newPrefix).walk(tr);
+ } catch (Exception e) {
+ throw new RuntimeException("while parsing " + file, e);
+ }
}
+ throw new RuntimeException("unable to find #include file \""+fileName+"\"");
}
throw new RuntimeException("unknown head: " + head + "\n"+t);
}
/** create a grammar corresponding to the SBP metagrammar (<tt>meta.g</tt>) */
public static Union newInstance() {
- return Grammar.create(MetaGrammar.meta, "s");
+ return GrammarBuilder.buildFromAST(MetaGrammar.meta, "s", new File[0]);
}
/** Used to rebuild MetaGrammar.java, and not for much else */
out.append("\n // DO NOT EDIT STUFF BELOW: IT IS AUTOMATICALLY GENERATED\n");
- Tree t = MetaGrammar.meta;
- Union u = Grammar.create(t, "s");
-
- t = new CharParser((Union)u).parse(new FileInputStream(args[0])).expand1();
+ Union u = GrammarBuilder.buildFromAST(MetaGrammar.meta, "s", new File[0]);
+ Tree t = new CharParser((Union)u).parse(new FileInputStream(args[0])).expand1();
t.toJava(out);
out.append("\n // DO NOT EDIT STUFF ABOVE: IT IS AUTOMATICALLY GENERATED\n");
System.out.println("output: "+f.expand1().toPrettyString());
} catch (Ambiguous a) {
System.err.println(a.toString());
- System.err.println(" ambiguous text: " + input.showRegion(a.getRegion()));
+ System.err.println(" ambiguous text: " + input.showRegion(a.getForest().getRegion()));
}
}
public static Tree help(String grammarFile, String targetFile) throws Throwable {
try {
Tree<String> res = new CharParser(MetaGrammar.newInstance()).parse(new FileInputStream(grammarFile)).expand1();
- Union meta = Grammar.create(res, "s");
+ Union meta = GrammarBuilder.buildFromAST(res, "s", new File[0]);
Input input = new Tib(new FileInputStream(targetFile));
Tree ret = new CharParser(meta).parse(input).expand1();
if (ret==null) throw new NullPointerException("CharParser returned null");
import java.util.*;
import java.lang.reflect.*;
import edu.berkeley.sbp.*;
-import edu.berkeley.sbp.misc.*;
import edu.berkeley.sbp.meta.*;
-import edu.berkeley.sbp.tib.*;
import edu.berkeley.sbp.chr.*;
import edu.berkeley.sbp.util.*;
public static boolean yes = false;
public static boolean graph = false;
+ public static File[] includes = new File[] { new File("tests") };
public static void main() throws Exception {
main(new String[] { "tests/meta.g", "tests/testcase.g", "tests/regression.tc" });
System.err.println("parsing " + s[0]);
Tree<String> res = new CharParser(MetaGrammar.newInstance()).parse(new FileInputStream(s[0])).expand1();
- Union meta = Grammar.create(res, "s");
+ Union meta = GrammarBuilder.buildFromAST(res, "s", includes);
System.err.println("parsing " + s[1]);
res = new CharParser(meta).parse(new FileInputStream(s[1])).expand1();
- Union testcasegrammar = Grammar.create(res, "s");
+ Union testcasegrammar = GrammarBuilder.buildFromAST(res, "s", includes);
if (testcasegrammar==null) return;
CharParser parser = new CharParser(testcasegrammar);
}
System.err.println("expanding...");
- TestCase[] expanded = (TestCase[])new GrammarBuilder("tests/", "").walkChildren(r2.expand1());
+ TestCase[] expanded = (TestCase[])new GrammarBuilder(includes, "").walkChildren(r2.expand1());
for(TestCase tc : expanded)
tc.execute();
System.out.println(parser);
}
- HashSet<Tree<String>> results = new HashSet<Tree<String>>();
- if (res != null) res.expand(results);
+ Iterable<Tree<String>> results =
+ res==null ? new HashSet<Tree<String>>() : res.expand();
System.out.print("\r");
- if (results == null || (results.size() == 0 && (output!=null && output.length > 0))) {
+ if (results == null || (!results.iterator().hasNext() && (output!=null && output.length > 0))) {
System.out.print("\033[31m");
System.out.print("FAIL ");
System.out.println("\033[0m "+name);
| epsilon:: ()
}
-testcase "unnamed" {
+testcase "qaq" {
input "qaq";
- output "q:{a:{s1:{epsilon}}}";
+ output "q:{a:{s1}}";
s = ^"q" x "q"
x = ^"a" a