UnwrapLeft, error reporting improvements
[sbp.git] src/edu/berkeley/sbp/Parser.java
index 1ea89ad..0b2229d 100644
+// Copyright 2006 all rights reserved; see LICENSE file for BSD-style license
+
 package edu.berkeley.sbp;
-import edu.berkeley.sbp.*;
 import edu.berkeley.sbp.util.*;
 import edu.berkeley.sbp.Sequence.Position;
 import java.io.*;
 import java.util.*;
 
-/** a parser which translates streams of Tokens of type T into a Forest<R> */
-public abstract class Parser<Tok, Result> {
+// FEATURE: try harder to "fuse" states together along two dimensions:
+//   - identical (equivalent) states, or states that subsume each other
+//   - unnecessary intermediate states ("short cut" GLR)
+
+/** a parser which translates an Input&lt;Token&gt; into a Forest&lt;NodeType&gt; */
+public abstract class Parser<Token, NodeType> {
 
-    protected final Table<Tok> pt;
+    final Table pt;
 
     /** create a parser to parse the grammar with start symbol <tt>u</tt> */
-    protected Parser(Union u, Topology<Tok> top)  { this.pt = new Table<Tok>(u, top); }
-    protected Parser(Table<Tok> pt)               { this.pt = pt; }
+    public Parser(Union u)  { this.pt = new Table(u); }
 
     /** implement this method to create the output forest corresponding to a lone shifted input token */
-    public abstract Forest<Result> shiftToken(Tok t, Input.Location loc);
-
-    /** parse <tt>input</tt>, using the table <tt>pt</tt> to drive the parser */
-    public Forest<Result> parse(Input<Tok> input) throws IOException, ParseFailed {
-        GSS gss = new GSS();
-        Input.Location loc = input.getLocation();
-        GSS.Phase current = gss.new Phase<Tok>(null, this, null, input.next(1, 0, 0), loc, null);
-        current.newNode(null, Forest.leaf(null, null, null), pt.start, true);
-        int count = 1;
-        for(;;) {
-            loc = input.getLocation();
-            current.reduce();
-            Forest forest = current.token==null ? null : shiftToken((Tok)current.token, loc);
-            GSS.Phase next = gss.new Phase<Tok>(current, this, current, input.next(count, gss.resets, gss.waits), loc, forest);
-            count = next.size();
-            if (current.isDone()) return (Forest<Result>)gss.finalResult;
-            current = next;
+    public abstract Forest<NodeType> shiftToken(Token t, Input.Region region);
+
+    public abstract Topology<Token> emptyTopology();
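+
+    // A minimal sketch of what a concrete subclass must supply; the class name and the
+    // two method bodies below are illustrative placeholders, not part of this library:
+    //
+    //     class MyCharParser extends Parser<Character,String> {
+    //         public MyCharParser(Union u) { super(u); }
+    //         public Forest<String> shiftToken(Character c, Input.Region r) {
+    //             return null;   // really: a one-character leaf Forest spanning r
+    //         }
+    //         public Topology<Character> emptyTopology() {
+    //             return null;   // really: an empty Topology over characters
+    //         }
+    //     }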
+
+    public String toString() { return pt.toString(); }
+    Grammar cache() { return pt; }
+
+    /** parse <tt>input</tt>, and return the shared packed parse forest (or throw an exception) */
+    public Forest<NodeType> parse(Input<Token> input) throws IOException, ParseFailed {
+        verbose = System.getProperty("sbp.verbose", null) != null;
+        spinpos = 0;
+        GSS gss = new GSS(input, this);
+        try {
+            for(GSS.Phase current = gss.new Phase<Token>(pt.start); ;) {
+                if (verbose) debug(current.token, gss, input);
+                if (current.isDone()) return (Forest<NodeType>)current.finalResult;
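+                // shift the current token across the region it spans, then open
+                // the next Phase of the GSS using the resulting forest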
+                Input.Region region = current.getLocation().createRegion(current.getNextLocation());
+                Forest forest = shiftToken((Token)current.token, region);
+                current = gss.new Phase<Token>(current, forest);
+            }
+        } finally {
+            if (verbose) {
+                System.err.print("\r"+ANSI.clreol());
+                debug(null, gss, input);
+            }
         }
     }
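+
+    // Typical use, sketched (names here are illustrative; IOException also propagates):
+    //
+    //     Parser<Character,String> parser = new MyCharParser(grammar);
+    //     try {
+    //         Forest<String> forest = parser.parse(input);    // the shared packed parse forest
+    //     } catch (ParseFailed pf) {
+    //         // pf describes where and why the parse failed
+    //     }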
 
+    // Spinner //////////////////////////////////////////////////////////////////////////////
+
+    private boolean verbose = false;
+    private static final char[] spin = new char[] { '-', '\\', '|', '/' };
+    private int spinpos = 0;
+    private long last = 0;
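+    /** paint a progress spinner on stderr (at most once every 70ms) when verbose mode is on */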
+    void spin() {
+        if (!verbose) return;
+        long now = System.currentTimeMillis();
+        if (now-last < 70) return;
+        last = now;
+        System.err.print("\r  " + spin[spinpos++ % (spin.length)]+"\r");
+    }
+
+    private int _last = -1;
+    private String buf = "";
+    private void debug(Object t, GSS gss, Input input) {
+        //FIXME
+        int c = t==null ? -1 : ((t+"").charAt(0));
+        int last = _last;
+        _last = c;
+        switch(c) {
+            case edu.berkeley.sbp.chr.CharAtom.left:
+                buf += "\033[31m{\033[0m";
+                break;
+            case edu.berkeley.sbp.chr.CharAtom.right:
+                buf += "\033[31m}\033[0m";
+                break;
+            case -1: // FIXME 
+            case '\n':
+                if (verbose) {
+                    if (last==' ') buf += ANSI.blue("\\n");
+                    System.err.println("\r"+ANSI.clreol()+"\r"+buf);
+                    buf = "";
+                }
+                break;
+            default:
+                buf += ANSI.cyan(""+((char)c));
+                break;
+        }
+        if (t==null) return;
+
+        // FIXME: clean this up
+        String s;
+        s = "  " + spin[spinpos++ % (spin.length)]+" parsing ";
+        s += input.getName();
+        s += " "+input.getLocation();
+        while(s.indexOf(':') != -1 && s.indexOf(':') < 8) s = " " + s;
+        String y = "@"+gss.viewPos+" ";
+        while(y.length() < 9) y = " " + y;
+        s += y;
+        s += "   nodes="+gss.numOldNodes;
+        while(s.length() < 50) s = s + " ";
+        s += " shifted="+gss.numNewNodes;
+        while(s.length() < 60) s = s + " ";
+        s += " reductions="+gss.numReductions;
+        while(s.length() < 78) s = s + " ";
+        System.err.print("\r"+ANSI.invert(s+ANSI.clreol())+"\r");
+    }
+
     // Table //////////////////////////////////////////////////////////////////////////////
 
     /** an SLR(1) parse table which may contain conflicts */
-    public static class Table<Tok> extends Walk.Cache {
-
-        public final Walk.Cache cache = this;
-
-        private void walk(Element e, HashSet<Element> hs) {
-            if (e==null) return;
-            if (hs.contains(e)) return;
-            hs.add(e);
-            if (e instanceof Atom) return;
-            for(Sequence s : (Union)e) {
-                hs.add(s);
-                for(Position p = s.firstp(); p != null; p = p.next())
-                    walk(p.element(), hs);
-            }
-        }
+    class Table extends Grammar<Token> {
 
         /** the start state */
-        public final State<Tok>   start;
+        final State<Token>   start;
+
+        /** a dummy state from which no reductions can be performed */
+        private final State<Token>   dead_state;
 
         /** used to generate unique values for State.idx */
         private int master_state_idx = 0;
-        HashMap<HashSet<Position>,State<Tok>>   all_states    = new HashMap<HashSet<Position>,State<Tok>>();
 
+        /** all the states for this table */
+        HashSet<State<Token>>                     all_states       = new HashSet<State<Token>>();
+
+        /** all the doomed states in this table */
+        HashMap<HashSet<Position>,State<Token>>   doomed_states    = new HashMap<HashSet<Position>,State<Token>>();
+
+        /** all the non-doomed states in this table */
+        HashMap<HashSet<Position>,State<Token>>   normal_states    = new HashMap<HashSet<Position>,State<Token>>();
+
+        Topology<Token> emptyTopology() { return Parser.this.emptyTopology(); }
+    
         /** construct a parse table for the given grammar */
-        public Table(Topology top) { this("s", top); }
-        public Table(String startSymbol, Topology top) { this(new Union(startSymbol), top); }
-        public Table(Union ux, Topology top) {
-            Union start0 = new Union("0");
-            start0.add(new Sequence.Singleton(ux));
-
-            for(Sequence s : start0) cache.eof.put(s, true);
-            cache.eof.put(start0, true);
-
-            // construct the set of states
-            HashSet<Element>                        all_elements  = new HashSet<Element>();
-            walk(start0, all_elements);
-            for(Element e : all_elements)
-                cache.ys.addAll(e, new Walk.YieldSet(e, cache).walk());
-            HashSet<Position> hp = new HashSet<Position>();
-            reachable(start0, hp);
-            this.start = new State<Tok>(hp, all_states, all_elements);
+        Table(Union ux) {
+            super(new Union("0", Sequence.create(ux), true));
 
+            // create the "dead state"
+            this.dead_state = new State<Token>(new HashSet<Position>(), true);
+
+            // construct the start state; this will recursively create *all* the states
+            this.start = new State<Token>(reachable(rootUnion), false);
+
+            buildReductions();
+            sortReductions();
+        }
+
+        /** fill in the reductions table */
+        private void buildReductions() {
             // for each state, fill in the corresponding "row" of the parse table
-            for(State<Tok> state : all_states.values())
+            for(State<Token> state : all_states)
                 for(Position p : state.hs) {
 
-                    // the Grammar's designated "last position" is the only accepting state
-                    if (start0.contains(p.owner()) && p.next()==null)
-                        state.accept = true;
-
-                    if (isRightNullable(p)) {
-                        Walk.Follow wf = new Walk.Follow(top.empty(), p.owner(), all_elements, cache);
-                        Topology follow = wf.walk(p.owner());
-                        for(Position p2 = p; p2 != null && p2.element() != null; p2 = p2.next())
-                            follow = follow.intersect(new Walk.Follow(top.empty(), p2.element(), all_elements, cache).walk(p2.element()));
-                        state.reductions.put(follow, p);
-                        if (wf.includesEof()) state.eofReductions.add(p);
-                    }
-
                     // if the element following this position is an atom, copy the corresponding
                     // set of rows out of the "master" goto table and into this state's shift table
                     if (p.element() != null && p.element() instanceof Atom)
-                        state.shifts.addAll(state.gotoSetTerminals.subset(((Atom)p.element())));
+                        state.shifts.addAll(state.gotoSetTerminals.subset(((Atom)p.element()).getTokenTopology()));
+
+                    // RNGLR: we can potentially reduce from any "right-nullable" position -- that is,
+                    // any position for which all Elements after it in the Sequence are capable of
+                    // matching the empty string.
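+                    // Example (hypothetical grammar): in  a ::= x y z,  with y and z both able
+                    // to match the empty string, the position  a ::= x . y z  is right-nullable:
+                    // a node in this state may reduce by "a" without shifting anything for y or z,
+                    // provided the next token lies in the follow-set computed below.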
+                    if (!isRightNullable(p)) continue;
+                    Topology<Token> follow = follow(p.owner());
+                    for(Position p2 = p; p2 != null && p2.element() != null; p2 = p2.next()) {
+                        if (!(p2.element() instanceof Union))
+                            throw new Error("impossible -- only Unions can be nullable");
+                        
+                        // interesting RNGLR-followRestriction interaction: we must intersect
+                        // not just the follow-set of the last non-nullable element, but the
+                        // follow-sets of the nulled elements as well.
+                        for(Sequence s : ((Union)p2.element()))
+                            follow = follow.intersect(follow(s));
+                        Topology<Token> set = epsilonFollowSet((Union)p2.element());
+                        if (set != null) follow = follow.intersect(set);
+                    }
+                    
+                    // indicate that when the next token is in the set "follow", nodes in this
+                    // state should reduce according to Position "p"
+                    state.reductions.put(follow, p);
+                    if (followEof.contains(p.owner())) state.eofReductions.add(p);
                 }
-            if (top instanceof IntegerTopology)
-                for(State<Tok> state : all_states.values()) {
-                    state.oreductions = state.reductions.optimize(((IntegerTopology)top).functor());
-                    state.oshifts = state.shifts.optimize(((IntegerTopology)top).functor());
+
+            // optimize the reductions table
+            if (emptyTopology() instanceof IntegerTopology)
+                for(State<Token> state : all_states) {
+                    // FIXME: this is pretty ugly
+                    state.oreductions = state.reductions.optimize(((IntegerTopology)emptyTopology()).functor());
+                    state.oshifts     = state.shifts.optimize(((IntegerTopology)emptyTopology()).functor());
                 }
         }
 
-        private boolean isRightNullable(Position p) {
-            if (p.isLast()) return true;
-            if (!possiblyEpsilon(p.element())) return false;
-            return isRightNullable(p.next());
-        }
+        // FIXME: this method needs to be cleaned up and documented
+        private void sortReductions() {
+            // crude algorithm to assign an ordinal ordering to every position
+            // al will be kept sorted in increasing order (al[0] <= al[1])
+            ArrayList<Sequence.Position> al = new ArrayList<Sequence.Position>();
+            for(State s : all_states) {
+                for(Object po : s) {
+                    Sequence.Position p = (Sequence.Position)po;
+                    if (al.contains(p)) continue;
+                    int i=0;
+                    for(; i<al.size(); i++) {
+                        if (comparePositions(p, al.get(i)) < 0)
+                            break;
+                    }
+                    al.add(i, p);
+                }
+            }
+            // FIXME: this actually pollutes the "pure" objects (the ones that should not be modified by the Parser)
+            // sort in increasing order...
+            OUTER: while(true) {
+                for(int i=0; i<al.size(); i++)
+                    for(int j=i+1; j<al.size(); j++)
+                        if (comparePositions(al.get(i), al.get(j)) > 0) {
+                            Sequence.Position p = al.remove(j);
+                            al.add(i, p);
+                            continue OUTER;
+                        }
+                break;
+            }
 
-        /** a single state in the LR table and the transitions possible from it */
+            int j = 1;
+            int pk = 0;
+            for(int i=0; i<al.size(); i++) {
+                boolean inc = false;
+                for(int k=pk; k<i; k++) {
+                    if (comparePositions(al.get(k), al.get(i)) > 0)
+                        { inc = true; break; }
+                }
+                inc = true;
+                if (inc) {
+                    j++;
+                    pk = i;
+                }
+                al.get(i).ord = j;
+            }
+        }
 
-        public class State<Tok> implements Comparable<State<Tok>>, IntegerMappable, Iterable<Position> {
+        /**
+         *  A single state in the LR table and the transitions
+         *  possible from it
+         *
+         *  A state corresponds to a set of Sequence.Position's.  Each
+         *  Node in the GSS has a State; the Node represents a set of
+         *  possible parses, one for each Position in the State.
+         *
+         *  Every state is either "doomed" or "normal".  If a Position
+         *  is part of a Sequence which is a conjunct (that is, it was
+         *  passed to Sequence.{and(),andnot()}), then that Position
+         *  will appear only in doomed States.  Furthermore, any set
+         *  of Positions reachable from a doomed State also forms a
+         *  doomed State.  Note that in this latter case, a doomed
+         *  state might have exactly the same set of Positions as a
+         *  non-doomed state.
+         *
+         *  Nodes with non-doomed States contribute to actual
+         *  valid parses.  Nodes with doomed
+         *  States exist for no other purpose than to enable/disable
+         *  some future reduction from a non-doomed Node.  Because of
+         *  this, we "garbage-collect" Nodes with doomed states if
+         *  there are no more non-doomed Nodes which they could
+         *  affect (see Result, Reduction, and Node for details).
+         *
+         *  Without this optimization, many seemingly-innocuous uses
+         *  of positive and negative conjuncts can trigger O(n^2)
+         *  space+time complexity in otherwise simple grammars.  There
+         *  is an example of this in the regression suite.
+         */
+        class State<Token> implements IntegerMappable, Iterable<Position> {
         
             public  final     int               idx    = master_state_idx++;
             private final     HashSet<Position> hs;
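+            /** the doomed States spawned on behalf of this State's conjuncts (see Step 1b in the constructor) */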
+            public HashSet<State<Token>> conjunctStates = new HashSet<State<Token>>();
 
-            public transient HashMap<Element,State<Tok>>          gotoSetNonTerminals = new HashMap<Element,State<Tok>>();
-            private transient TopologicalBag<Tok,State<Tok>>     gotoSetTerminals    = new TopologicalBag<Tok,State<Tok>>();
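+            // goto tables: the successor State reached after reducing a given Sequence
+            // (nonterminal) or after shifting a token in a given Topology (terminal)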
+            HashMap<Sequence,State<Token>>      gotoSetNonTerminals = new HashMap<Sequence,State<Token>>();
+            private transient TopologicalBag<Token,State<Token>>  gotoSetTerminals    = new TopologicalBag<Token,State<Token>>();
 
-            private           TopologicalBag<Tok,Position> reductions          = new TopologicalBag<Tok,Position>();
-            private           HashSet<Position>              eofReductions       = new HashSet<Position>();
-            private           TopologicalBag<Tok,State<Tok>>     shifts              = new TopologicalBag<Tok,State<Tok>>();
-            private           boolean                         accept              = false;
+            private           TopologicalBag<Token,Position>      reductions          = new TopologicalBag<Token,Position>();
+            private           HashSet<Position>                   eofReductions       = new HashSet<Position>();
+            private           TopologicalBag<Token,State<Token>>  shifts              = new TopologicalBag<Token,State<Token>>();
+            private           boolean                             accept              = false;
 
-            private VisitableMap<Tok,State<Tok>> oshifts = null;
-            private VisitableMap<Tok,Position> oreductions = null;
+            private VisitableMap<Token,State<Token>> oshifts     = null;
+            private VisitableMap<Token,Position>     oreductions = null;
+            public  final boolean doomed;
 
             // Interface Methods //////////////////////////////////////////////////////////////////////////////
 
-            boolean             isAccepting()               { return accept; }
-            public Iterator<Position>  iterator()                  { return hs.iterator(); }
-
-            boolean             canShift(Tok t)           { return oshifts.contains(t); }
-            <B,C> void          invokeShifts(Tok t, Invokable<State<Tok>,B,C> irbc, B b, C c) {
-                oshifts.invoke(t, irbc, b, c);
+            public boolean doomed() { return doomed; }
+            boolean                    isAccepting()           { return accept; }
+            public Iterator<Position>  iterator()              { return hs.iterator(); }
+            boolean                    canShift(Token t)       { return oshifts!=null && oshifts.contains(t); }
+            void                       invokeShifts(Token t, GSS.Phase phase, Result r) { oshifts.invoke(t, phase, r); }
+            boolean                    canReduce(Token t)        {
+                return oreductions != null && (t==null ? eofReductions.size()>0 : oreductions.contains(t)); }
+            void          invokeEpsilonReductions(Token t, Node node) {
+                if (t==null) for(Position r : eofReductions) node.invoke(r, null);
+                else         oreductions.invoke(t, node, null);
             }
-
-            boolean             canReduce(Tok t)          { return t==null ? eofReductions.size()>0 : oreductions.contains(t); }
-            <B,C> void          invokeReductions(Tok t, Invokable<Position,B,C> irbc, B b, C c) {
-                if (t==null) for(Position r : eofReductions) irbc.invoke(r, b, c);
-                else         oreductions.invoke(t, irbc, b, c);
+            void          invokeReductions(Token t, Node node, Result b) {
+                if (t==null) for(Position r : eofReductions) node.invoke(r, b);
+                else         oreductions.invoke(t, node, b);
             }
 
             // Constructor //////////////////////////////////////////////////////////////////////////////
@@ -154,8 +308,7 @@ public abstract class Parser<Tok, Result> {
             /**
              *  create a new state consisting of all the <tt>Position</tt>s in <tt>hs</tt>
              *  @param hs           the set of <tt>Position</tt>s comprising this <tt>State</tt>
-             *  @param all_states   the set of states already constructed (to avoid recreating states)
-             *  @param all_elements the set of all elements (Atom instances need not be included)
+             *  @param doomed       whether the new <tt>State</tt> is doomed (see the class comment above)
              *  
              *   In principle these two steps could be merged, but they
              *   are written separately to highlight these two facts:
@@ -173,77 +326,114 @@ public abstract class Parser<Tok, Result> {
              *      for non-Atom Elements.
              *  </ul>
              */
-            public State(HashSet<Position> hs,
-                         HashMap<HashSet<Position>,State<Tok>> all_states,
-                         HashSet<Element> all_elements) {
+            public State(HashSet<Position> hs, boolean doomed) {
                 this.hs = hs;
+                this.doomed = doomed;
 
-                // register ourselves in the all_states hash so that no
-                // two states are ever created with an identical position set
-                all_states.put(hs, this);
+                // register ourselves so that no two states are ever
+                // created with an identical position set (termination depends on this)
+                ((HashMap)(doomed ? doomed_states : normal_states)).put(hs, this);
+                ((HashSet)all_states).add(this);
+
+                for(Position p : hs) {
+                    // Step 1a: take note if we are an accepting state
+                    //          (last position of the root Union's sequence)
+                    if (p.next()==null && !doomed && rootUnion.contains(p.owner()))
+                        accept = true;
+
+                    // Step 1b: If any Position in the set is the first position of its sequence, then this
+                    //          state is responsible for spawning the "doomed" states for each of the
+                    //          Sequence's conjuncts.  This obligation is recorded by adding the to-be-spawned
+                    //          states to conjunctStates.
+                    if (!p.isFirst()) continue;
+                    for(Sequence s : p.owner().needs())
+                        if (!hs.contains(s.firstp()))
+                            conjunctStates.add(mkstate(reachable(s.firstp()), true));
+                    for(Sequence s : p.owner().hates())
+                        if (!hs.contains(s.firstp()))
+                            conjunctStates.add(mkstate(reachable(s.firstp()), true));
+                }
 
-                // Step 1a: examine all Position's in this state and compute the mappings from
+                // Step 2a: examine all Position's in this state and compute the mappings from
                 //          sets of follow tokens (tokens which could follow this position) to sets
                 //          of _new_ positions (positions after shifting).  These mappings are
                 //          collectively known as the _closure_
 
-                TopologicalBag<Tok,Position> bag0 = new TopologicalBag<Tok,Position>();
+                TopologicalBag<Token,Position> bag0 = new TopologicalBag<Token,Position>();
                 for(Position position : hs) {
                     if (position.isLast() || !(position.element() instanceof Atom)) continue;
                     Atom a = (Atom)position.element();
                     HashSet<Position> hp = new HashSet<Position>();
                     reachable(position.next(), hp);
-                    bag0.addAll(a, hp);
+                    bag0.addAll(a.getTokenTopology(), hp);
                 }
 
-                // Step 1b: for each _minimal, contiguous_ set of characters having an identical next-position
+                // Step 2b: for each _minimal, contiguous_ set of characters having an identical next-position
                 //          set, add that character set to the goto table (with the State corresponding to the
                 //          computed next-position set).
 
-                for(Topology<Tok> r : bag0) {
+                for(Topology<Token> r : bag0) {
                     HashSet<Position> h = new HashSet<Position>();
                     for(Position p : bag0.getAll(r)) h.add(p);
-                    gotoSetTerminals.put(r, all_states.get(h) == null ? new State<Tok>(h, all_states, all_elements) : all_states.get(h));
+                    ((TopologicalBag)gotoSetTerminals).put(r, mkstate(h, doomed));
                 }
 
-                // Step 2: for every non-Atom element (ie every Element which has a corresponding reduction),
-                //         compute the closure over every position in this set which is followed by a symbol
-                //         which could yield the Element in question.
+                // Step 3: for every Sequence, compute the closure over every position in this set which
+                //         is followed by a symbol which could yield the Sequence.
                 //
                 //         "yields" [in one or more step] is used instead of "produces" [in exactly one step]
                 //         to avoid having to iteratively construct our set of States as shown in most
                 //         expositions of the algorithm (ie "keep doing XYZ until things stop changing").
-                HashMapBag<Element,Position> move = new HashMapBag<Element,Position>();
-                for(Position p : hs) {
-                    Element e = p.element();
-                    if (e==null) continue;
-                    for(Element y : cache.ys.getAll(e)) {
-                        HashSet<Position> hp = new HashSet<Position>();
-                        reachable(p.next(), hp);
-                        move.addAll(y, hp);
-                    }
-                }
-                for(Element y : move) {
+
+                HashMapBag<Sequence,Position> move = new HashMapBag<Sequence,Position>();
+                for(Position p : hs)
+                    if (!p.isLast() && p.element() instanceof Union)
+                        for(Sequence s : ((Union)p.element())) {
+                            HashSet<Position> hp = new HashSet<Position>();
+                            reachable(p.next(), hp);
+                            move.addAll(s, hp);
+                        }
+                OUTER: for(Sequence y : move) {
+                    // if a reduction is "lame", it should wind up in the dead_state after reducing
                     HashSet<Position> h = move.getAll(y);
-                    State<Tok> s = all_states.get(h) == null ? new State<Tok>(h, all_states, all_elements) : all_states.get(h);
+                    State<Token> s = mkstate(h, doomed);
+                    for(Position p : hs)
+                        if (p.element() != null && (p.element() instanceof Union))
+                            for(Sequence seq : ((Union)p.element()))
+                                if (seq.needs.contains(y) || seq.hates.contains(y)) {
+                                    // FIXME: assumption that no sequence is ever both usefully (non-lamely) matched
+                                    //        and also directly lamely matched
+                                    ((HashMap)gotoSetNonTerminals).put(y, dead_state);
+                                    continue OUTER;
+                                }
                     gotoSetNonTerminals.put(y, s);
                 }
             }
 
+            private State<Token> mkstate(HashSet<Position> h, boolean b) {
+                State ret = (b?doomed_states:normal_states).get(h);
+                if (ret==null) ret = new State<Token>(h,b);
+                return ret;
+            }
+
+            public int toInt() { return idx; }
             public String toString() {
                 StringBuffer ret = new StringBuffer();
-                ret.append("state["+idx+"]: ");
-                for(Position p : this) ret.append("{"+p+"}  ");
+                for(Position p : hs)
+                    ret.append(p+"\n");
                 return ret.toString();
             }
-
-            public int compareTo(State<Tok> s) { return idx==s.idx ? 0 : idx < s.idx ? -1 : 1; }
-            public int toInt() { return idx; }
         }
+
     }
 
     // Helpers //////////////////////////////////////////////////////////////////////////////
     
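+    /** compute the set of Positions reachable from <tt>e</tt> without consuming any input */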
+    private static HashSet<Position> reachable(Element e) {
+        HashSet<Position> h = new HashSet<Position>();
+        reachable(e, h);
+        return h;
+    }
     private static void reachable(Element e, HashSet<Position> h) {
         if (e instanceof Atom) return;
         for(Sequence s : ((Union)e))
@@ -254,5 +444,10 @@ public abstract class Parser<Tok, Result> {
         h.add(p);
         if (p.element() != null) reachable(p.element(), h);
     }
+    private static HashSet<Position> reachable(Position p) {
+        HashSet<Position> ret = new HashSet<Position>();
+        reachable(p, ret);
+        return ret;
+    }
 
 }