UnwrapLeft, error reporting improvements
[sbp.git] / src / edu / berkeley / sbp / Parser.java
index f239fe5..0b2229d 100644 (file)
 // Copyright 2006 all rights reserved; see LICENSE file for BSD-style license
 
 package edu.berkeley.sbp;
-import edu.berkeley.sbp.*;
 import edu.berkeley.sbp.util.*;
 import edu.berkeley.sbp.Sequence.Position;
 import java.io.*;
 import java.util.*;
 
+// FEATURE: try harder to "fuse" states together along two dimensions:
+//   - identical (equivalent) states, or states that subsume each other
+//   - unnecessary intermediate states ("short cut" GLR)
+
 /** a parser which translates an Input<Token> into a Forest<NodeType> */
 public abstract class Parser<Token, NodeType> {
-    protected final Table<Token> pt;
+
+    final Table pt;
 
     /** create a parser to parse the grammar with start symbol <tt>u</tt> */
-    public Parser(Union u, Topology<Token> top)  { this.pt = new Table<Token>(u, top); }
-    Parser(Table<Token> pt)               { this.pt = pt; }
+    public Parser(Union u)  { this.pt = new Table(u); }
 
     /** implement this method to create the output forest corresponding to a lone shifted input token */
-    public abstract Forest<NodeType> shiftToken(Token t, Input.Location newloc);
+    public abstract Forest<NodeType> shiftToken(Token t, Input.Region region);
 
-    boolean helpgc = true;
+    public abstract Topology<Token> emptyTopology();
 
     public String toString() { return pt.toString(); }
+    Grammar cache() { return pt; }
 
     /** parse <tt>input</tt>, and return the shared packed parse forest (or throw an exception) */
     public Forest<NodeType> parse(Input<Token> input) throws IOException, ParseFailed {
-        GSS gss = new GSS(input);
-        Input.Location loc = input.getLocation();
-        Token tok = input.next();
-        GSS.Phase current = gss.new Phase<Token>(null, null, tok, loc, input.getLocation(), null);
-        current.newNode(new Result(Forest.create(loc.createRegion(loc), null, null, false), null, null), pt.start, true);
-        int count = 1;
-        for(int idx=0;;idx++) {
-            Input.Location oldloc = loc;
-            current.reduce();
-            Forest forest = current.token==null ? null : shiftToken((Token)current.token, loc);
-            loc = input.getLocation();
-            Token nextToken = input.next();
-            GSS.Phase next = gss.new Phase<Token>(current, current, nextToken, loc, input.getLocation(), forest);
-
-            /*
-            FileOutputStream fos = new FileOutputStream("out-"+idx+".dot");
-            PrintWriter p = new PrintWriter(new OutputStreamWriter(fos));
-            GraphViz gv = new GraphViz();
-            for(Object n : current)
-                ((Node)n).toGraphViz(gv);
-            gv.dump(p);
-            p.flush();
-            p.close();
-            */
-
-            count = next.size();
-            if (current.isDone()) return (Forest<NodeType>)gss.finalResult;
-            current = next;
+        verbose = System.getProperty("sbp.verbose", null) != null;
+        spinpos = 0;
+        GSS gss = new GSS(input, this);
+        try {
+            for(GSS.Phase current = gss.new Phase<Token>(pt.start); ;) {
+                if (verbose) debug(current.token, gss, input);
+                if (current.isDone()) return (Forest<NodeType>)current.finalResult;
+                Input.Region region = current.getLocation().createRegion(current.getNextLocation());
+                Forest forest = shiftToken((Token)current.token, region);
+                current = gss.new Phase<Token>(current, forest);
+            }
+        } finally {
+            if (verbose) {
+                System.err.print("\r"+ANSI.clreol());
+                debug(null, gss, input);
+            }
         }
     }
 
-    // Table //////////////////////////////////////////////////////////////////////////////
+    // Spinner //////////////////////////////////////////////////////////////////////////////
+
+    private boolean verbose = false;
+    private static final char[] spin = new char[] { '-', '\\', '|', '/' };
+    private int spinpos = 0;
+    private long last = 0;
+    void spin() {
+        if (!verbose) return;
+        long now = System.currentTimeMillis();
+        if (now-last < 70) return;
+        last = now;
+        System.err.print("\r  " + spin[spinpos++ % (spin.length)]+"\r");
+    }
 
-    /** an SLR(1) parse table which may contain conflicts */
-    static class Table<Token> extends Walk.Cache {
-
-        public String toString() {
-            StringBuffer sb = new StringBuffer();
-            sb.append("parse table");
-            for(State<Token> state : all_states.values()) {
-                sb.append("  " + state + "\n");
-                for(Topology<Token> t : state.shifts) {
-                    sb.append("      shift  \""+
-                              new edu.berkeley.sbp.chr.CharTopology((IntegerTopology<Character>)t)+"\" => ");
-                    for(State st : state.shifts.getAll(t))
-                        sb.append(st.idx+"  ");
-                    sb.append("\n");
+    private int _last = -1;
+    private String buf = "";
+    private void debug(Object t, GSS gss, Input input) {
+        //FIXME
+        int c = t==null ? -1 : ((t+"").charAt(0));
+        int last = _last;
+        _last = c;
+        switch(c) {
+            case edu.berkeley.sbp.chr.CharAtom.left:
+                buf += "\033[31m{\033[0m";
+                break;
+            case edu.berkeley.sbp.chr.CharAtom.right:
+                buf += "\033[31m}\033[0m";
+                break;
+            case -1: // FIXME 
+            case '\n':
+                if (verbose) {
+                    if (last==' ') buf += ANSI.blue("\\n");
+                    System.err.println("\r"+ANSI.clreol()+"\r"+buf);
+                    buf = "";
                 }
-                for(Topology<Token> t : state.reductions)
-                    sb.append("      reduce \""+
-                              new edu.berkeley.sbp.chr.CharTopology((IntegerTopology<Character>)t)+"\" => " +
-                              state.reductions.getAll(t) + "\n");
-                for(Sequence s : state.gotoSetNonTerminals.keySet())
-                    sb.append("      goto   "+state.gotoSetNonTerminals.get(s)+" from " + s + "\n");
-            }
-            return sb.toString();
+                break;
+            default:
+                buf += ANSI.cyan(""+((char)c));
+                break;
         }
+        if (t==null) return;
+
+        // FIXME: clean this up
+        String s;
+        s = "  " + spin[spinpos++ % (spin.length)]+" parsing ";
+        s += input.getName();
+        s += " "+input.getLocation();
+        while(s.indexOf(':') != -1 && s.indexOf(':') < 8) s = " " + s;
+        String y = "@"+gss.viewPos+" ";
+        while(y.length() < 9) y = " " + y;
+        s += y;
+        s += "   nodes="+gss.numOldNodes;
+        while(s.length() < 50) s = s + " ";
+        s += " shifted="+gss.numNewNodes;
+        while(s.length() < 60) s = s + " ";
+        s += " reductions="+gss.numReductions;
+        while(s.length() < 78) s = s + " ";
+        System.err.print("\r"+ANSI.invert(s+ANSI.clreol())+"\r");
+    }
 
-        public final Walk.Cache cache = this;
+    // Table //////////////////////////////////////////////////////////////////////////////
 
-        private void walk(Element e, HashSet<SequenceOrElement> hs) {
-            if (e==null) return;
-            if (hs.contains(e)) return;
-            hs.add(e);
-            if (e instanceof Atom) return;
-            for(Sequence s : (Union)e)
-                walk(s, hs);
-        }
-        private void walk(Sequence s, HashSet<SequenceOrElement> hs) {
-            hs.add(s);
-            for(Position p = s.firstp(); p != null; p = p.next())
-                walk(p.element(), hs);
-            for(Sequence ss : s.needs()) walk(ss, hs);
-            for(Sequence ss : s.hates()) walk(ss, hs);
-        }
+    /** an SLR(1) parse table which may contain conflicts */
+    class Table extends Grammar<Token> {
 
         /** the start state */
-        public  final State<Token>   start;
+        final State<Token>   start;
 
-        /** the state from which no reductions can be done */
+        /** a dummy state from which no reductions can be performed */
         private final State<Token>   dead_state;
 
         /** used to generate unique values for State.idx */
         private int master_state_idx = 0;
-        HashMap<HashSet<Position>,State<Token>>   all_states    = new HashMap<HashSet<Position>,State<Token>>();
-        HashSet<SequenceOrElement>                all_elements  = new HashSet<SequenceOrElement>();
 
+        /** all the states for this table */
+        HashSet<State<Token>>                     all_states       = new HashSet<State<Token>>();
+
+        /** all the doomed states in this table */
+        HashMap<HashSet<Position>,State<Token>>   doomed_states    = new HashMap<HashSet<Position>,State<Token>>();
+
+        /** all the non-doomed states in this table */
+        HashMap<HashSet<Position>,State<Token>>   normal_states    = new HashMap<HashSet<Position>,State<Token>>();
+
+        Topology<Token> emptyTopology() { return Parser.this.emptyTopology(); }
+    
         /** construct a parse table for the given grammar */
-        public Table(Topology top) { this("s", top); }
-        public Table(String startSymbol, Topology top) { this(new Union(startSymbol), top); }
-        public Table(Union ux, Topology top) {
-            Union start0 = new Union("0");
-            start0.add(new Sequence.Singleton(ux));
-
-            for(Sequence s : start0) cache.eof.put(s, true);
-            cache.eof.put(start0, true);
-
-            // construct the set of states
-            walk(start0, all_elements);
-            for(SequenceOrElement e : all_elements)
-                cache.ys.addAll(e, new Walk.YieldSet(e, cache).walk());
-            for(SequenceOrElement e : all_elements)
-                cache.ys2.addAll(e, new Walk.YieldSet2(e, cache).walk());
-            HashSet<Position> hp = new HashSet<Position>();
-            reachable(start0, hp);
-
-            this.dead_state = new State<Token>(new HashSet<Position>());
-            this.start = new State<Token>(hp);
+        Table(Union ux) {
+            super(new Union("0", Sequence.create(ux), true));
+
+            // create the "dead state"
+            this.dead_state = new State<Token>(new HashSet<Position>(), true);
+
+            // construct the start state; this will recursively create *all* the states
+            this.start = new State<Token>(reachable(rootUnion), false);
+
+            buildReductions();
+            sortReductions();
+        }
 
+        /** fill in the reductions table */
+        private void buildReductions() {
             // for each state, fill in the corresponding "row" of the parse table
-            for(State<Token> state : all_states.values())
+            for(State<Token> state : all_states)
                 for(Position p : state.hs) {
 
-                    // the Grammar's designated "last position" is the only accepting state
-                    if (start0.contains(p.owner()) && p.next()==null)
-                        state.accept = true;
-
-                    if (isRightNullable(p)) {
-                        Walk.Follow wf = new Walk.Follow(top.empty(), p.owner(), all_elements, cache);
-                        Topology follow = wf.walk(p.owner());
-                        for(Position p2 = p; p2 != null && p2.element() != null; p2 = p2.next()) {
-                            Atom set = new Walk.EpsilonFollowSet(new edu.berkeley.sbp.chr.CharAtom(top.empty().complement()),
-                                                                 new edu.berkeley.sbp.chr.CharAtom(top.empty()),
-                                                                 cache).walk(p2.element());
-                            follow = follow.intersect(new Walk.Follow(top.empty(), p2.element(), all_elements, cache).walk(p2.element()));
-                            if (set != null) follow = follow.intersect(set.getTokenTopology());
-                        }
-                        state.reductions.put(follow, p);
-                        if (wf.includesEof()) state.eofReductions.add(p);
-                    }
-
                     // if the element following this position is an atom, copy the corresponding
                     // set of rows out of the "master" goto table and into this state's shift table
                     if (p.element() != null && p.element() instanceof Atom)
                         state.shifts.addAll(state.gotoSetTerminals.subset(((Atom)p.element()).getTokenTopology()));
+
+                    // RNGLR: we can potentially reduce from any "right-nullable" position -- that is,
+                    // any position for which all Elements after it in the Sequence are capable of
+                    // matching the empty string.
+                    if (!isRightNullable(p)) continue;
+                    Topology<Token> follow = follow(p.owner());
+                    for(Position p2 = p; p2 != null && p2.element() != null; p2 = p2.next()) {
+                        if (!(p2.element() instanceof Union))
+                            throw new Error("impossible -- only Unions can be nullable");
+                        
+                        // interesting RNGLR-followRestriction interaction: we must intersect
+                        // not just the follow-set of the last non-nullable element, but the
+                        // follow-sets of the nulled elements as well.
+                        for(Sequence s : ((Union)p2.element()))
+                            follow = follow.intersect(follow(s));
+                        Topology<Token> set = epsilonFollowSet((Union)p2.element());
+                        if (set != null) follow = follow.intersect(set);
+                    }
+                    
+                    // indicate that when the next token is in the set "follow", nodes in this
+                    // state should reduce according to Position "p"
+                    state.reductions.put(follow, p);
+                    if (followEof.contains(p.owner())) state.eofReductions.add(p);
                 }
-            if (top instanceof IntegerTopology)
-                for(State<Token> state : all_states.values()) {
-                    state.oreductions = state.reductions.optimize(((IntegerTopology)top).functor());
-                    state.oshifts = state.shifts.optimize(((IntegerTopology)top).functor());
+
+            // optimize the reductions table
+            if (emptyTopology() instanceof IntegerTopology)
+                for(State<Token> state : all_states) {
+                    // FIXME: this is pretty ugly
+                    state.oreductions = state.reductions.optimize(((IntegerTopology)emptyTopology()).functor());
+                    state.oshifts     = state.shifts.optimize(((IntegerTopology)emptyTopology()).functor());
                 }
         }
 
-        private boolean isRightNullable(Position p) {
-            if (p.isLast()) return true;
-            if (!possiblyEpsilon(p.element())) return false;
-            return isRightNullable(p.next());
-        }
+        // FIXME: this method needs to be cleaned up and documented
+        private void sortReductions() {
+            // crude algorithm to assign an ordinal ordering to every position
+            // al will be sorted in INCREASING order (al[0] <= al[1])
+            ArrayList<Sequence.Position> al = new ArrayList<Sequence.Position>();
+            for(State s : all_states) {
+                for(Object po : s) {
+                    Sequence.Position p = (Sequence.Position)po;
+                    if (al.contains(p)) continue;
+                    int i=0;
+                    for(; i<al.size(); i++) {
+                        if (comparePositions(p, al.get(i)) < 0)
+                            break;
+                    }
+                    al.add(i, p);
+                }
+            }
+            // FIXME: this actually pollutes the "pure" objects (the ones that should not be modified by the Parser)
+            // sort in increasing order...
+            OUTER: while(true) {
+                for(int i=0; i<al.size(); i++)
+                    for(int j=i+1; j<al.size(); j++)
+                        if (comparePositions(al.get(i), al.get(j)) > 0) {
+                            Sequence.Position p = al.remove(j);
+                            al.add(i, p);
+                            continue OUTER;
+                        }
+                break;
+            }
 
-        /** a single state in the LR table and the transitions possible from it */
+            int j = 1;
+            int pk = 0;
+            for(int i=0; i<al.size(); i++) {
+                boolean inc = false;
+                for(int k=pk; k<i; k++) {
+                    if (comparePositions(al.get(k), al.get(i)) > 0)
+                        { inc = true; break; }
+                }
+                inc = true;
+                if (inc) {
+                    j++;
+                    pk = i;
+                }
+                al.get(i).ord = j;
+            }
+        }
 
+        /**
+         *  A single state in the LR table and the transitions
+         *  possible from it
+         *
+         *  A state corresponds to a set of Sequence.Position's.  Each
+         *  Node in the GSS has a State; the Node represents a set of
+         *  possible parses, one for each Position in the State.
+         *
+         *  Every state is either "doomed" or "normal".  If a Position
+         *  is part of a Sequence which is a conjunct (that is, it was
+         *  passed to Sequence.{and(),andnot()}), then that Position
+         *  will appear only in doomed States.  Furthermore, any set
+         *  of Positions reachable from a doomed State also forms a
+         *  doomed State.  Note that in this latter case, a doomed
+         *  state might have exactly the same set of Positions as a
+         *  non-doomed state.
+         *
+         *  Nodes with non-doomed states represent nodes which
+         *  contribute to actual valid parses.  Nodes with doomed
+         *  States exist for no other purpose than to enable/disable
+         *  some future reduction from a non-doomed Node.  Because of
+         *  this, we "garbage-collect" Nodes with doomed states if
+         *  there are no more non-doomed Nodes which they could
+         *  affect (see Result, Reduction, and Node for details).
+         *
+         *  Without this optimization, many seemingly-innocuous uses
+         *  of positive and negative conjuncts can trigger O(n^2)
+         *  space+time complexity in otherwise simple grammars.  There
+         *  is an example of this in the regression suite.
+         */
         class State<Token> implements IntegerMappable, Iterable<Position> {
         
             public  final     int               idx    = master_state_idx++;
             private final     HashSet<Position> hs;
-            public HashSet<State<Token>> also = new HashSet<State<Token>>();
+            public HashSet<State<Token>> conjunctStates = new HashSet<State<Token>>();
 
-            public transient HashMap<Sequence,State<Token>>         gotoSetNonTerminals = new HashMap<Sequence,State<Token>>();
-            private transient TopologicalBag<Token,State<Token>>     gotoSetTerminals    = new TopologicalBag<Token,State<Token>>();
+            HashMap<Sequence,State<Token>>      gotoSetNonTerminals = new HashMap<Sequence,State<Token>>();
+            private transient TopologicalBag<Token,State<Token>>  gotoSetTerminals    = new TopologicalBag<Token,State<Token>>();
 
-            private           TopologicalBag<Token,Position> reductions          = new TopologicalBag<Token,Position>();
-            private           HashSet<Position>              eofReductions       = new HashSet<Position>();
-            private           TopologicalBag<Token,State<Token>>     shifts              = new TopologicalBag<Token,State<Token>>();
-            private           boolean                         accept              = false;
+            private           TopologicalBag<Token,Position>      reductions          = new TopologicalBag<Token,Position>();
+            private           HashSet<Position>                   eofReductions       = new HashSet<Position>();
+            private           TopologicalBag<Token,State<Token>>  shifts              = new TopologicalBag<Token,State<Token>>();
+            private           boolean                             accept              = false;
 
-            private VisitableMap<Token,State<Token>> oshifts = null;
-            private VisitableMap<Token,Position> oreductions = null;
+            private VisitableMap<Token,State<Token>> oshifts     = null;
+            private VisitableMap<Token,Position>     oreductions = null;
+            public  final boolean doomed;
 
             // Interface Methods //////////////////////////////////////////////////////////////////////////////
 
-            boolean             isAccepting()           { return accept; }
-            public Iterator<Position>  iterator()       { return hs.iterator(); }
-
-            boolean             canShift(Token t)         { return oshifts!=null && oshifts.contains(t); }
-            <B,C> void          invokeShifts(Token t, Invokable<State<Token>,B,C> irbc, B b, C c) {
-                oshifts.invoke(t, irbc, b, c);
+            public boolean doomed() { return doomed; }
+            boolean                    isAccepting()           { return accept; }
+            public Iterator<Position>  iterator()              { return hs.iterator(); }
+            boolean                    canShift(Token t)       { return oshifts!=null && oshifts.contains(t); }
+            void                       invokeShifts(Token t, GSS.Phase phase, Result r) { oshifts.invoke(t, phase, r); }
+            boolean                    canReduce(Token t)        {
+                return oreductions != null && (t==null ? eofReductions.size()>0 : oreductions.contains(t)); }
+            void          invokeEpsilonReductions(Token t, Node node) {
+                if (t==null) for(Position r : eofReductions) node.invoke(r, null);
+                else         oreductions.invoke(t, node, null);
             }
-
-            boolean             canReduce(Token t)        { return oreductions != null && (t==null ? eofReductions.size()>0 : oreductions.contains(t)); }
-            <B,C> void          invokeReductions(Token t, Invokable<Position,B,C> irbc, B b, C c) {
-                if (t==null) for(Position r : eofReductions) irbc.invoke(r, b, c);
-                else         oreductions.invoke(t, irbc, b, c);
+            void          invokeReductions(Token t, Node node, Result b) {
+                if (t==null) for(Position r : eofReductions) node.invoke(r, b);
+                else         oreductions.invoke(t, node, b);
             }
 
             // Constructor //////////////////////////////////////////////////////////////////////////////
@@ -213,7 +308,7 @@ public abstract class Parser<Token, NodeType> {
             /**
              *  create a new state consisting of all the <tt>Position</tt>s in <tt>hs</tt>
              *  @param hs           the set of <tt>Position</tt>s comprising this <tt>State</tt>
-             *  @param all_elements the set of all elements (Atom instances need not be included)
+             *  @param doomed       whether this <tt>State</tt> is doomed (see class comment)
              *  
              *   In principle these two steps could be merged, but they
              *   are written separately to highlight these two facts:
@@ -231,33 +326,35 @@ public abstract class Parser<Token, NodeType> {
              *      for non-Atom Elements.
              *  </ul>
              */
-            public State(HashSet<Position> hs) { this(hs, false); }
-            public boolean special;
-            public State(HashSet<Position> hs, boolean special) {
+            public State(HashSet<Position> hs, boolean doomed) {
                 this.hs = hs;
-                this.special = special;
+                this.doomed = doomed;
 
-                // register ourselves in the all_states hash so that no
-                // two states are ever created with an identical position set
-                ((HashMap)all_states).put(hs, this);
+                // register ourselves so that no two states are ever
+                // created with an identical position set (termination depends on this)
+                ((HashMap)(doomed ? doomed_states : normal_states)).put(hs, this);
+                ((HashSet)all_states).add(this);
 
                 for(Position p : hs) {
+                    // Step 1a: take note if we are an accepting state
+                    //          (last position of the root Union's sequence)
+                    if (p.next()==null && !doomed && rootUnion.contains(p.owner()))
+                        accept = true;
+
+                    // Step 1b: If any Position in the set is the first position of its sequence, then this
+                    //          state is responsible for spawning the "doomed" states for each of the
+                    //          Sequence's conjuncts.  This obligation is recorded by adding the to-be-spawned
+                    //          states to conjunctStates.
                     if (!p.isFirst()) continue;
-                    for(Sequence s : p.owner().needs()) {
-                        if (hs.contains(s.firstp())) continue;
-                        HashSet<Position> h2 = new HashSet<Position>();
-                        reachable(s.firstp(), h2);
-                        also.add((State<Token>)(all_states.get(h2) == null ? (State)new State<Token>(h2,true) : (State)all_states.get(h2)));
-                    }
-                    for(Sequence s : p.owner().hates()) {
-                        if (hs.contains(s.firstp())) continue;
-                        HashSet<Position> h2 = new HashSet<Position>();
-                        reachable(s, h2);
-                        also.add((State<Token>)(all_states.get(h2) == null ? (State)new State<Token>(h2,true) : (State)all_states.get(h2)));
-                    }
+                    for(Sequence s : p.owner().needs())
+                        if (!hs.contains(s.firstp()))
+                            conjunctStates.add(mkstate(reachable(s.firstp()), true));
+                    for(Sequence s : p.owner().hates())
+                        if (!hs.contains(s.firstp()))
+                            conjunctStates.add(mkstate(reachable(s.firstp()), true));
                 }
 
-                // Step 1a: examine all Position's in this state and compute the mappings from
+                // Step 2a: examine all Position's in this state and compute the mappings from
                 //          sets of follow tokens (tokens which could follow this position) to sets
                 //          of _new_ positions (positions after shifting).  These mappings are
                 //          collectively known as the _closure_
@@ -271,93 +368,86 @@ public abstract class Parser<Token, NodeType> {
                     bag0.addAll(a.getTokenTopology(), hp);
                 }
 
-                // Step 1b: for each _minimal, contiguous_ set of characters having an identical next-position
+                // Step 2b: for each _minimal, contiguous_ set of characters having an identical next-position
                 //          set, add that character set to the goto table (with the State corresponding to the
                 //          computed next-position set).
 
                 for(Topology<Token> r : bag0) {
                     HashSet<Position> h = new HashSet<Position>();
                     for(Position p : bag0.getAll(r)) h.add(p);
-                    ((TopologicalBag)gotoSetTerminals).put(r, all_states.get(h) == null
-                                                           ? new State<Token>(h) : all_states.get(h));
+                    ((TopologicalBag)gotoSetTerminals).put(r, mkstate(h, doomed));
                 }
 
-                // Step 2: for every non-Atom element (ie every Element which has a corresponding reduction),
-                //         compute the closure over every position in this set which is followed by a symbol
-                //         which could yield the Element in question.
+                // Step 3: for every Sequence, compute the closure over every position in this set which
+                //         is followed by a symbol which could yield the Sequence.
                 //
                 //         "yields" [in one or more step] is used instead of "produces" [in exactly one step]
                 //         to avoid having to iteratively construct our set of States as shown in most
                 //         expositions of the algorithm (ie "keep doing XYZ until things stop changing").
 
-                HashMapBag<SequenceOrElement,Position> move = new HashMapBag<SequenceOrElement,Position>();
-                for(Position p : hs) {
-                    Element e = p.element();
-                    if (e==null) continue;
-                    for(SequenceOrElement y : cache.ys2.getAll(e)) {
-                        //System.out.println(e + " yields " + y);
-                        HashSet<Position> hp = new HashSet<Position>();
-                        reachable(p.next(), hp);
-                        move.addAll(y, hp);
-                    }
-                }
-                OUTER: for(SequenceOrElement y : move) {
-                    HashSet<Position> h = move.getAll(y);
-                    State<Token> s = all_states.get(h) == null ? (State)new State<Token>(h) : (State)all_states.get(h);
-                    // if a reduction is "lame", it should wind up in the dead_state after reducing
-                    if (y instanceof Sequence) {
-                        for(Position p : hs) {
-                            if (p.element() != null && (p.element() instanceof Union)) {
-                                Union u = (Union)p.element();
-                                for(Sequence seq : u)
-                                    if (seq.needs.contains((Sequence)y) || seq.hates.contains((Sequence)y)) {
-                                        // FIXME: what if there are two "routes" to get to the sequence?
-                                        ((HashMap)gotoSetNonTerminals).put((Sequence)y, dead_state);
-                                        continue OUTER;
-                                    }
-                            }
+                HashMapBag<Sequence,Position> move = new HashMapBag<Sequence,Position>();
+                for(Position p : hs)
+                    if (!p.isLast() && p.element() instanceof Union)
+                        for(Sequence s : ((Union)p.element())) {
+                            HashSet<Position> hp = new HashSet<Position>();
+                            reachable(p.next(), hp);
+                            move.addAll(s, hp);
                         }
-                        gotoSetNonTerminals.put((Sequence)y, s);
-                    }
+                OUTER: for(Sequence y : move) {
+                    // if a reduction is "lame", it should wind up in the dead_state after reducing
+                    HashSet<Position> h = move.getAll(y);
+                    State<Token> s = mkstate(h, doomed);
+                    for(Position p : hs)
+                        if (p.element() != null && (p.element() instanceof Union))
+                            for(Sequence seq : ((Union)p.element()))
+                                if (seq.needs.contains(y) || seq.hates.contains(y)) {
+                                    // FIXME: assumption that no sequence is ever both usefully (non-lamely) matched
+                                    //        and also directly lamely matched
+                                    ((HashMap)gotoSetNonTerminals).put(y, dead_state);
+                                    continue OUTER;
+                                }
+                    gotoSetNonTerminals.put(y, s);
                 }
             }
 
-            public String toStringx() {
-                StringBuffer st = new StringBuffer();
-                for(Position p : this) {
-                    if (st.length() > 0) st.append("\n");
-                    st.append(p);
-                }
-                return st.toString();
+            private State<Token> mkstate(HashSet<Position> h, boolean b) {
+                State ret = (b?doomed_states:normal_states).get(h);
+                if (ret==null) ret = new State<Token>(h,b);
+                return ret;
             }
+
+            public int toInt() { return idx; }
             public String toString() {
                 StringBuffer ret = new StringBuffer();
-                ret.append("state["+idx+"]: ");
-                for(Position p : this) ret.append("{"+p+"}  ");
+                for(Position p : hs)
+                    ret.append(p+"\n");
                 return ret.toString();
             }
-
-            public Walk.Cache cache() { return cache; }
-            public int toInt() { return idx; }
         }
+
     }
 
     // Helpers //////////////////////////////////////////////////////////////////////////////
     
-    private static void reachable(Sequence s, HashSet<Position> h) {
-        reachable(s.firstp(), h);
-        //for(Sequence ss : s.needs()) reachable(ss, h);
-        //for(Sequence ss : s.hates()) reachable(ss, h);
+    private static HashSet<Position> reachable(Element e) {
+        HashSet<Position> h = new HashSet<Position>();
+        reachable(e, h);
+        return h;
     }
     private static void reachable(Element e, HashSet<Position> h) {
         if (e instanceof Atom) return;
         for(Sequence s : ((Union)e))
-            reachable(s, h);
+            reachable(s.firstp(), h);
     }
     private static void reachable(Position p, HashSet<Position> h) {
         if (h.contains(p)) return;
         h.add(p);
         if (p.element() != null) reachable(p.element(), h);
     }
+    private static HashSet<Position> reachable(Position p) {
+        HashSet<Position> ret = new HashSet<Position>();
+        reachable(p, ret);
+        return ret;
+    }
 
 }