summary patch for Nov->Jan work
diff --git a/src/edu/berkeley/sbp/Parser.java b/src/edu/berkeley/sbp/Parser.java
index fa2f7d1..10768f6 100644
@@ -28,8 +28,8 @@ public abstract class Parser<Token, NodeType> {
         GSS gss = new GSS(input);
         Input.Location loc = input.getLocation();
         Token tok = input.next();
-        GSS.Phase current = gss.new Phase<Token>(null, this, null, tok, loc, input.getLocation(), null);
-        current.newNode(null, Forest.create(loc.createRegion(loc), null, null, false), pt.start, true);
+        GSS.Phase current = gss.new Phase<Token>(null, null, tok, loc, input.getLocation(), null);
+        current.newNode(new Result(Forest.create(loc.createRegion(loc), null, null, false), null, null), pt.start, true);
         int count = 1;
         for(int idx=0;;idx++) {
             Input.Location oldloc = loc;
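Note on this hunk: the Phase constructor no longer receives the Parser itself, and the initial forest is handed to newNode wrapped in a Result (forest plus predecessor node plus reduction) instead of as separate arguments. A minimal sketch of that wrapping, using hypothetical simplified types rather than the real sbp classes:

    // Hypothetical, simplified type; not the real sbp API.  The point of the
    // change: the seed forest now travels together with the GSS node and the
    // reduction that produced it, bundled into a single Result-like object.
    final class ResultSketch<F, N> {
        final F forest;          // semantic value built so far
        final N parent;          // predecessor GSS node (null for the start seed)
        final Object reduction;  // reduction that produced it (null for the seed)
        ResultSketch(F forest, N parent, Object reduction) {
            this.forest = forest;
            this.parent = parent;
            this.reduction = reduction;
        }
        // usage mirroring the hunk above (hypothetical names):
        //   current.newNode(new ResultSketch<Forest,Node>(seedForest, null, null), startState, true);
    }
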
@@ -37,7 +37,7 @@ public abstract class Parser<Token, NodeType> {
             Forest forest = current.token==null ? null : shiftToken((Token)current.token, loc);
             loc = input.getLocation();
             Token nextToken = input.next();
-            GSS.Phase next = gss.new Phase<Token>(current, this, current, nextToken, loc, input.getLocation(), forest);
+            GSS.Phase next = gss.new Phase<Token>(current, current, nextToken, loc, input.getLocation(), forest);
             if (!helpgc) {
                 FileOutputStream fos = new FileOutputStream("out-"+idx+".dot");
                 PrintWriter p = new PrintWriter(new OutputStreamWriter(fos));
@@ -106,6 +106,7 @@ public abstract class Parser<Token, NodeType> {
         /** used to generate unique values for State.idx */
         private int master_state_idx = 0;
         HashMap<HashSet<Position>,State<Token>>   all_states    = new HashMap<HashSet<Position>,State<Token>>();
+        HashSet<SequenceOrElement>                all_elements  = new HashSet<SequenceOrElement>();
 
         /** construct a parse table for the given grammar */
         public Table(Topology top) { this("s", top); }
@@ -118,15 +119,14 @@ public abstract class Parser<Token, NodeType> {
             cache.eof.put(start0, true);
 
             // construct the set of states
-            HashSet<SequenceOrElement>                        all_elements  = new HashSet<SequenceOrElement>();
             walk(start0, all_elements);
             for(SequenceOrElement e : all_elements)
                 cache.ys.addAll(e, new Walk.YieldSet(e, cache).walk());
             HashSet<Position> hp = new HashSet<Position>();
             reachable(start0, hp);
 
-            this.dead_state = new State<Token>(new HashSet<Position>(), all_states, all_elements);
-            this.start = new State<Token>(hp, all_states, all_elements);
+            this.dead_state = new State<Token>(new HashSet<Position>());
+            this.start = new State<Token>(hp);
 
             // for each state, fill in the corresponding "row" of the parse table
             for(State<Token> state : all_states.values())
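Together with the all_elements field added in the previous hunk, this simplification works because State is an inner class of Table: it can read the enclosing instance's all_states and all_elements directly, so dead_state and start are now built from just their position sets. A minimal sketch of that pattern, with hypothetical names rather than the sbp classes:

    import java.util.HashMap;
    import java.util.HashSet;

    // Hypothetical sketch of the refactoring: the inner class reads the
    // enclosing instance's registries instead of taking them as parameters.
    class TableSketch<K> {
        // shared registries, formerly threaded through every constructor call
        private final HashMap<HashSet<K>, StateSketch> allStates = new HashMap<HashSet<K>, StateSketch>();
        private final HashSet<Object> allElements = new HashSet<Object>();

        class StateSketch {
            final HashSet<K> kernel;
            StateSketch(HashSet<K> kernel) {
                this.kernel = kernel;
                // an inner class sees the outer instance's fields directly,
                // so no extra parameters are needed to register ourselves
                allStates.put(kernel, this);
            }
        }

        StateSketch intern(HashSet<K> kernel) {
            StateSketch s = allStates.get(kernel);
            return s == null ? new StateSketch(kernel) : s;
        }
    }
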
@@ -139,8 +139,13 @@ public abstract class Parser<Token, NodeType> {
                     if (isRightNullable(p)) {
                         Walk.Follow wf = new Walk.Follow(top.empty(), p.owner(), all_elements, cache);
                         Topology follow = wf.walk(p.owner());
-                        for(Position p2 = p; p2 != null && p2.element() != null; p2 = p2.next())
+                        for(Position p2 = p; p2 != null && p2.element() != null; p2 = p2.next()) {
+                            Atom set = new Walk.EpsilonFollowSet(new edu.berkeley.sbp.chr.CharAtom(top.empty().complement()),
+                                                                 new edu.berkeley.sbp.chr.CharAtom(top.empty()),
+                                                                 cache).walk(p2.element());
                             follow = follow.intersect(new Walk.Follow(top.empty(), p2.element(), all_elements, cache).walk(p2.element()));
+                            if (set != null) follow = follow.intersect(set.getTokenTopology());
+                        }
                         state.reductions.put(follow, p);
                         if (wf.includesEof()) state.eofReductions.add(p);
                     }
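For right-nullable reductions this hunk narrows the follow set further: besides intersecting with the follow set of every trailing nullable element (the pre-existing line), it now also intersects with an epsilon follow set computed by the new Walk.EpsilonFollowSet pass over character atoms, whenever one exists. A self-contained toy illustration of that intersection over a character alphabet, with java.util.BitSet standing in for the sbp Topology classes:

    import java.util.Arrays;
    import java.util.BitSet;
    import java.util.List;

    // Toy illustration: the follow set of a right-nullable reduction is the
    // intersection of the follow sets of every trailing nullable element,
    // optionally further restricted by a per-element "epsilon follow" constraint.
    class FollowIntersection {
        static BitSet chars(String s) {
            BitSet b = new BitSet();
            for (char c : s.toCharArray()) b.set(c);
            return b;
        }

        // follows: one follow set per trailing nullable position
        // epsilonFollows: the extra constraint per position, or null when there is none
        static BitSet reductionFollow(BitSet ownerFollow, List<BitSet> follows, List<BitSet> epsilonFollows) {
            BitSet result = (BitSet) ownerFollow.clone();
            for (int i = 0; i < follows.size(); i++) {
                result.and(follows.get(i));            // old behaviour: intersect element follow sets
                BitSet eps = epsilonFollows.get(i);
                if (eps != null) result.and(eps);      // new behaviour: also apply the epsilon constraint
            }
            return result;
        }

        public static void main(String[] args) {
            BitSet follow = reductionFollow(
                chars("abc"),
                Arrays.asList(chars("ab"), chars("abc")),
                Arrays.asList(null, chars("b")));
            System.out.println(follow);                // prints {98}: only 'b' may follow the reduction
        }
    }
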
@@ -165,10 +170,11 @@ public abstract class Parser<Token, NodeType> {
 
         /** a single state in the LR table and the transitions possible from it */
 
-        class State<Token> implements Comparable<State<Token>>, IntegerMappable, Iterable<Position> {
+        class State<Token> implements IntegerMappable, Iterable<Position> {
         
             public  final     int               idx    = master_state_idx++;
             private final     HashSet<Position> hs;
+            public HashSet<State<Token>> also = new HashSet<State<Token>>();
 
             public transient HashMap<Sequence,State<Token>>         gotoSetNonTerminals = new HashMap<Sequence,State<Token>>();
             private transient TopologicalBag<Token,State<Token>>     gotoSetTerminals    = new TopologicalBag<Token,State<Token>>();
@@ -202,7 +208,6 @@ public abstract class Parser<Token, NodeType> {
             /**
              *  create a new state consisting of all the <tt>Position</tt>s in <tt>hs</tt>
              *  @param hs           the set of <tt>Position</tt>s comprising this <tt>State</tt>
-             *  @param all_states   the set of states already constructed (to avoid recreating states)
              *  @param all_elements the set of all elements (Atom instances need not be included)
              *  
              *   In principle these two steps could be merged, but they
@@ -221,14 +226,31 @@ public abstract class Parser<Token, NodeType> {
              *      for non-Atom Elements.
              *  </ul>
              */
-            public State(HashSet<Position> hs,
-                         HashMap<HashSet<Position>,State<Token>> all_states,
-                         HashSet<SequenceOrElement> all_elements) {
+            public State(HashSet<Position> hs) { this(hs, false); }
+            public boolean special;
+            public State(HashSet<Position> hs, boolean special) {
                 this.hs = hs;
+                this.special = special;
 
                 // register ourselves in the all_states hash so that no
                 // two states are ever created with an identical position set
-                all_states.put(hs, this);
+                ((HashMap)all_states).put(hs, this);
+
+                for(Position p : hs) {
+                    if (!p.isFirst()) continue;
+                    for(Sequence s : p.owner().needs()) {
+                        if (hs.contains(s.firstp())) continue;
+                        HashSet<Position> h2 = new HashSet<Position>();
+                        reachable(s.firstp(), h2);
+                        also.add((State<Token>)(all_states.get(h2) == null ? (State)new State<Token>(h2,true) : (State)all_states.get(h2)));
+                    }
+                    for(Sequence s : p.owner().hates()) {
+                        if (hs.contains(s.firstp())) continue;
+                        HashSet<Position> h2 = new HashSet<Position>();
+                        reachable(s, h2);
+                        also.add((State<Token>)(all_states.get(h2) == null ? (State)new State<Token>(h2,true) : (State)all_states.get(h2)));
+                    }
+                }
 
                 // Step 1a: examine all Position's in this state and compute the mappings from
                 //          sets of follow tokens (tokens which could follow this position) to sets
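The rewritten State constructor now takes only its position set (plus an optional special flag), registers itself in the table-wide all_states map, and, for each first position whose owning Sequence carries needs or hates conjuncts, interns a state for that conjunct's reachable positions and records it in the new also set; previously those positions were pulled into the same kernel by reachable() itself (see the final hunk). A sketch of the intern-or-create-and-remember pattern, with hypothetical simplified types:

    import java.util.HashMap;
    import java.util.HashSet;

    // Hypothetical sketch: states for needs/hates conjuncts are interned in a
    // shared registry and recorded in the referring state's `also` set, rather
    // than having their positions merged into the referring state's kernel.
    class ConjunctStates<P> {
        final HashMap<HashSet<P>, StateSketch> allStates = new HashMap<HashSet<P>, StateSketch>();

        class StateSketch {
            final HashSet<P> kernel;
            final HashSet<StateSketch> also = new HashSet<StateSketch>();
            final boolean special;   // true for states created only to host a conjunct
            StateSketch(HashSet<P> kernel, boolean special) {
                this.kernel = kernel;
                this.special = special;
                allStates.put(kernel, this);   // register before building successors
            }
            void addConjunct(HashSet<P> conjunctKernel) {
                StateSketch s = allStates.get(conjunctKernel);
                if (s == null) s = new StateSketch(conjunctKernel, true);
                also.add(s);
            }
        }
    }
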
@@ -251,7 +273,8 @@ public abstract class Parser<Token, NodeType> {
                 for(Topology<Token> r : bag0) {
                     HashSet<Position> h = new HashSet<Position>();
                     for(Position p : bag0.getAll(r)) h.add(p);
-                    gotoSetTerminals.put(r, all_states.get(h) == null ? new State<Token>(h, all_states, all_elements) : all_states.get(h));
+                    ((TopologicalBag)gotoSetTerminals).put(r, all_states.get(h) == null
+                                                           ? new State<Token>(h) : all_states.get(h));
                 }
 
                 // Step 2: for every non-Atom element (ie every Element which has a corresponding reduction),
@@ -274,7 +297,7 @@ public abstract class Parser<Token, NodeType> {
                 }
                 OUTER: for(SequenceOrElement y : move) {
                     HashSet<Position> h = move.getAll(y);
-                    State<Token> s = all_states.get(h) == null ? new State<Token>(h, all_states, all_elements) : all_states.get(h);
+                    State<Token> s = all_states.get(h) == null ? (State)new State<Token>(h) : (State)all_states.get(h);
                     // if a reduction is "lame", it should wind up in the dead_state after reducing
                     if (y instanceof Sequence) {
                         for(Position p : hs) {
@@ -308,7 +331,7 @@ public abstract class Parser<Token, NodeType> {
                 return ret.toString();
             }
 
-            public int compareTo(State<Token> s) { return idx==s.idx ? 0 : idx < s.idx ? -1 : 1; }
+            public Walk.Cache cache() { return cache; }
             public int toInt() { return idx; }
         }
     }
@@ -317,8 +340,8 @@ public abstract class Parser<Token, NodeType> {
     
     private static void reachable(Sequence s, HashSet<Position> h) {
         reachable(s.firstp(), h);
-        for(Sequence ss : s.needs()) reachable(ss, h);
-        for(Sequence ss : s.hates()) reachable(ss, h);
+        //for(Sequence ss : s.needs()) reachable(ss, h);
+        //for(Sequence ss : s.hates()) reachable(ss, h);
     }
     private static void reachable(Element e, HashSet<Position> h) {
         if (e instanceof Atom) return;
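Because conjunct states are now reached through also, reachable(Sequence) no longer recurses into needs() and hates(); the old recursion is kept commented out. A toy version of the remaining closure computation, over a hypothetical map-based grammar rather than the sbp Element/Sequence classes:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;

    // Toy closure: starting from the first position of a production, collect
    // every position reachable without consuming input, entering the first
    // positions of each nonterminal encountered.  Conjunct (needs/hates)
    // sequences are no longer followed here; they live in State.also instead.
    class ReachableSketch {
        // hypothetical grammar shape: nonterminal name -> its productions,
        // each production being a list of symbols (lowercase = terminal)
        static final HashMap<String, List<List<String>>> grammar = new HashMap<String, List<List<String>>>();

        static void reachable(List<String> production, int index, HashSet<String> out) {
            String pos = production + "@" + index;
            if (!out.add(pos)) return;                 // already visited
            if (index >= production.size()) return;    // at the end of the production
            String sym = production.get(index);
            List<List<String>> subs = grammar.get(sym);
            if (subs == null) return;                  // terminal: stop, do not consume it
            for (List<String> sub : subs)              // nonterminal: enter its first positions
                reachable(sub, 0, out);
        }

        public static void main(String[] args) {
            grammar.put("E", Arrays.asList(Arrays.asList("T", "+", "E"), Arrays.asList("T")));
            grammar.put("T", Arrays.asList(Arrays.asList("x")));
            HashSet<String> h = new HashSet<String>();
            reachable(Arrays.asList("E"), 0, h);
            System.out.println(h.size() + " positions reachable without consuming input");
        }
    }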