X-Git-Url: http://git.megacz.com/?p=sbp.git;a=blobdiff_plain;f=src%2Fedu%2Fberkeley%2Fsbp%2FGSS.java;h=dcc8f3353d89663922812885b8973dc46fefa4ba;hp=d8c0fd45430a2f3cc1f020408a9a25e8ff665212;hb=7fbee73b4dd985cb5b217ed297710c00fd9d7004;hpb=0a0227b9180534d2a431f3d6e08a398bde2244c4

diff --git a/src/edu/berkeley/sbp/GSS.java b/src/edu/berkeley/sbp/GSS.java
index d8c0fd4..dcc8f33 100644
--- a/src/edu/berkeley/sbp/GSS.java
+++ b/src/edu/berkeley/sbp/GSS.java
@@ -45,23 +45,26 @@ class GSS {
 
         /** all reductions (pending and completed) */
         private HashSet reductions = new HashSet();     /* ALLOC */
-
+
         /** all nodes, keyed by the value returned by code() */
         private HashMap hash = new HashMap();  /* ALLOC */
 
         /** the number of pending reductions */
         private int pendingReductions = 0;
         private int totalReductions = 0;
-        private HashSet pendingReduct = new HashSet();
+        //private HashSet pendingReduct = new HashSet();
+        private LinkedList pendingReduct = new LinkedList();
 
         /** the number of nodes in this phase */
         private int numNodes = 0;
 
         boolean closed = false;
 
-        public Phase(Phase previous, Token token) {
+        private Token.Location location;
+        public Phase(Phase previous, Token token, Token.Location location) {
             this.pos = previous==null ? 0 : previous.pos+1;
             this.token = token;
+            this.location = location;
         }
 
         public boolean isDone() { return token == null; }
@@ -72,7 +75,7 @@
             throw new Parser.Failed(error, getLocation());
         }
 
-        public Token.Location getLocation() { return token==null ? null : token.getLocation(); }
+        public Token.Location getLocation() { return location; }
 
         /** add a new node (merging with existing nodes if possible)
          *  @param parent the parent of the new node
@@ -100,8 +103,8 @@
                 int count = 0;
                 Parser.Table.Reduction r = null;
                 for(Parser.Table.Reduction red : token==null ? state.getEofReductions() : state.getReductions(token)) { r = red; count++; }
-                //if (count==0) return;     -- BEWARE! this optimization is suspected to cause really nasty heisenbugs
-                if (count > 1) break;
+                //if (count==0) return;     // BEWARE! this optimization is suspected to cause really nasty heisenbugs
+                //if (count > 1) break;
                 //if (r.numPop == 0) break;
                 //r.reduce(pending, parent, null, Phase.this, null);
                 //return;
@@ -114,16 +117,19 @@
 
         /** perform all reduction operations */
        public void reduce() {
-            for(Phase.Node n : hash.values()) {
+            HashSet s = new HashSet();
+            s.addAll(hash.values());
+            for(Phase.Node n : s) {
                 n.queueEmptyReductions();
                 n.queueReductions();
             }
             while(pendingReduct.size()>0)
-                pendingReduct.iterator().next().go();
+                //pendingReduct.iterator().next().go();
+                pendingReduct.removeFirst().go();
         }
 
         /** perform all shift operations, adding promoted nodes to next */
-        public void shift(Phase next) {
+        public void shift(Phase next, Forest result) {
             closed = true;
             Forest res = null;
             boolean ok = false;
@@ -137,7 +143,7 @@
                 if (!n.holder.valid()) continue;
                 if (token == null) continue;
                 for(Parser.Table.State st : n.state.getShifts(token)) {
-                    if (res == null) res = Forest.create(token.getLocation(), token.result(), null, null, false, false);
+                    if (res == null) res = result;
                     next.newNode(n, res, st, true, this);
                     ok = true;
                 }
@@ -171,26 +177,27 @@
         // GSS Nodes //////////////////////////////////////////////////////////////////////////////
 
-        private HashMap pcache = new HashMap();
+        //private HashMap pcache = new HashMap();
 
         /** a node in the GSS */
-        public class Node {
+        public final class Node {
 
             private Forest.Ref holder = null;
-            private HashMap cache = null;
-            public HashMap cache() { return cache==null ? (cache = new HashMap()) : cache; }
-            public Forest.Ref holder() { return holder==null ? (holder = new Forest.Ref()) : holder; }
-            public Forest pending() { return Phase.this.closed ? holder().resolve() : holder; }
-            public FastSet parents() { return parents; }
+            private HashMap cache = null;
 
-            /** which Phase this Node belongs to (node that Node is also a non-static inner class of Phase) */
-            public final Phase phase = Phase.this;
+            /** the set of nodes to which there is an edge starting at this node */
+            public final FastSet parents = new FastSet();  /* ALLOC */
 
             /** what state this node is in */
             public final Parser.Table.State state;
 
+            /** which Phase this Node belongs to (node that Node is also a non-static inner class of Phase) */
+            public final Phase phase = Phase.this;
 
-            /** the set of nodes to which there is an edge starting at this node */
-            public final FastSet parents = new FastSet();  /* ALLOC */
+            public HashMap cache() {
+                return cache==null ? (cache = new HashMap()) : cache; }
+            public Forest.Ref holder() { return holder==null ? (holder = new Forest.Ref()) : holder; }
+            public Forest pending() { return Phase.this.closed ? holder().resolve() : holder; }
+            public FastSet parents() { return parents; }
 
             /** FIXME */
             public void queueReductions() {
@@ -200,18 +207,7 @@
 
             /** FIXME */
             public void queueReductions(Node n2) {
-                new Reduct(this, n2, null);
-                for(Parser.Table.Reduction r : token==null ? state.getEofReductions() : state.getReductions(token)) {
-
-                    // currently we have this weird problem where we
-                    // have to do an individual reduct for each child
-                    // when the reduction length is one (ie the
-                    // children wind up being children of the newly
-                    // created node rather than part of the popped
-                    // sequence
-
-                    if (r.numPop == 1) new Reduct(this, n2, r);
-                }
+                newReduct(this, n2, null);
             }
 
 
@@ -219,7 +215,7 @@
             public void queueEmptyReductions() {
                 for(Parser.Table.Reduction r : token==null ? state.getEofReductions() : state.getReductions(token)) {
                     if (r.numPop==0)
-                        new Reduct(this, null, r);   /* ALLOC */
+                        newReduct(this, null, r);   /* ALLOC */
                 }
             }
 
@@ -234,6 +230,9 @@
             }
         }
 
+        public void newReduct(Node n, Node n2, Parser.Table.Reduction r) {
+            new Reduct(n, n2, r)/*.go()*/;
+        }
 
         // Forest / Completed Reductions //////////////////////////////////////////////////////////////////////////////
 
@@ -262,7 +261,7 @@
                 this.r = r;
                 if (reductions.contains(this)) { done = true; return; }
                 reductions.add(this);
-                pendingReduct.add(this);
+                pendingReduct.addFirst(this);
                 pendingReductions++;
             }
 
@@ -273,13 +272,28 @@
                 pendingReduct.remove(this);
                 pendingReductions--;
 
+                if (r==null)
+                    for(Parser.Table.Reduction r : token==null ? n.state.getEofReductions() : n.state.getReductions(token)) {
+
+                        // currently we have this weird problem where we
+                        // have to do an individual reduct for each child
+                        // when the reduction length is one (ie the
+                        // children wind up being children of the newly
+                        // created node rather than part of the popped
+                        // sequence
+
+                        if (r.numPop == 1) new Reduct(n, n2, r).go();
+                    }
+
+                // FIXME: explain this
                 if (r==null) {
                     for(Parser.Table.Reduction r : token==null ? n.state.getEofReductions() : n.state.getReductions(token)) {
                         if (r.numPop <= 1) continue;
                         r.reduce(n, n2, Phase.this, null);
                     }
-                } else if (r.numPop<=1) {
+                } else if (r.numPop==0) {
+                    r.reduce(n, n2, n.phase, r.zero());
+                } else if (r.numPop==1) {
                     // UGLY HACK
                     // The problem here is that a "reduction of length 0/1"
                     // performed twice with different values of n2 needs
@@ -291,9 +305,9 @@
 
                     // cache instances here as a way of avoiding
                     // recreating them.
 
-                    Forest ret = (r.numPop==0 ? pcache : n.cache()).get(r);
+                    Forest ret = n.cache().get(r);
                     if (ret != null) r.reduce(n, n2, n.phase, ret);
-                    else (r.numPop==0 ? pcache : n.cache()).put(r, r.reduce(n, n2, n.phase, null));
+                    else n.cache().put(r, r.reduce(n, n2, n.phase, null));
                 } else {
                     r.reduce(n, n2, Phase.this, null);
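
Two of the mechanical changes in this patch, draining pendingReduct with removeFirst() instead of iterator().next() and walking a copy of hash.values() inside reduce(), are instances of a standard worklist idiom: snapshot any collection you mutate while iterating, and drain a deque so the processing order is deterministic rather than dependent on hash order. Below is a minimal standalone sketch of that idiom; the Worklist class and task names are hypothetical, not sbp's actual API.

    import java.util.HashSet;
    import java.util.LinkedList;
    import java.util.Set;

    final class Worklist {
        // LIFO queue: addFirst()/removeFirst() mirror pendingReduct.addFirst(this)
        // and pendingReduct.removeFirst().go(); draining a HashSet through
        // iterator().next(), as the old code did, visits tasks in hash order.
        private final LinkedList<Runnable> pending = new LinkedList<Runnable>();

        // dedupe set, playing the role of the `reductions` HashSet
        private final Set<Runnable> seen = new HashSet<Runnable>();

        /** enqueue a task unless an equal task was already queued */
        public void add(Runnable task) {
            if (!seen.add(task)) return;   // HashSet.add reports duplicates
            pending.addFirst(task);
        }

        /** run tasks until none remain; tasks may enqueue further tasks */
        public void drain() {
            // size() is re-checked each pass, so work queued by a running
            // task (as a Reduct.go() can queue further Reducts) is picked up
            while (pending.size() > 0)
                pending.removeFirst().run();
        }

        public static void main(String[] args) {
            final Worklist w = new Worklist();
            w.add(new Runnable() { public void run() {
                System.out.println("first task; queues another");
                w.add(new Runnable() { public void run() {
                    System.out.println("second task");
                }});
            }});
            w.drain();
        }
    }

The HashSet copy in reduce() exists for a related reason: queueReductions() can add nodes to hash while the loop runs, and iterating hash.values() directly during that mutation would throw ConcurrentModificationException.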