X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;ds=sidebyside;f=src%2Fedu%2Fberkeley%2Fsbp%2FGSS.java;h=7dbb29c11e26fde7e9fc5136f8f06dce4936bed0;hb=db08cd7213a2dbaa5146448d4d6f96b3f12e09b5;hp=497093682d0c02439aeb8a00aedc08c8f70b57c6;hpb=39ab0c455ee3e3833a4e75529fce9af661ae6fe3;p=sbp.git

diff --git a/src/edu/berkeley/sbp/GSS.java b/src/edu/berkeley/sbp/GSS.java
index 4970936..7dbb29c 100644
--- a/src/edu/berkeley/sbp/GSS.java
+++ b/src/edu/berkeley/sbp/GSS.java
@@ -122,11 +122,10 @@ class GSS {
         reducing = true;
         HashSet s = new HashSet();
         s.addAll(hash.values());
+        //while(pendingReduct.size()>0)
+        //pendingReduct.removeFirst().go();
         for(Phase.Node n : s) n.queueEmptyReductions();
         for(Phase.Node n : s) n.queueReductions();
-        while(pendingReduct.size()>0)
-            //pendingReduct.iterator().next().go();
-            pendingReduct.removeFirst().go();
     }
 
     /** perform all shift operations, adding promoted nodes to next */
@@ -207,23 +206,49 @@ class GSS {
             queueReductions(n2);
         }
 
+        private HashSet queued = new HashSet();
         /** FIXME */
         public void queueReductions(Node n2) {
-            newReduct(this, n2, null);
+            if (queued.contains(n2)) return;
+            queued.add(n2);
+            Node n = this;
+            for(Parser.Table.Reduction r : token==null ? n.state.getEofReductions() : n.state.getReductions(token)) {
+
+                // UGLY HACK
+                // The problem here is that a "reduction of length 1"
+                // performed twice with different values of n2 needs
+                // to only create a *single* new result, but must add
+                // multiple parents to the node holding that result.
+                // The current reducer doesn't differentiate between
+                // the next node of an n-pop reduction and the
+                // ultimate parent of the last pop, so we need to
+                // cache instances here as a way of avoiding
+                // recreating them.
+
+                // currently we have this weird problem where we
+                // have to do an individual reduct for each child
+                // when the reduction length is one (ie the
+                // children wind up being children of the newly
+                // created node rather than part of the popped
+                // sequence
+                if (r.numPop <= 0) continue;
+                if (r.numPop == 1) {
+                    Forest ret = n.cache().get(r);
+                    if (ret != null) r.reduce(n, n2, n.phase, ret);
+                    else n.cache().put(r, r.reduce(n, n2, n.phase, null));
+                } else {
+                    r.reduce(n, n2, Phase.this, null);
+                }
+            }
         }
 
         /** FIXME */
         public void queueEmptyReductions() {
-            if (reducing) {
-                for(Parser.Table.Reduction r : token==null ? state.getEofReductions() : state.getReductions(token)) {
-                    if (r.numPop==0) {
-                        //r.reduce(this, null, this.phase, r.zero());
-                        Reduct red = new Reduct(this, null, r);
-                        red.go(); /* ALLOC */
-                    }
-                }
-            }
+            if (reducing)
+                for(Parser.Table.Reduction r : token==null ? state.getEofReductions() : state.getReductions(token))
+                    if (r.numPop==0)
+                        r.reduce(this, null, this.phase, r.zero());
         }
 
         private Node(Node parent, Forest pending, Parser.Table.State state, Phase start) {
@@ -237,10 +262,6 @@ class GSS {
             }
         }
 
-        public void newReduct(Node n, Node n2, Parser.Table.Reduction r) {
-            new Reduct(n, n2, r)/*.go()*/;
-        }
-
         // Forest / Completed Reductions //////////////////////////////////////////////////////////////////////////////
 
         /** a pending or completed reduction */
@@ -266,57 +287,50 @@ class GSS {
             this.n = n;
             this.n2 = n2;
             this.r = r;
-            if (reductions.contains(this)) { done = true; return; }
-            reductions.add(this);
-            pendingReduct.addFirst(this);
-            pendingReductions++;
+            //if (reductions.contains(this)) { done = true; return; }
+            //reductions.add(this);
+            //pendingReduct.addFirst(this);
+            //pendingReductions++;
+            go();
         }
 
         /** perform the reduction */
         public void go() {
             if (done) return;
             done = true;
-            pendingReduct.remove(this);
-            pendingReductions--;
+            //pendingReduct.remove(this);
+            //pendingReductions--;
 
-            if (r==null)
+            if (r==null) {
                 for(Parser.Table.Reduction r : token==null ? n.state.getEofReductions() : n.state.getReductions(token)) {
 
+                    // UGLY HACK
+                    // The problem here is that a "reduction of length 1"
+                    // performed twice with different values of n2 needs
+                    // to only create a *single* new result, but must add
+                    // multiple parents to the node holding that result.
+                    // The current reducer doesn't differentiate between
+                    // the next node of an n-pop reduction and the
+                    // ultimate parent of the last pop, so we need to
+                    // cache instances here as a way of avoiding
+                    // recreating them.
+
                     // currently we have this weird problem where we
                     // have to do an individual reduct for each child
                     // when the reduction length is one (ie the
                     // children wind up being children of the newly
                     // created node rather than part of the popped
                     // sequence
-
-                    if (r.numPop == 1) new Reduct(n, n2, r)/*.go()*/;
-                }
-
-
-            // FIXME: explain this
-            if (r==null) {
-                for(Parser.Table.Reduction r : token==null ? n.state.getEofReductions() : n.state.getReductions(token)) {
-                    if (r.numPop <= 1) continue;
-                    r.reduce(n, n2, Phase.this, null);
+                    if (r.numPop <= 0) continue;
+                    if (r.numPop == 1) {
+                        Forest ret = n.cache().get(r);
+                        if (ret != null) r.reduce(n, n2, n.phase, ret);
+                        else n.cache().put(r, r.reduce(n, n2, n.phase, null));
+                    } else {
+                        r.reduce(n, n2, Phase.this, null);
+                    }
                 }
-
             } else if (r.numPop==0) {
                 r.reduce(n, n2, n.phase, r.zero());
-            } else if (r.numPop==1) {
-                // UGLY HACK
-                // The problem here is that a "reduction of length 1"
-                // performed twice with different values of n2 needs
-                // to only create a *single* new result, but must add
-                // multiple parents to the node holding that result.
-                // The current reducer doesn't differentiate between
-                // the next node of an n-pop reduction and the
-                // ultimate parent of the last pop, so we need to
-                // cache instances here as a way of avoiding
-                // recreating them.
-
-                Forest ret = n.cache().get(r);
-                if (ret != null) r.reduce(n, n2, n.phase, ret);
-                else n.cache().put(r, r.reduce(n, n2, n.phase, null));
-
-            } else {
+            } else if (r.numPop != 1) {
                 r.reduce(n, n2, Phase.this, null);
             }
         }
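
Note: the "UGLY HACK" comments in the hunks above describe caching the result of a length-1 reduction per node, so that performing the same reduction again toward a different neighbour reuses the one Forest already produced and only picks up an extra parent, instead of creating a second result. The following is a minimal, self-contained Java sketch of that caching idea only; ReductionKey, ResultForest and GssNode are hypothetical stand-ins, not the real sbp classes or their APIs.

import java.util.HashMap;
import java.util.Map;

class LengthOneReductionCacheSketch {

    /** stand-in for Parser.Table.Reduction; compared by object identity */
    static final class ReductionKey {
        final int numPop;
        ReductionKey(int numPop) { this.numPop = numPop; }
    }

    /** stand-in for the Forest produced by a reduction */
    static final class ResultForest {
        int parentCount = 0;                       // GSS parents pointing at this result
    }

    /** stand-in for a GSS node holding a per-node cache of length-1 results */
    static final class GssNode {
        private final Map<ReductionKey, ResultForest> cache = new HashMap<>();

        /** apply r from this node toward neighbour n2 (n2 is unused in this sketch) */
        ResultForest reduce(ReductionKey r, GssNode n2) {
            if (r.numPop != 1) return new ResultForest();   // only length-1 results are cached
            ResultForest ret = cache.get(r);
            if (ret == null) {                     // first application: create and remember the result
                ret = new ResultForest();
                cache.put(r, ret);
            }
            ret.parentCount++;                     // later applications only add another parent
            return ret;
        }
    }

    public static void main(String[] args) {
        GssNode n = new GssNode();
        ReductionKey r = new ReductionKey(1);
        ResultForest a = n.reduce(r, new GssNode());
        ResultForest b = n.reduce(r, new GssNode());
        // prints "true 2": one shared result with two parents, which is the
        // "single new result, multiple parents" behaviour the comments ask for
        System.out.println((a == b) + " " + a.parentCount);
    }
}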