if (!fromEmptyReduction) n.queueReductions();
}
+
+ boolean reducing = false;
/** perform all reduction operations */
public void reduce() {
+ reducing = true;
HashSet<Phase.Node> s = new HashSet<Phase.Node>();
s.addAll(hash.values());
- for(Phase.Node n : s) {
- n.queueEmptyReductions();
- n.queueReductions();
- }
+ for(Phase.Node n : s) n.queueEmptyReductions();
+ for(Phase.Node n : s) n.queueReductions();
while(pendingReduct.size()>0)
//pendingReduct.iterator().next().go();
pendingReduct.removeFirst().go();
Forest res = null;
boolean ok = false;
for(Phase.Node n : hash.values()) {
+ if (n.holder==null) continue;
n.holder.resolve();
if (token == null && n.state.isAccepting()) {
ok = true;
error.append("error: unable to shift token \"" + token + "\"\n");
error.append(" before: " +pendingReductions+ "\n");
error.append(" before: " +totalReductions+ "\n");
- for(Phase.Node n : hash.values()) {
- n.queueReductions();
- n.queueEmptyReductions();
- }
+ //for(Phase.Node n : hash.values()) {
+ //n.queueReductions();
+ //n.queueEmptyReductions();
+ //}
error.append(" after: " +pendingReductions+ "\n");
error.append(" candidate states:\n");
for(Phase.Node n : hash.values()) {
/** FIXME */
public void queueEmptyReductions() {
- for(Parser.Table.Reduction r : token==null ? state.getEofReductions() : state.getReductions(token)) {
- if (r.numPop==0)
- newReduct(this, null, r); /* ALLOC */
- }
+ if (reducing)
+ for(Parser.Table.Reduction r : token==null ? state.getEofReductions() : state.getReductions(token))
+ if (r.numPop==0)
+ r.reduce(this, null, this.phase, r.zero());
}
private Node(Node parent, Forest pending, Parser.Table.State state, Phase start) {
reductions.add(this);
pendingReduct.addFirst(this);
pendingReductions++;
+ //if (reducing) go();
}
/** perform the reduction */
pendingReduct.remove(this);
pendingReductions--;
- if (r==null)
+ if (r==null) {
for(Parser.Table.Reduction r : token==null ? n.state.getEofReductions() : n.state.getReductions(token)) {
+ // UGLY HACK
+ // The problem here is that a "reduction of length 1"
+ // performed twice with different values of n2 needs
+ // to only create a *single* new result, but must add
+ // multiple parents to the node holding that result.
+ // The current reducer doesn't differentiate between
+ // the next node of an n-pop reduction and the
+ // ultimate parent of the last pop, so we need to
+ // cache instances here as a way of avoiding
+ // recreating them.
+
            // currently we have this weird problem where we
            // have to do an individual reduct for each child
            // when the reduction length is one (ie the popped
            // child attaches directly to the created node
            // rather than being part of the popped sequence)
- if (r.numPop == 1) new Reduct(n, n2, r).go();
+ if (r.numPop == 1) {
+ Forest ret = n.cache().get(r);
+ if (ret != null) r.reduce(n, n2, n.phase, ret);
+ else n.cache().put(r, r.reduce(n, n2, n.phase, null));
+ }
}
-
-
- // FIXME: explain this
- if (r==null) {
for(Parser.Table.Reduction r : token==null ? n.state.getEofReductions() : n.state.getReductions(token)) {
if (r.numPop <= 1) continue;
r.reduce(n, n2, Phase.this, null);
}
- } else if (r.numPop==0) { r.reduce(n, n2, n.phase, r.zero());
- } else if (r.numPop==1) {
- // UGLY HACK
- // The problem here is that a "reduction of length 0/1"
- // performed twice with different values of n2 needs
- // to only create a *single* new result, but must add
- // multiple parents to the node holding that result.
- // The current reducer doesn't differentiate between
- // the next node of an n-pop reduction and the
- // ultimate parent of the last pop, so we need to
- // cache instances here as a way of avoiding
- // recreating them.
-
- Forest ret = n.cache().get(r);
- if (ret != null) r.reduce(n, n2, n.phase, ret);
- else n.cache().put(r, r.reduce(n, n2, n.phase, null));
-
- } else {
+ } else if (r.numPop != 1) {
r.reduce(n, n2, Phase.this, null);
}
}
/**
 * Packs a (state, start-phase) pair into a single {@code long} hash key:
 * the state's index occupies the high 32 bits and the start phase's
 * position the low 32 bits (0 when {@code start} is null).
 *
 * <p>NOTE(review): assumes {@code start.pos} is non-negative — a
 * negative position would sign-extend into the high word (true of the
 * original expression as well).
 */
private static long code(Parser.Table.State state, Phase start) {
    final long high = ((long) state.idx) << 32;
    final long low = (start == null) ? 0L : start.pos;
    return high | low;
}
-
+ public boolean yak = false;
}