X-Git-Url: http://git.megacz.com/?p=sbp.git;a=blobdiff_plain;f=src%2Fedu%2Fberkeley%2Fsbp%2FParser.java;h=df94e8301093752745e1314b8dcee5d69b5a364a;hp=130ece9bfaeded7237c6f52a28a8a504565648d9;hb=658e5249cdadcd4037a8d6744f8cad0825d77f37;hpb=21f5d362429cbbb7dafc7d307a2a1f6682e130f9

diff --git a/src/edu/berkeley/sbp/Parser.java b/src/edu/berkeley/sbp/Parser.java
index 130ece9..df94e83 100644
--- a/src/edu/berkeley/sbp/Parser.java
+++ b/src/edu/berkeley/sbp/Parser.java
@@ -9,7 +9,6 @@ import java.util.*;
 
 /** a parser which translates an Input<Token> into a Forest<NodeType> */
 public abstract class Parser {
-
     protected final Table pt;
 
     /** create a parser to parse the grammar with start symbol u */
@@ -38,16 +37,18 @@ public abstract class Parser {
             loc = input.getLocation();
             Token nextToken = input.next();
             GSS.Phase next = gss.new Phase(current, current, nextToken, loc, input.getLocation(), forest);
-            if (!helpgc) {
-                FileOutputStream fos = new FileOutputStream("out-"+idx+".dot");
-                PrintWriter p = new PrintWriter(new OutputStreamWriter(fos));
-                GraphViz gv = new GraphViz();
-                for(Object n : next)
-                    ((Node)n).toGraphViz(gv);
-                gv.dump(p);
-                p.flush();
-                p.close();
-            }
+
+            /*
+            FileOutputStream fos = new FileOutputStream("out-"+idx+".dot");
+            PrintWriter p = new PrintWriter(new OutputStreamWriter(fos));
+            GraphViz gv = new GraphViz();
+            for(Object n : current)
+                ((Node)n).toGraphViz(gv);
+            gv.dump(p);
+            p.flush();
+            p.close();
+            */
+
             count = next.size();
             if (current.isDone()) return (Forest)gss.finalResult;
             current = next;
@@ -62,7 +63,7 @@ public abstract class Parser {
     public String toString() {
         StringBuffer sb = new StringBuffer();
         sb.append("parse table");
-        for(State state : all_states.values()) {
+        for(State state : all_states) {
            sb.append(" " + state + "\n");
            for(Topology t : state.shifts) {
                sb.append(" shift \""+
@@ -107,7 +108,9 @@
 
         /** used to generate unique values for State.idx */
         private int master_state_idx = 0;
-        HashMap<HashSet<Position>,State> all_states = new HashMap<HashSet<Position>,State>();
+        HashSet<State> all_states = new HashSet<State>();
+        HashMap<HashSet<Position>,State> doomed_states = new HashMap<HashSet<Position>,State>();
+        HashMap<HashSet<Position>,State> normal_states = new HashMap<HashSet<Position>,State>();
         HashSet all_elements = new HashSet();
 
         /** construct a parse table for the given grammar */
@@ -129,15 +132,15 @@
             HashSet hp = new HashSet();
             reachable(start0, hp);
 
-            this.dead_state = new State(new HashSet());
+            this.dead_state = new State(new HashSet(), true);
             this.start = new State(hp);
 
             // for each state, fill in the corresponding "row" of the parse table
-            for(State state : all_states.values())
+            for(State state : all_states)
                 for(Position p : state.hs) {
 
                     // the Grammar's designated "last position" is the only accepting state
-                    if (start0.contains(p.owner()) && p.next()==null)
+                    if (start0.contains(p.owner()) && p.next()==null && !state.doomed)
                         state.accept = true;
 
                     if (isRightNullable(p)) {
@@ -160,7 +163,7 @@
                         state.shifts.addAll(state.gotoSetTerminals.subset(((Atom)p.element()).getTokenTopology()));
                 }
             if (top instanceof IntegerTopology)
-                for(State state : all_states.values()) {
+                for(State state : all_states) {
                     state.oreductions = state.reductions.optimize(((IntegerTopology)top).functor());
                     state.oshifts = state.shifts.optimize(((IntegerTopology)top).functor());
                 }
@@ -231,28 +234,29 @@
          *
          */
         public State(HashSet hs) { this(hs, false); }
 
-        public boolean special;
-        public State(HashSet hs, boolean special) {
+        public boolean doomed;
+        public State(HashSet hs, boolean doomed) {
             this.hs = hs;
-            this.special = special;
+            this.doomed = doomed;
 
             // register ourselves in the all_states hash so that no
             // two states are ever created with an identical position set
-            ((HashMap)all_states).put(hs, this);
-
+            ((HashMap)(doomed ? doomed_states : normal_states)).put(hs, this);
+            ((HashSet)all_states).add(this);
+
             for(Position p : hs) {
                 if (!p.isFirst()) continue;
                 for(Sequence s : p.owner().needs()) {
                     if (hs.contains(s.firstp())) continue;
                     HashSet h2 = new HashSet();
                     reachable(s.firstp(), h2);
-                    also.add((State)(all_states.get(h2) == null ? (State)new State(h2,true) : (State)all_states.get(h2)));
+                    also.add(mkstate(h2, true));
                 }
                 for(Sequence s : p.owner().hates()) {
                     if (hs.contains(s.firstp())) continue;
                     HashSet h2 = new HashSet();
                     reachable(s, h2);
-                    also.add((State)(all_states.get(h2) == null ? (State)new State(h2,true) : (State)all_states.get(h2)));
+                    also.add(mkstate(h2, true));
                 }
             }
@@ -277,8 +281,7 @@
             for(Topology r : bag0) {
                 HashSet h = new HashSet();
                 for(Position p : bag0.getAll(r)) h.add(p);
-                ((TopologicalBag)gotoSetTerminals).put(r, all_states.get(h) == null
-                                                        ? new State(h) : all_states.get(h));
+                ((TopologicalBag)gotoSetTerminals).put(r, mkstate(h, doomed));
             }
 
             // Step 2: for every non-Atom element (ie every Element which has a corresponding reduction),
@@ -302,7 +305,7 @@
                 }
                 OUTER: for(SequenceOrElement y : move) {
                     HashSet h = move.getAll(y);
-                    State s = all_states.get(h) == null ? (State)new State(h) : (State)all_states.get(h);
+                    State s = mkstate(h, doomed);
                     // if a reduction is "lame", it should wind up in the dead_state after reducing
                     if (y instanceof Sequence) {
                         for(Position p : hs) {
@@ -321,6 +324,11 @@
             }
         }
 
+        private State mkstate(HashSet h, boolean b) {
+            if (b) return doomed_states.get(h) == null ? (State)new State(h,b) : (State)doomed_states.get(h);
+            else return normal_states.get(h) == null ? (State)new State(h,b) : (State)normal_states.get(h);
+        }
+
        public String toStringx() {
            StringBuffer st = new StringBuffer();
            for(Position p : this) {
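
Note on the patch above: the repeated "all_states.get(h) == null ? new State(h) : all_states.get(h)" idiom is folded into mkstate(h, doomed), and the single all_states map is split into doomed_states and normal_states so that a doomed state and a normal state may share the same position set without colliding. The sketch below is only an illustration of that interning pattern, with hypothetical simplified names (StateCache, Item-free String keys) that are not part of the SBP API; it also registers the new state inside mkstate itself, whereas the patch registers it inside the State constructor.

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;

    // Illustration only -- simplified, hypothetical names, not the SBP API.
    // Strings stand in for SBP's Sequence.Position; only equals/hashCode of the key set matter.
    class StateCache {

        static final class State {
            final HashSet<String> items;  // the position set identifying the state
            final boolean doomed;         // selects which of the two caches the state lives in
            State(HashSet<String> items, boolean doomed) { this.items = items; this.doomed = doomed; }
        }

        // one cache per flag, both keyed by the position set (cf. doomed_states / normal_states)
        private final Map<HashSet<String>, State> doomedStates = new HashMap<HashSet<String>, State>();
        private final Map<HashSet<String>, State> normalStates = new HashMap<HashSet<String>, State>();

        /** return the canonical state for a position set, creating and registering it on first use */
        State mkstate(HashSet<String> items, boolean doomed) {
            Map<HashSet<String>, State> cache = doomed ? doomedStates : normalStates;
            State s = cache.get(items);
            if (s == null) {
                s = new State(items, doomed);
                cache.put(items, s);
            }
            return s;
        }
    }

The effect in either formulation is the same: identical position sets are interned once per flag, so a "doomed" lookalike created for a needs()/hates() check no longer overwrites or shadows the normal state in one shared map.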