1 // $Id: CQLLexer.java,v 1.2 2002-10-31 22:22:01 mike Exp $
3 package org.z3950.zing.cql;
4 import java.io.StreamTokenizer;
5 import java.io.StringReader;
6 import java.util.Hashtable;
9 // This is a semi-trivial subclass for java.io.StreamTokenizer that:
10 // * Has a halfDecentPushBack() method that actually works
11 // * Includes a render() method
12 // * Knows about the multi-character tokens "<=", ">=" and "<>"
13 // * Recognises a set of keywords as tokens in their own right
14 // * Includes some primitive debugging-output facilities
15 // It's used only by CQLParser.
17 class CQLLexer extends StreamTokenizer {
18 // New publicly visible token-types
19 static int TT_LE = 1000; // The "<=" relation
20 static int TT_GE = 1001; // The ">=" relation
21 static int TT_NE = 1002; // The "<>" relation
22 static int TT_AND = 1003; // The "and" boolean
23 static int TT_OR = 1004; // The "or" boolean
24 static int TT_NOT = 1005; // The "not" boolean
25 static int TT_PROX = 1006; // The "prox" boolean
26 static int TT_ANY = 1007; // The "any" relation
27 static int TT_ALL = 1008; // The "all" relation
28 static int TT_EXACT = 1009; // The "exact" relation
29 static int TT_pWORD = 1010; // The "word" proximity unit
30 static int TT_SENTENCE = 1011; // The "sentence" proximity unit
31 static int TT_PARAGRAPH = 1012; // The "paragraph" proximity unit
32 static int TT_ELEMENT = 1013; // The "element" proximity unit
33 static int TT_ORDERED = 1014; // The "ordered" proximity ordering
34 static int TT_UNORDERED = 1015; // The "unordered" proximity ordering
    // Support for keywords.  It would be nice to compile this linear
    // list into a Hashtable, but it's hard to store ints as hash
    // values, and next to impossible to use them as hash keys.  So
    // we'll just scan the (very short) list every time we need to do
    // a keyword lookup.
41 private class Keyword {
44 Keyword(int token, String keyword) {
46 this.keyword = keyword;
49 // This should logically be static, but Java won't allow it :-P
50 private Keyword[] keywords = {
51 new Keyword(TT_AND, "and"),
52 new Keyword(TT_OR, "or"),
53 new Keyword(TT_NOT, "not"),
54 new Keyword(TT_PROX, "prox"),
55 new Keyword(TT_ANY, "any"),
56 new Keyword(TT_ALL, "all"),
57 new Keyword(TT_EXACT, "exact"),
58 new Keyword(TT_pWORD, "word"),
59 new Keyword(TT_SENTENCE, "sentence"),
60 new Keyword(TT_PARAGRAPH, "paragraph"),
61 new Keyword(TT_ELEMENT, "element"),
62 new Keyword(TT_ORDERED, "ordered"),
63 new Keyword(TT_UNORDERED, "unordered"),
    // For halfDecentPushBack() and the code at the top of nextToken().
    // TT_UNDEFINED is a sentinel meaning "no token saved"; its value
    // is chosen well away from both StreamTokenizer's TT_* values and
    // any (char) token value.
    private static int TT_UNDEFINED = -1000;
    // One-deep pushback buffer: the ttype/nval/sval triple of the most
    // recently pushed-back token, or TT_UNDEFINED when nothing is saved.
    private int saved_ttype = TT_UNDEFINED;
    private double saved_nval;
    private String saved_sval;
    // Controls debugging output.  NOTE(review): static, so turning
    // debugging on for one lexer affects all instances — presumably
    // acceptable for this command-line-oriented class; confirm.
    private static boolean DEBUG;
75 CQLLexer(String cql, boolean lexdebug) {
76 super(new StringReader(cql));
83 wordChars('\'', '\''); // prevent this from introducing strings
88 private static void debug(String str) {
90 System.err.println("LEXDEBUG: " + str);
93 // I don't honestly understand why we need this, but the
94 // documentation for java.io.StreamTokenizer.pushBack() is pretty
95 // vague about its semantics, and it seems to me that they could
96 // be summed up as "it doesn't work". This version has the very
97 // clear semantics "pretend I didn't call nextToken() just then".
99 private void halfDecentPushBack() {
105 public int nextToken() throws java.io.IOException {
106 if (saved_ttype != TT_UNDEFINED) {
110 saved_ttype = TT_UNDEFINED;
111 debug("using saved ttype=" + ttype + ", " +
112 "nval=" + nval + ", sval='" + sval + "'");
116 underlyingNextToken();
118 debug("token starts with '<' ...");
119 underlyingNextToken();
121 debug("token continues with '=' - it's '<='");
123 } else if (ttype == '>') {
124 debug("token continues with '>' - it's '<>'");
127 debug("next token is " + render() + " (pushed back)");
128 halfDecentPushBack();
130 debug("AFTER: ttype is now " + ttype + " - " + render());
132 } else if (ttype == '>') {
133 debug("token starts with '>' ...");
134 underlyingNextToken();
136 debug("token continues with '=' - it's '>='");
139 debug("next token is " + render() + " (pushed back)");
140 halfDecentPushBack();
142 debug("AFTER: ttype is now " + ttype + " - " + render());
146 debug("done nextToken(): ttype=" + ttype + ", " +
147 "nval=" + nval + ", " + "sval='" + sval + "'" +
148 " (" + render() + ")");
153 // It's important to do keyword recognition here at the lowest
154 // level, otherwise when one of these words follows "<" or ">"
155 // (which can be the beginning of multi-character tokens) it gets
156 // pushed back as a string, and its keywordiness is not
159 public int underlyingNextToken() throws java.io.IOException {
161 if (ttype == TT_WORD)
162 for (int i = 0; i < keywords.length; i++)
163 if (sval.equalsIgnoreCase(keywords[i].keyword))
164 ttype = keywords[i].token;
169 // Simpler interface for the usual case: current token with quoting
171 return render(ttype, true);
174 String render(int token, boolean quoteChars) {
175 if (token == TT_EOF) {
177 } else if (token == TT_NUMBER) {
178 return new Integer((int) nval).toString();
179 } else if (token == TT_WORD) {
180 return "word: " + sval;
181 } else if (token == '"') {
182 return "string: \"" + sval + "\"";
183 } else if (token == TT_LE) {
185 } else if (token == TT_GE) {
187 } else if (token == TT_NE) {
191 // Check whether its associated with one of the keywords
192 for (int i = 0; i < keywords.length; i++)
193 if (token == keywords[i].token)
194 return keywords[i].keyword;
196 // Otherwise it must be a single character, such as '(' or '/'.
197 String res = String.valueOf((char) token);
198 if (quoteChars) res = "'" + res + "'";
    // Test harness: lexes a CQL query given as the sole command-line
    // argument (or read from stdin when no argument is given) and
    // relies on the lexer's own debug() output to display the tokens.
    // NOTE(review): this listing is truncated — the usage branch's
    // exit, the try/else structure around the stdin read, the cql and
    // token declarations, and the closing braces lie outside the
    // visible chunk; do not edit this method without the full file.
    public static void main(String[] args) throws Exception {
        if (args.length > 1) {
            System.err.println("Usage: CQLLexer [<CQL-query>]");
            System.err.println("If unspecified, query is read from stdin");
        if (args.length == 1) {
            // Fixed-size buffer: queries longer than 10000 bytes are
            // silently truncated.
            byte[] bytes = new byte[10000];
            // Read in the whole of standard input in one go
            int nbytes = System.in.read(bytes);
        } catch (java.io.IOException ex) {
            System.err.println("Can't read query: " + ex.getMessage());
            // NOTE(review): new String(byte[]) uses the platform
            // charset and keeps the buffer's trailing NULs — consider
            // new String(bytes, 0, nbytes, StandardCharsets.UTF_8).
            cql = new String(bytes);
        CQLLexer lexer = new CQLLexer(cql, true);
        // Drain the token stream; debug() renders each token.
        while ((token = lexer.nextToken()) != TT_EOF) {
            // Nothing to do: debug() statements render tokens for us