From b45d4856fb2643a242e331d857069ac84600671c Mon Sep 17 00:00:00 2001
From: jakobrun
Date: Sat, 7 Mar 2015 13:23:57 +0000
Subject: [PATCH] add update and set sql keywords

---
 browser/sql-parser.js  |  4 ++--
 lib/lexer.js           |  4 ++--
 src/lexer.coffee       |  6 ++++--
 test/lexer.spec.coffee | 12 ++++++++++++
 4 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/browser/sql-parser.js b/browser/sql-parser.js
index cc08cdc..92ea48b 100644
--- a/browser/sql-parser.js
+++ b/browser/sql-parser.js
@@ -105,7 +105,7 @@
     };
 
     Lexer.prototype.keywordToken = function() {
-      return this.tokenizeFromWord('SELECT') || this.tokenizeFromWord('DISTINCT') || this.tokenizeFromWord('FROM') || this.tokenizeFromWord('WHERE') || this.tokenizeFromWord('GROUP') || this.tokenizeFromWord('ORDER') || this.tokenizeFromWord('BY') || this.tokenizeFromWord('HAVING') || this.tokenizeFromWord('LIMIT') || this.tokenizeFromWord('JOIN') || this.tokenizeFromWord('LEFT') || this.tokenizeFromWord('RIGHT') || this.tokenizeFromWord('INNER') || this.tokenizeFromWord('OUTER') || this.tokenizeFromWord('ON') || this.tokenizeFromWord('AS') || this.tokenizeFromWord('UNION') || this.tokenizeFromWord('ALL');
+      return this.tokenizeFromWord('SELECT') || this.tokenizeFromWord('DISTINCT') || this.tokenizeFromWord('FROM') || this.tokenizeFromWord('WHERE') || this.tokenizeFromWord('GROUP') || this.tokenizeFromWord('ORDER') || this.tokenizeFromWord('BY') || this.tokenizeFromWord('HAVING') || this.tokenizeFromWord('LIMIT') || this.tokenizeFromWord('JOIN') || this.tokenizeFromWord('LEFT') || this.tokenizeFromWord('RIGHT') || this.tokenizeFromWord('INNER') || this.tokenizeFromWord('OUTER') || this.tokenizeFromWord('ON') || this.tokenizeFromWord('AS') || this.tokenizeFromWord('UNION') || this.tokenizeFromWord('ALL') || this.tokenizeFromWord('UPDATE') || this.tokenizeFromWord('SET');
     };
 
     Lexer.prototype.dotToken = function() {
@@ -193,7 +193,7 @@
       return str.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, "\\$&");
     };
 
-    SQL_KEYWORDS = ['SELECT', 'FROM', 'WHERE', 'GROUP BY', 'ORDER BY', 'HAVING', 'AS'];
+    SQL_KEYWORDS = ['SELECT', 'FROM', 'WHERE', 'GROUP BY', 'ORDER BY', 'HAVING', 'AS', 'UPDATE', 'SET'];
 
     SQL_FUNCTIONS = ['AVG', 'COUNT', 'MIN', 'MAX', 'SUM'];
 
diff --git a/lib/lexer.js b/lib/lexer.js
index c049683..30c76f5 100644
--- a/lib/lexer.js
+++ b/lib/lexer.js
@@ -100,7 +100,7 @@
    };
 
    Lexer.prototype.keywordToken = function() {
-     return this.tokenizeFromWord('SELECT') || this.tokenizeFromWord('DISTINCT') || this.tokenizeFromWord('FROM') || this.tokenizeFromWord('WHERE') || this.tokenizeFromWord('GROUP') || this.tokenizeFromWord('ORDER') || this.tokenizeFromWord('BY') || this.tokenizeFromWord('HAVING') || this.tokenizeFromWord('LIMIT') || this.tokenizeFromWord('JOIN') || this.tokenizeFromWord('LEFT') || this.tokenizeFromWord('RIGHT') || this.tokenizeFromWord('INNER') || this.tokenizeFromWord('OUTER') || this.tokenizeFromWord('ON') || this.tokenizeFromWord('AS') || this.tokenizeFromWord('UNION') || this.tokenizeFromWord('ALL');
+     return this.tokenizeFromWord('SELECT') || this.tokenizeFromWord('DISTINCT') || this.tokenizeFromWord('FROM') || this.tokenizeFromWord('WHERE') || this.tokenizeFromWord('GROUP') || this.tokenizeFromWord('ORDER') || this.tokenizeFromWord('BY') || this.tokenizeFromWord('HAVING') || this.tokenizeFromWord('LIMIT') || this.tokenizeFromWord('JOIN') || this.tokenizeFromWord('LEFT') || this.tokenizeFromWord('RIGHT') || this.tokenizeFromWord('INNER') || this.tokenizeFromWord('OUTER') || this.tokenizeFromWord('ON') || this.tokenizeFromWord('AS') || this.tokenizeFromWord('UNION') || this.tokenizeFromWord('ALL') || this.tokenizeFromWord('UPDATE') || this.tokenizeFromWord('SET');
    };
 
    Lexer.prototype.dotToken = function() {
@@ -188,7 +188,7 @@
      return str.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, "\\$&");
    };
 
-   SQL_KEYWORDS = ['SELECT', 'FROM', 'WHERE', 'GROUP BY', 'ORDER BY', 'HAVING', 'AS'];
+   SQL_KEYWORDS = ['SELECT', 'FROM', 'WHERE', 'GROUP BY', 'ORDER BY', 'HAVING', 'AS', 'UPDATE', 'SET'];
 
    SQL_FUNCTIONS = ['AVG', 'COUNT', 'MIN', 'MAX', 'SUM'];
 
diff --git a/src/lexer.coffee b/src/lexer.coffee
index b2ab754..25d7fee 100644
--- a/src/lexer.coffee
+++ b/src/lexer.coffee
@@ -80,7 +80,9 @@ class Lexer
     @tokenizeFromWord('ON') or
     @tokenizeFromWord('AS') or
     @tokenizeFromWord('UNION') or
-    @tokenizeFromWord('ALL')
+    @tokenizeFromWord('ALL') or
+    @tokenizeFromWord('UPDATE') or
+    @tokenizeFromWord('SET')
 
   dotToken: -> @tokenizeFromWord('DOT', '.')
   operatorToken: -> @tokenizeFromList('OPERATOR', SQL_OPERATORS)
@@ -124,7 +126,7 @@ class Lexer
   regexEscape: (str) ->
     str.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, "\\$&")
 
-  SQL_KEYWORDS = ['SELECT', 'FROM', 'WHERE', 'GROUP BY', 'ORDER BY', 'HAVING', 'AS']
+  SQL_KEYWORDS = ['SELECT', 'FROM', 'WHERE', 'GROUP BY', 'ORDER BY', 'HAVING', 'AS', 'UPDATE', 'SET']
   SQL_FUNCTIONS = ['AVG', 'COUNT', 'MIN', 'MAX', 'SUM']
   SQL_SORT_ORDERS = ['ASC', 'DESC']
   SQL_OPERATORS = ['=', '>', '<', 'LIKE', 'IS NOT', 'IS']
diff --git a/test/lexer.spec.coffee b/test/lexer.spec.coffee
index 0769d49..31a78c8 100644
--- a/test/lexer.spec.coffee
+++ b/test/lexer.spec.coffee
@@ -63,3 +63,15 @@ describe "SQL Lexer", ->
       [ 'LITERAL', 'id', 1 ]
       ["EOF", "", 1]
     ]
+
+  it 'eats update', ->
+    tokens = lexer.tokenize('update a set f1 = f2')
+    tokens.should.eql [
+      ['UPDATE', 'update', 1]
+      ['LITERAL', 'a', 1]
+      ['SET', 'set', 1]
+      ['LITERAL', 'f1', 1 ]
+      ['OPERATOR', '=', 1 ]
+      ['LITERAL', 'f2', 1 ]
+      ['EOF', '', 1]
+    ]
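
Note for reviewers: a quick way to smoke-test the patch locally is a minimal sketch like the one below. It assumes lib/lexer exposes a module-level tokenize(), which is how the spec above calls it; the require path is hypothetical and depends on where the snippet runs.

    # minimal sketch -- './lib/lexer' and the module-level tokenize() are assumed
    lexer = require './lib/lexer'
    console.log lexer.tokenize 'update a set f1 = f2'
    # per the new spec, this should print:
    # [['UPDATE','update',1], ['LITERAL','a',1], ['SET','set',1],
    #  ['LITERAL','f1',1], ['OPERATOR','=',1], ['LITERAL','f2',1], ['EOF','',1]]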