[Python-modules-commits] [sqlparse] 01/03: Import sqlparse_0.1.18.orig.tar.gz

Piotr Ożarowski piotr at moszumanska.debian.org
Sun Nov 8 19:32:04 UTC 2015


This is an automated email from the git hooks/post-receive script.

piotr pushed a commit to branch master
in repository sqlparse.

commit b9a94f8b1474edd378ef9c8afcdfd277a8ca9da0
Author: Piotr Ożarowski <piotr at debian.org>
Date:   Sun Nov 8 20:23:52 2015 +0100

    Import sqlparse_0.1.18.orig.tar.gz
---
 AUTHORS                     |  2 ++
 CHANGES                     | 21 +++++++++++++++++++++
 Makefile                    |  4 ++++
 PKG-INFO                    |  2 +-
 sqlparse.egg-info/PKG-INFO  |  2 +-
 sqlparse/__init__.py        |  2 +-
 sqlparse/engine/filter.py   |  3 +++
 sqlparse/engine/grouping.py | 25 +++++++++++++++----------
 sqlparse/sql.py             | 14 +++++++++++---
 tests/test_regressions.py   | 13 +++++++++++++
 10 files changed, 72 insertions(+), 16 deletions(-)

diff --git a/AUTHORS b/AUTHORS
index e2ecbe7..78052ff 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -22,7 +22,9 @@ Alphabetical list of contributors:
 * quest <quest at wonky.windwards.net>
 * Robert Nix <com.github at rnix.org>
 * Rocky Meza <rmeza at fusionbox.com>
+* Ryan Wooden <rygwdn at gmail.com>
 * spigwitmer <itgpmc at gmail.com>
+* Tim Graham <timograham at gmail.com>
 * Victor Hahn <info at victor-hahn.de>
 * vthriller <farreva232 at yandex.ru>
 * wayne.wuw <wayne.wuw at alibaba-inc.com>
diff --git a/CHANGES b/CHANGES
index 30de31e..4603041 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,3 +1,24 @@
+Release 0.1.18 (Oct 25, 2015)
+-----------------------------
+
+Bug Fixes
+* Remove universal wheel support, added in 0.1.17 by mistake.
+
+
+Release 0.1.17 (Oct 24, 2015)
+-----------------------------
+
+Enhancements
+* Speed up parsing of large SQL statements (pull request: issue201, fixes the
+  following issues: issue199, issue135, issue62, issue41, by Ryan Wooden).
+
+Bug Fixes
+* Fix another splitter bug regarding DECLARE (issue194).
+
+Misc
+* Packages on PyPI are signed from now on.
+
+
 Release 0.1.16 (Jul 26, 2015)
 -----------------------------
 
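The splitter fixes listed in the changelog above are exercised through the public sqlparse.split() entry point. A minimal illustrative call (the SQL text here is made up, not taken from the patch):

    import sqlparse

    # split() returns the individual statements as plain strings.
    statements = sqlparse.split("select 1; select 2;")
    print(len(statements))   # expected: 2
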
diff --git a/Makefile b/Makefile
index 2acd83c..cb3fbf5 100644
--- a/Makefile
+++ b/Makefile
@@ -19,3 +19,7 @@ clean:
 	$(PYTHON) setup.py clean
 	find . -name '*.pyc' -delete
 	find . -name '*~' -delete
+
+release:
+	@rm -rf dist/
+	python setup.py sdist upload --sign --identity E0B84F81
diff --git a/PKG-INFO b/PKG-INFO
index f2a0efb..ff6bee0 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: sqlparse
-Version: 0.1.16
+Version: 0.1.18
 Summary: Non-validating SQL parser
 Home-page: https://github.com/andialbrecht/sqlparse
 Author: Andi Albrecht
diff --git a/sqlparse.egg-info/PKG-INFO b/sqlparse.egg-info/PKG-INFO
index f2a0efb..ff6bee0 100644
--- a/sqlparse.egg-info/PKG-INFO
+++ b/sqlparse.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: sqlparse
-Version: 0.1.16
+Version: 0.1.18
 Summary: Non-validating SQL parser
 Home-page: https://github.com/andialbrecht/sqlparse
 Author: Andi Albrecht
diff --git a/sqlparse/__init__.py b/sqlparse/__init__.py
index b9aadc5..59b3ac8 100644
--- a/sqlparse/__init__.py
+++ b/sqlparse/__init__.py
@@ -6,7 +6,7 @@
 """Parse SQL statements."""
 
 
-__version__ = '0.1.16'
+__version__ = '0.1.18'
 
 
 # Setup namespace
diff --git a/sqlparse/engine/filter.py b/sqlparse/engine/filter.py
index e7ea0ec..f7dd264 100644
--- a/sqlparse/engine/filter.py
+++ b/sqlparse/engine/filter.py
@@ -51,6 +51,9 @@ class StatementFilter:
                 return 1
             return 0
 
+        if unified in ('END IF', 'END FOR'):
+            return -1
+
         if unified == 'END':
             # Should this respect a preceeding BEGIN?
             # In CASE ... WHEN ... END this results in a split level -1.
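
For context, the new END IF / END FOR branch above keeps the statement splitter's nesting depth balanced inside compound statements, so an inner "END IF;" is no longer mistaken for the block-closing END. A deliberately simplified sketch of that bookkeeping (the function name is made up and this is not the real StatementFilter logic, which also treats BEGIN and CASE specially):

    def _change_level(unified):
        # Simplified: block-opening keywords push the depth up, the matching
        # END variants pop it back down; 0 means no change.
        if unified in ('IF', 'FOR', 'BEGIN', 'CASE'):
            return 1
        if unified in ('END IF', 'END FOR', 'END'):
            return -1
        return 0

    print(_change_level('END IF'))   # -1
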
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index a317044..c8c2415 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -180,7 +180,7 @@ def group_identifier(tlist):
             else:
                 if isinstance(t, sql.Comment) and t.is_multiline():
                     yield t
-                raise StopIteration
+                return
 
     def _next_token(tl, i):
         # chooses the next token. if two tokens are found then the
@@ -188,10 +188,13 @@ def group_identifier(tlist):
         t1 = tl.token_next_by_type(
             i, (T.String.Symbol, T.Name, T.Literal.Number.Integer,
                 T.Literal.Number.Float))
-        t2 = tl.token_next_by_instance(i, (sql.Function, sql.Parenthesis))
+
+        i1 = tl.token_index(t1, start=i) if t1 else None
+        t2_end = None if i1 is None else i1 + 1
+        t2 = tl.token_next_by_instance(i, (sql.Function, sql.Parenthesis), end=t2_end)
+
         if t1 and t2:
-            i1 = tl.token_index(t1)
-            i2 = tl.token_index(t2)
+            i2 = tl.token_index(t2, start=i)
             if i1 > i2:
                 return t2
             else:
@@ -211,7 +214,7 @@ def group_identifier(tlist):
     while token:
         identifier_tokens = [token] + list(
             _consume_cycle(tlist,
-                           tlist.token_index(token) + 1))
+                           tlist.token_index(token, start=idx) + 1))
         # remove trailing whitespace
         if identifier_tokens and identifier_tokens[-1].ttype is T.Whitespace:
             identifier_tokens = identifier_tokens[:-1]
@@ -220,7 +223,7 @@ def group_identifier(tlist):
                      or identifier_tokens[0].ttype in (T.Literal.Number.Integer,
                                                        T.Literal.Number.Float))):
             group = tlist.group_tokens(sql.Identifier, identifier_tokens)
-            idx = tlist.token_index(group) + 1
+            idx = tlist.token_index(group, start=idx) + 1
         else:
             idx += 1
         token = _next_token(tlist, idx)
@@ -249,8 +252,9 @@ def group_identifier_list(tlist):
     tcomma = tlist.token_next_match(idx, T.Punctuation, ',')
     start = None
     while tcomma is not None:
-        before = tlist.token_prev(tcomma)
-        after = tlist.token_next(tcomma)
+        idx = tlist.token_index(tcomma, start=idx)
+        before = tlist.token_prev(idx)
+        after = tlist.token_next(idx)
         # Check if the tokens around tcomma belong to a list
         bpassed = apassed = False
         for func in fend1_funcs:
@@ -261,12 +265,13 @@ def group_identifier_list(tlist):
         if not bpassed or not apassed:
             # Something's wrong here, skip ahead to next ","
             start = None
-            tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1,
+            tcomma = tlist.token_next_match(idx + 1,
                                             T.Punctuation, ',')
         else:
             if start is None:
                 start = before
-            next_ = tlist.token_next(after)
+            after_idx = tlist.token_index(after, start=idx)
+            next_ = tlist.token_next(after_idx)
             if next_ is None or not next_.match(T.Punctuation, ','):
                 # Reached the end of the list
                 tokens = tlist.tokens_between(start, after)
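
Two things in the grouping changes above are worth spelling out. Replacing raise StopIteration with a bare return is the forward-compatible way to finish a generator (PEP 479 turns such a raise into a RuntimeError in later Python versions), and the new start= arguments keep token_index() from rescanning the token list from the front. A standalone sketch of the generator point (the function name is made up, loosely modelled on _consume_cycle):

    def _consume(tokens):
        # A bare return ends the generator cleanly; raising StopIteration
        # inside a generator body is what PEP 479 deprecates.
        for tok in tokens:
            if tok is None:
                return
            yield tok

    print(list(_consume(['a', 'b', None, 'c'])))   # ['a', 'b']
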
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 5ecfbdc..7325712 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -256,7 +256,7 @@ class TokenList(Token):
                 continue
             return token
 
-    def token_next_by_instance(self, idx, clss):
+    def token_next_by_instance(self, idx, clss, end=None):
         """Returns the next token matching a class.
 
         *idx* is where to start searching in the list of child tokens.
@@ -267,7 +267,7 @@ class TokenList(Token):
         if not isinstance(clss, (list, tuple)):
             clss = (clss,)
 
-        for token in self.tokens[idx:]:
+        for token in self.tokens[idx:end]:
             if isinstance(token, clss):
                 return token
 
@@ -343,8 +343,16 @@ class TokenList(Token):
                 continue
             return self.tokens[idx]
 
-    def token_index(self, token):
+    def token_index(self, token, start=0):
         """Return list index of token."""
+        if start > 0:
+            # Performing `index` manually is much faster when starting in the middle
+            # of the list of tokens and expecting to find the token near to the starting
+            # index.
+            for i in xrange(start, len(self.tokens)):
+                if self.tokens[i] == token:
+                    return i
+            return -1
         return self.tokens.index(token)
 
     def tokens_between(self, start, end, exclude_end=False):
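
The new start parameter of token_index() above is what the changelog's speed-up entry refers to: lookups that used to call list.index() from position 0 can now begin at a known lower bound. A rough standalone illustration of the same idea (names are made up):

    def index_from(items, value, start=0):
        # Linear scan beginning at `start` instead of 0, mirroring
        # token_index(..., start=...); -1 signals "not found".
        for i in range(start, len(items)):
            if items[i] == value:
                return i
        return -1

    print(index_from(list('abcdefg'), 'f', start=4))   # 5
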
diff --git a/tests/test_regressions.py b/tests/test_regressions.py
index ea8cd56..a64b400 100644
--- a/tests/test_regressions.py
+++ b/tests/test_regressions.py
@@ -256,6 +256,19 @@ SELECT * FROM a.b;"""
     splitted = sqlparse.split(sql)
     assert len(splitted) == 2
 
+def test_issue194_splitting_function():
+    sql = """CREATE FUNCTION a(x VARCHAR(20)) RETURNS VARCHAR(20)
+BEGIN
+ DECLARE y VARCHAR(20);
+ IF (1 = 1) THEN
+ SET x = y;
+ END IF;
+ RETURN x;
+END;
+SELECT * FROM a.b;"""
+    splitted = sqlparse.split(sql)
+    assert len(splitted) == 2
+
 
 def test_issue186_get_type():
     sql = "-- comment\ninsert into foo"

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/sqlparse.git


