[Python-modules-commits] [python-werkzeug] 01/05: Import python-werkzeug_0.11.10+dfsg1.orig.tar.gz
Ondřej Nový
onovy-guest at moszumanska.debian.org
Wed May 25 20:41:34 UTC 2016
This is an automated email from the git hooks/post-receive script.
onovy-guest pushed a commit to branch master
in repository python-werkzeug.
commit 7e274781319973d4190701c37d48aca05a44d61d
Author: Ondřej Nový <novy at ondrej.org>
Date: Wed May 25 22:32:57 2016 +0200
Import python-werkzeug_0.11.10+dfsg1.orig.tar.gz
---
CHANGES | 10 ++++++++++
CONTRIBUTING.rst | 4 ++++
tests/test_formparser.py | 3 ++-
tests/test_wsgi.py | 15 +++++++++++++++
werkzeug/__init__.py | 2 +-
werkzeug/debug/__init__.py | 7 +++++--
werkzeug/filesystem.py | 2 +-
werkzeug/formparser.py | 5 +++--
werkzeug/wsgi.py | 39 +++++++++++++++++++++++++++++++++++++--
9 files changed, 78 insertions(+), 9 deletions(-)
diff --git a/CHANGES b/CHANGES
index de99d6e..d22dcb0 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,6 +1,16 @@
Werkzeug Changelog
==================
+Version 0.11.10
+---------------
+
+Released on May 24th 2016.
+
+- Fixed a bug that occurs when running on Python 2.6 and using a broken locale.
+ See pull request #912.
+- Fixed a crash when running the debugger on Google App Engine. See issue #925.
+- Fixed an issue with multipart parsing that could cause memory exhaustion.
+
Version 0.11.9
--------------
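For a sense of what the multipart fix above guards against, here is a rough sketch (not part of this commit) using the new `cap_at_buffer` flag that 0.11.10 adds to werkzeug.wsgi.make_line_iter: a body that never emits a newline used to be buffered whole before being yielded, which is the memory exhaustion path.

    from io import BytesIO
    from werkzeug.wsgi import make_line_iter

    # A large pseudo-line with no newline, like a crafted multipart part.
    data = b'A' * (1024 * 1024)

    # Old behaviour (cap_at_buffer left off): the whole pseudo-line is
    # accumulated in memory and yielded as a single chunk.
    chunks = list(make_line_iter(BytesIO(data), limit=len(data)))
    assert len(chunks) == 1 and len(chunks[0]) == len(data)

    # With capping enabled, no yielded chunk exceeds the buffer size.
    chunks = list(make_line_iter(BytesIO(data), limit=len(data),
                                 buffer_size=1024, cap_at_buffer=True))
    assert all(len(c) <= 1024 for c in chunks)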
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 2febd5e..8ed4de7 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -39,6 +39,10 @@ Running the testsuite
You probably want to set up a `virtualenv
<http://virtualenv.readthedocs.org/en/latest/index.html>`_.
+Werkzeug must be installed for all tests to pass::
+
+ pip install -e .
+
The minimal requirement for running the testsuite is ``py.test``. You can
install it with::
diff --git a/tests/test_formparser.py b/tests/test_formparser.py
index 2fad89e..7a68e23 100644
--- a/tests/test_formparser.py
+++ b/tests/test_formparser.py
@@ -154,7 +154,8 @@ class TestFormParser(object):
         class StreamMPP(formparser.MultiPartParser):

             def parse(self, file, boundary, content_length):
-                i = iter(self.parse_lines(file, boundary, content_length))
+                i = iter(self.parse_lines(file, boundary, content_length,
+                                          cap_at_buffer=False))
                 one = next(i)
                 two = next(i)
                 return self.cls(()), {'one': one, 'two': two}
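The extra keyword is needed because parse_lines now defaults to cap_at_buffer=True (see the werkzeug/formparser.py hunk below): this streaming test pulls a fixed number of items off the iterator, and a capped iterator may deliver one long line as several fragments. Roughly, with make_line_iter itself:

    from io import BytesIO
    from werkzeug.wsgi import make_line_iter

    line = b'x' * 20 + b'\n'
    # Uncapped: one logical line comes back as exactly one chunk.
    assert list(make_line_iter(BytesIO(line), limit=len(line),
                               buffer_size=4)) == [line]
    # Capped: the same line arrives as several smaller fragments.
    assert len(list(make_line_iter(BytesIO(line), limit=len(line),
                                   buffer_size=4, cap_at_buffer=True))) > 1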
diff --git a/tests/test_wsgi.py b/tests/test_wsgi.py
index 8f7540d..0fdd91d 100644
--- a/tests/test_wsgi.py
+++ b/tests/test_wsgi.py
@@ -381,6 +381,13 @@ def test_make_chunk_iter_bytes():
     rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
                                    buffer_size=4))
     assert rv == [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz', b'ABCDEFGHIJK']
+
+    data = b'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
+    test_stream = BytesIO(data)
+    rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
+                                   buffer_size=4, cap_at_buffer=True))
+    assert rv == [b'abcd', b'ef', b'ghij', b'kl', b'mnop', b'qrst', b'uvwx',
+                  b'yz', b'ABCD', b'EFGH', b'IJK']

 def test_lines_longer_buffer_size():
     data = '1234567890\n1234567890\n'
@@ -388,3 +395,11 @@ def test_lines_longer_buffer_size():
     lines = list(wsgi.make_line_iter(NativeStringIO(data), limit=len(data),
                                      buffer_size=4))
     assert lines == ['1234567890\n', '1234567890\n']
+
+
+def test_lines_longer_buffer_size_cap():
+    data = '1234567890\n1234567890\n'
+    for bufsize in range(1, 15):
+        lines = list(wsgi.make_line_iter(NativeStringIO(data), limit=len(data),
+                                         buffer_size=4, cap_at_buffer=True))
+        assert lines == ['1234', '5678', '90\n', '1234', '5678', '90\n']
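A useful way to read the new expectations: capping moves chunk boundaries but never alters the payload. Roughly, reusing the test data above:

    from io import BytesIO
    from werkzeug import wsgi

    data = b'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
    uncapped = list(wsgi.make_chunk_iter(BytesIO(data), 'X', limit=len(data),
                                         buffer_size=4))
    capped = list(wsgi.make_chunk_iter(BytesIO(data), 'X', limit=len(data),
                                       buffer_size=4, cap_at_buffer=True))
    # Same bytes either way, just different chunk boundaries.
    assert b''.join(capped) == b''.join(uncapped)
    assert all(len(c) <= 4 for c in capped)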
diff --git a/werkzeug/__init__.py b/werkzeug/__init__.py
index 684516d..4c6f429 100644
--- a/werkzeug/__init__.py
+++ b/werkzeug/__init__.py
@@ -20,7 +20,7 @@ import sys
from werkzeug._compat import iteritems
# the version. Usually set automatically by a script.
-__version__ = '0.11.9'
+__version__ = '0.11.10'
# This import magic raises concerns quite often which is why the implementation
diff --git a/werkzeug/debug/__init__.py b/werkzeug/debug/__init__.py
index e2539f3..b87321f 100644
--- a/werkzeug/debug/__init__.py
+++ b/werkzeug/debug/__init__.py
@@ -65,14 +65,17 @@ def get_machine_id():
         # On OS X we can use the computer's serial number assuming that
         # ioreg exists and can spit out that information.
-        from subprocess import Popen, PIPE
         try:
+            # Also catch import errors: subprocess may not be available,
+            # e.g. on Google App Engine.
+            # See https://github.com/pallets/werkzeug/issues/925
+            from subprocess import Popen, PIPE
             dump = Popen(['ioreg', '-c', 'IOPlatformExpertDevice', '-d', '2'],
                          stdout=PIPE).communicate()[0]
             match = re.search(b'"serial-number" = <([^>]+)', dump)
             if match is not None:
                 return match.group(1)
-        except OSError:
+        except (OSError, ImportError):
             pass

         # On Windows we can use winreg to get the machine guid
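The same defensive pattern applies anywhere a stdlib module can be missing on a sandboxed platform: move the import inside the try block and treat ImportError like any other probe failure. A generic sketch (the helper name is invented for illustration):

    def probe_serial_number():
        try:
            # The import itself can fail on sandboxed platforms such as
            # Google App Engine, so it lives inside the try block.
            from subprocess import Popen, PIPE
            dump = Popen(['ioreg', '-c', 'IOPlatformExpertDevice', '-d', '2'],
                         stdout=PIPE).communicate()[0]
            return dump or None
        except (OSError, ImportError):
            return None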
diff --git a/werkzeug/filesystem.py b/werkzeug/filesystem.py
index 3bd96d1..6246746 100644
--- a/werkzeug/filesystem.py
+++ b/werkzeug/filesystem.py
@@ -59,7 +59,7 @@ def get_filesystem_encoding():
         if not _warned_about_filesystem_encoding:
             warnings.warn(
                 'Detected a misconfigured UNIX filesystem: Will use UTF-8 as '
-                'filesystem encoding instead of {!r}'.format(rv),
+                'filesystem encoding instead of {0!r}'.format(rv),
                 BrokenFilesystemWarning)
             _warned_about_filesystem_encoding = True
         return 'utf-8'
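The change from {!r} to {0!r} is not cosmetic: Python 2.6's str.format requires explicit field names, so the auto-numbered form only works on 2.7 and later. For example:

    # Raises ValueError on Python 2.6 ("zero length field name in format"):
    '{!r}'.format('ascii')
    # Works on Python 2.6, 2.7 and 3.x alike:
    '{0!r}'.format('ascii')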
diff --git a/werkzeug/formparser.py b/werkzeug/formparser.py
index b873171..1148691 100644
--- a/werkzeug/formparser.py
+++ b/werkzeug/formparser.py
@@ -372,7 +372,7 @@ class MultiPartParser(object):
             # the assert is skipped.
             self.fail('Boundary longer than buffer size')

-    def parse_lines(self, file, boundary, content_length):
+    def parse_lines(self, file, boundary, content_length, cap_at_buffer=True):
         """Generate parts of
         ``('begin_form', (headers, name))``
         ``('begin_file', (headers, name, filename))``
@@ -387,7 +387,8 @@ class MultiPartParser(object):
         last_part = next_part + b'--'

         iterator = chain(make_line_iter(file, limit=content_length,
-                                        buffer_size=self.buffer_size),
+                                        buffer_size=self.buffer_size,
+                                        cap_at_buffer=cap_at_buffer),
                          _empty_string_iter)

         terminator = self._find_terminator(iterator)
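Since parse_lines is a generator of events, the new keyword only changes how the underlying line iterator chunks its input; consumers still see the same event shapes. A hedged sketch of driving it directly (the boundary and body are invented for illustration):

    from io import BytesIO
    from werkzeug.formparser import MultiPartParser

    boundary = b'boundary'
    body = (b'--boundary\r\n'
            b'Content-Disposition: form-data; name="field"\r\n\r\n'
            b'value\r\n'
            b'--boundary--\r\n')

    parser = MultiPartParser()
    events = list(parser.parse_lines(BytesIO(body), boundary, len(body)))
    # events starts with ('begin_form', ...), carries the field data as
    # ('cont', ...) chunks and finishes with ('end', None).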
diff --git a/werkzeug/wsgi.py b/werkzeug/wsgi.py
index 455258f..2e1c584 100644
--- a/werkzeug/wsgi.py
+++ b/werkzeug/wsgi.py
@@ -784,7 +784,8 @@ def _make_chunk_iter(stream, limit, buffer_size):
             yield item


-def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
+def make_line_iter(stream, limit=None, buffer_size=10 * 1024,
+                   cap_at_buffer=False):
     """Safely iterates over an input stream line by line.  If the input
     stream is not a :class:`LimitedStream` the `limit` parameter is mandatory.
@@ -808,6 +809,12 @@ def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
                   content length.  Not necessary if the `stream`
                   is a :class:`LimitedStream`.
     :param buffer_size: The optional buffer size.
+    :param cap_at_buffer: if this is set, chunks longer than the buffer
+                          size are split.  Note that the internal buffer
+                          may still grow to up to twice the buffer size,
+                          however.
+
+    .. versionadded:: 0.11.10
+       added support for the `cap_at_buffer` parameter.
     """
     _iter = _make_chunk_iter(stream, limit, buffer_size)
@@ -831,11 +838,19 @@ def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
             if not new_data:
                 break
             new_buf = []
+            buf_size = 0
             for item in chain(buffer, new_data.splitlines(True)):
                 new_buf.append(item)
+                buf_size += len(item)
                 if item and item[-1:] in crlf:
                     yield _join(new_buf)
                     new_buf = []
+                elif cap_at_buffer and buf_size >= buffer_size:
+                    rv = _join(new_buf)
+                    while len(rv) >= buffer_size:
+                        yield rv[:buffer_size]
+                        rv = rv[buffer_size:]
+                    new_buf = [rv]
             buffer = new_buf
         if buffer:
             yield _join(buffer)
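In effect the new elif slices the pending buffer into buffer_size pieces and carries the remainder forward. Observed from the caller's side (illustrative):

    from io import BytesIO
    from werkzeug.wsgi import make_line_iter

    data = b'x' * 10 + b'\n'
    assert list(make_line_iter(BytesIO(data), limit=len(data),
                               buffer_size=4, cap_at_buffer=True)) == \
        [b'xxxx', b'xxxx', b'xx\n']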
@@ -854,7 +869,8 @@ def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
         yield previous


-def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
+def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024,
+                    cap_at_buffer=False):
     """Works like :func:`make_line_iter` but accepts a separator
     which divides chunks.  If you want newline based processing
     you should use :func:`make_line_iter` instead as it
@@ -865,12 +881,19 @@ def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
     .. versionadded:: 0.9
        added support for iterators as input stream.

+    .. versionadded:: 0.11.10
+       added support for the `cap_at_buffer` parameter.
+
     :param stream: the stream or iterable to iterate over.
     :param separator: the separator that divides chunks.
     :param limit: the limit in bytes for the stream.  (Usually
                   content length.  Not necessary if the `stream`
                   is otherwise already limited).
     :param buffer_size: The optional buffer size.
+    :param cap_at_buffer: if this is set, chunks longer than the buffer
+                          size are split.  Note that the internal buffer
+                          may still grow to up to twice the buffer size,
+                          however.
     """
     _iter = _make_chunk_iter(stream, limit, buffer_size)
@@ -895,12 +918,24 @@ def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
             break
         chunks = _split(new_data)
         new_buf = []
+        buf_size = 0
         for item in chain(buffer, chunks):
             if item == separator:
                 yield _join(new_buf)
                 new_buf = []
+                buf_size = 0
             else:
+                buf_size += len(item)
                 new_buf.append(item)
+
+            if cap_at_buffer and buf_size >= buffer_size:
+                rv = _join(new_buf)
+                while len(rv) >= buffer_size:
+                    yield rv[:buffer_size]
+                    rv = rv[buffer_size:]
+                new_buf = [rv]
+                buf_size = len(rv)
+
         buffer = new_buf
     if buffer:
         yield _join(buffer)
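As the 0.9 note above mentions, make_chunk_iter also accepts a plain iterator of chunks, in which case no limit is needed. A small sketch:

    from werkzeug.wsgi import make_chunk_iter

    parts = list(make_chunk_iter(iter([b'alpha;beta;', b'gamma']), ';'))
    assert parts == [b'alpha', b'beta', b'gamma']

The new cap_at_buffer flag composes with iterator input in the same way.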
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/python-modules/packages/python-werkzeug.git